hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf0462af2dd59bd04442e89508274bb84dcbd0b | 1,762 | py | Python | examples/query_vin_and_stats.py | screaminbug/panda | 778fc0bc4323ad9f5473b9deb6e065d6a5aeee1c | [
"MIT"
] | null | null | null | examples/query_vin_and_stats.py | screaminbug/panda | 778fc0bc4323ad9f5473b9deb6e065d6a5aeee1c | [
"MIT"
] | null | null | null | examples/query_vin_and_stats.py | screaminbug/panda | 778fc0bc4323ad9f5473b9deb6e065d6a5aeee1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import time
import struct
from panda import Panda
from hexdump import hexdump
from panda.python.isotp import isotp_send, isotp_recv
# 0x7e0 = Toyota
# 0x18DB33F1 for Honda?
def get_current_data_for_pid(pid):
    """Request OBD-II service 01 (show current data) for one PID.

    Sends the two-byte request over ISO-TP to the ECU request id 0x7e0 and
    returns the raw response received on 0x7e8.
    """
    request = b"\x01" + bytes([pid])
    isotp_send(panda, request, 0x7e0)
    return isotp_recv(panda, 0x7e8)
def get_supported_pids():
    """Discover every service-01 PID the ECU reports as supported.

    PIDs 0x00, 0x20, 0x40, ... each return a 32-bit big-endian bitmask
    covering the following 0x20 PIDs (MSB = lowest PID). We keep querying
    the next bitmask PID as long as the previous block advertised it.
    """
    supported = []
    base = 0
    while True:
        response = get_current_data_for_pid(base)
        bitmask = struct.unpack(">I", response[2:])[0]
        for offset in range(1, 0x21):
            if bitmask & 0x80000000:
                supported.append(base + offset)
            bitmask <<= 1
        base += 0x20
        # Stop once the next bitmask PID itself is not advertised.
        if base not in supported:
            break
    return supported
if __name__ == "__main__":
    panda = Panda()
    panda.set_safety_mode(Panda.SAFETY_ELM327)
    panda.can_clear(0)

    # 09 02 = Get VIN
    isotp_send(panda, b"\x09\x02", 0x7df)
    ret = isotp_recv(panda, 0x7e8)
    hexdump(ret)
    # The first two bytes echo the service/PID (0x49 0x02); the ASCII VIN
    # follows. (Fixed: the slice was reversed, printing only the echo.)
    print("VIN: %s" % "".join(map(chr, ret[2:])))

    # 03 = get DTCS
    isotp_send(panda, b"\x03", 0x7e0)
    dtcs = isotp_recv(panda, 0x7e8)
    # Skip the response header and show the raw DTC payload bytes.
    print("DTCs:", "".join(map(chr, dtcs[2:])))

    supported_pids = get_supported_pids()
    print("Supported PIDs:", supported_pids)

    while 1:
        # Every service-01 response echoes service+PID in its first two
        # bytes; the payload starts at offset 2.
        speed = struct.unpack(">B", get_current_data_for_pid(13)[2:])[0]  # kph
        rpm = struct.unpack(">H", get_current_data_for_pid(12)[2:])[0] / 4.0  # revs
        throttle = struct.unpack(">B", get_current_data_for_pid(17)[2:])[0] / 255.0 * 100  # percent
        temp = struct.unpack(">B", get_current_data_for_pid(5)[2:])[0] - 40  # degrees C
        load = struct.unpack(">B", get_current_data_for_pid(4)[2:])[0] / 255.0 * 100  # percent
        print("%d KPH, %d RPM, %.1f%% Throttle, %d deg C, %.1f%% load" % (speed, rpm, throttle, temp, load))
        time.sleep(0.2)
| 28.419355 | 104 | 0.632236 |
acf046713d69be2e9fc1096d22ce3775a433bcd9 | 1,091 | py | Python | wordlist.py | Dikaeinstein/Think_Python | 370cb5af25230ff20994206e2d8023fd1d4c2c74 | [
"MIT"
] | null | null | null | wordlist.py | Dikaeinstein/Think_Python | 370cb5af25230ff20994206e2d8023fd1d4c2c74 | [
"MIT"
] | null | null | null | wordlist.py | Dikaeinstein/Think_Python | 370cb5af25230ff20994206e2d8023fd1d4c2c74 | [
"MIT"
] | null | null | null | """This module contains a code example related to
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
Copyright 2015 Allen Downey
License: http://creativecommons.org/licenses/by/4.0/
"""
from __future__ import print_function, division
import cwd
import time
def make_word_list1():
    """Reads lines from 'words.txt' and builds a list using append.

    Returns the list of stripped words, one per line.
    """
    t = []
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open('words.txt') as fin:
        for line in fin:
            word = line.strip()
            t.append(word)
    return t
def make_word_list2():
    """Reads lines from 'words.txt' and builds a list using list +.

    Note: ``t = t + [word]`` is deliberately quadratic — this function
    exists so the __main__ block can time it against make_word_list1.
    """
    t = []
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open('words.txt') as fin:
        for line in fin:
            word = line.strip()
            t = t + [word]
    return t
if __name__ == "__main__":
    # Time both list-building strategies on the same word file and report
    # list size, a sample of the contents, and elapsed wall-clock time.
    for build in (make_word_list1, make_word_list2):
        start_time = time.time()
        words = build()
        elapsed_time = time.time() - start_time
        print(len(words))
        print(words[:10])
        print(elapsed_time, 'seconds')
| 20.203704 | 65 | 0.624198 |
acf047fed2554ab58a538745b0fe5289aac4ab13 | 2,131 | py | Python | c2cwsgiutils/prometheus.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cwsgiutils/prometheus.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cwsgiutils/prometheus.py | arnaud-morvan/c2cwsgiutils | aa06b77b247bd8969b88225ee3ea109886aefeac | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
Implement parts of the Prometheus Pushgateway protocol, as defined here:
https://github.com/prometheus/pushgateway
"""
from typing import Mapping, MutableMapping, Any, Optional # noqa # pylint: disable=unused-import
import requests
LabelsType = Optional[Mapping[str, Any]]
class PushgatewayGroupPublisher(object):
    """Accumulates metrics and pushes them as a group to a Prometheus
    Pushgateway using the text exposition format.

    Metrics are buffered by :meth:`add` and sent (then cleared) by
    :meth:`commit`.
    """
    def __init__(self, base_url: str, job: str, instance: Optional[str] = None,
                 labels: LabelsType = None) -> None:
        """
        :param base_url: Base URL of the Pushgateway (trailing '/' optional).
        :param job: Job name used in the push URL.
        :param instance: Optional instance name appended to the push URL.
        :param labels: Labels applied to every metric published by this object.
        """
        if not base_url.endswith('/'):
            base_url += '/'
        self._url = "%smetrics/job/%s" % (base_url, job)
        if instance is not None:
            self._url += '/instance/' + instance
        self._labels = labels
        self._reset()

    @staticmethod
    def _escape_label_value(value: Any) -> str:
        """Escape a label value per the Prometheus text exposition format:
        backslash, double quote and newline must be backslash-escaped."""
        return str(value).replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')

    def _merge_labels(self, labels: LabelsType) -> LabelsType:
        """Merge per-metric labels over the publisher-wide labels
        (per-metric values win on conflict)."""
        if labels is None:
            return self._labels
        elif self._labels is None:
            return labels
        else:
            tmp = dict(self._labels)
            tmp.update(labels)
            return tmp

    def add(self, metric_name: str, metric_value: Any, metric_type: str = 'gauge',
            metric_labels: Optional[Mapping[str, str]] = None) -> None:
        """Buffer one metric sample for the next :meth:`commit`.

        :raises ValueError: if ``metric_name`` was previously added with a
            different ``metric_type``.
        """
        if metric_name in self._types:
            if self._types[metric_name] != metric_type:
                raise ValueError("Cannot change the type of a given metric")
        else:
            self._types[metric_name] = metric_type
            self._to_send += '# TYPE %s %s\n' % (metric_name, metric_type)
        self._to_send += metric_name
        labels = self._merge_labels(metric_labels)
        if labels is not None:
            # Escape label values so embedded quotes/newlines cannot corrupt
            # the exposition payload (the original interpolated them raw).
            self._to_send += \
                '{' + \
                ', '.join('%s="%s"' % (k, self._escape_label_value(v))
                          for k, v in sorted(labels.items())) + \
                '}'
        self._to_send += ' %s\n' % metric_value

    def commit(self) -> None:
        """PUT the buffered payload to the gateway and clear the buffer.

        :raises requests.HTTPError: if the gateway rejects the push.
        """
        requests.put(self._url, data=self._to_send.encode('utf-8')).raise_for_status()
        self._reset()

    def _reset(self) -> None:
        # Drop the pending payload and the per-metric type registry.
        self._to_send = ''
        self._types: MutableMapping[str, str] = {}

    def __str__(self) -> str:
        return self._url + ' ->\n' + self._to_send
| 34.934426 | 98 | 0.581886 |
acf0484892dac73b2fc57c3706c06c11351dc747 | 4,279 | py | Python | hx711.py | smurry/Pool_controller | 30124114a2b73f7288b349f0be0e9ea84eb3ca60 | [
"MIT"
] | 50 | 2018-04-13T13:40:21.000Z | 2022-03-13T13:43:17.000Z | hx711.py | smurry/Pool_controller | 30124114a2b73f7288b349f0be0e9ea84eb3ca60 | [
"MIT"
] | 3 | 2019-04-08T14:58:48.000Z | 2021-08-23T06:10:46.000Z | hx711.py | smurry/Pool_controller | 30124114a2b73f7288b349f0be0e9ea84eb3ca60 | [
"MIT"
] | 16 | 2019-03-15T22:12:04.000Z | 2022-03-12T20:35:46.000Z | from utime import sleep_us, time
from machine import Pin
from micropython import const
class HX711Exception(Exception):
    """Base class for all HX711 driver errors."""


class InvalidMode(HX711Exception):
    """Raised when an unsupported channel/gain combination is selected."""


class DeviceIsNotReady(HX711Exception):
    """Raised when the HX711 does not become ready within the timeout."""
class HX711(object):
    """
    Micropython driver for Avia Semiconductor's HX711
    24-Bit Analog-to-Digital Converter.

    Data is bit-banged: each PD_SCK pulse shifts one bit out of DOUT, and
    extra trailing pulses select the input channel/gain for the NEXT
    conversion. The pulse sequences below are timing-sensitive; do not
    reorder the pin operations.
    """
    # Channel/gain selection codes == number of extra clock pulses sent
    # after the 24 data bits.
    CHANNEL_A_128 = const(1)
    CHANNEL_A_64 = const(3)
    CHANNEL_B_32 = const(2)

    DATA_BITS = const(24)
    MAX_VALUE = const(0x7fffff)   # largest positive 24-bit two's-complement value
    MIN_VALUE = const(0x800000)   # most negative 24-bit two's-complement value
    READY_TIMEOUT_SEC = const(5)
    SLEEP_DELAY_USEC = const(80)  # PD_SCK high > 60 us triggers power-down

    def __init__(self, d_out: int, pd_sck: int, channel: int = CHANNEL_A_128):
        """
        :param d_out: pin number connected to the HX711 DOUT line (input).
        :param pd_sck: pin number connected to PD_SCK (output, idles low).
        :param channel: one of CHANNEL_A_128, CHANNEL_A_64, CHANNEL_B_32.
        """
        self.d_out_pin = Pin(d_out, Pin.IN)
        self.pd_sck_pin = Pin(pd_sck, Pin.OUT, value=0)
        self.channel = channel

    def __repr__(self):
        # self.channel is an ('A'|'B', gain) tuple, which fills both %s slots.
        return "HX711 on channel %s, gain=%s" % self.channel

    def _convert_from_twos_complement(self, value: int) -> int:
        """
        Converts a given integer from the two's complement format.
        """
        if value & (1 << (self.DATA_BITS - 1)):
            value -= 1 << self.DATA_BITS
        return value

    def _set_channel(self):
        """
        Input and gain selection is controlled by the
        number of the input PD_SCK pulses
        3 pulses for Channel A with gain 64
        2 pulses for Channel B with gain 32
        1 pulse for Channel A with gain 128
        """
        for i in range(self._channel):
            self.pd_sck_pin.value(1)
            self.pd_sck_pin.value(0)

    def _wait(self):
        """
        If the HX711 is not ready within READY_TIMEOUT_SEC
        the DeviceIsNotReady exception will be thrown.
        """
        t0 = time()
        while not self.is_ready():
            if time() - t0 > self.READY_TIMEOUT_SEC:
                raise DeviceIsNotReady()

    @property
    def channel(self) -> tuple:
        """
        Get current input channel in a form
        of a tuple (Channel, Gain)
        """
        if self._channel == self.CHANNEL_A_128:
            return 'A', 128
        if self._channel == self.CHANNEL_A_64:
            return 'A', 64
        if self._channel == self.CHANNEL_B_32:
            return 'B', 32

    @channel.setter
    def channel(self, value):
        """
        Set input channel
        HX711.CHANNEL_A_128 - Channel A with gain 128
        HX711.CHANNEL_A_64 - Channel A with gain 64
        HX711.CHANNEL_B_32 - Channel B with gain 32
        """
        if value not in (self.CHANNEL_A_128, self.CHANNEL_A_64, self.CHANNEL_B_32):
            raise InvalidMode('Gain should be one of HX711.CHANNEL_A_128, HX711.CHANNEL_A_64, HX711.CHANNEL_B_32')
        else:
            self._channel = value

        if not self.is_ready():
            self._wait()

        # Clock out (and discard) one full conversion so the trailing
        # channel-select pulses take effect for the next reading.
        for i in range(self.DATA_BITS):
            self.pd_sck_pin.value(1)
            self.pd_sck_pin.value(0)

        self._set_channel()

    def is_ready(self) -> bool:
        """
        When output data is not ready for retrieval,
        digital output pin DOUT is high.
        """
        return self.d_out_pin.value() == 0

    def power_off(self):
        """
        When PD_SCK pin changes from low to high
        and stays at high for longer than 60 us ,
        HX711 enters power down mode.
        """
        self.pd_sck_pin.value(0)
        self.pd_sck_pin.value(1)
        sleep_us(self.SLEEP_DELAY_USEC)

    def power_on(self):
        """
        When PD_SCK returns to low, HX711 will reset
        and enter normal operation mode.
        """
        self.pd_sck_pin.value(0)
        # Re-apply the channel selection; the setter performs the pulses.
        self.channel = self._channel

    def read(self, raw=False):
        """
        Read current value for current channel with current gain.
        if raw is True, the HX711 output will not be converted
        from two's complement format.
        """
        if not self.is_ready():
            self._wait()

        # Shift in 24 bits, MSB first, one bit per clock pulse.
        raw_data = 0
        for i in range(self.DATA_BITS):
            self.pd_sck_pin.value(1)
            self.pd_sck_pin.value(0)
            raw_data = raw_data << 1 | self.d_out_pin.value()
        # Trailing pulses keep the current channel/gain selected.
        self._set_channel()

        if raw:
            return raw_data
        else:
            return self._convert_from_twos_complement(raw_data)
| 28.337748 | 114 | 0.592896 |
acf048cc9a631664c5950ed92dbe8bc279b0ebc4 | 1,866 | py | Python | trapper/metrics/input_handlers/token_classification_input_handler.py | obss/trapper | 40e6fc25a2d8c1ece8bf006c362a9cb163c4355c | [
"MIT"
] | 36 | 2021-11-01T19:29:31.000Z | 2022-02-25T15:19:08.000Z | trapper/metrics/input_handlers/token_classification_input_handler.py | obss/trapper | 40e6fc25a2d8c1ece8bf006c362a9cb163c4355c | [
"MIT"
] | 7 | 2021-11-01T14:33:21.000Z | 2022-03-22T09:01:36.000Z | trapper/metrics/input_handlers/token_classification_input_handler.py | obss/trapper | 40e6fc25a2d8c1ece8bf006c362a9cb163c4355c | [
"MIT"
] | 4 | 2021-11-30T00:34:20.000Z | 2022-03-31T21:06:30.000Z | import numpy as np
from transformers import EvalPrediction
from trapper.common.constants import IGNORED_LABEL_ID
from trapper.data import LabelMapper
from trapper.metrics.input_handlers import MetricInputHandler
@MetricInputHandler.register("token-classification")
class MetricInputHandlerForTokenClassification(MetricInputHandler):
    """
    `MetricInputHandlerForTokenClassification` provides the conversion of predictions
    and labels from ids to labels by using a `LabelMapper`.

    Args:
        label_mapper (): Required to convert ids to matching labels.
    """

    def __init__(
        self,
        label_mapper: LabelMapper,
    ):
        super(MetricInputHandlerForTokenClassification, self).__init__()
        self._label_mapper = label_mapper

    @property
    def label_mapper(self):
        """The `LabelMapper` used for id -> label conversion."""
        return self._label_mapper

    def _id_to_label(self, id_: int) -> str:
        """Map a single label id to its string label."""
        return self.label_mapper.get_label(id_)

    def __call__(self, eval_pred: EvalPrediction) -> EvalPrediction:
        """Convert model logits and label ids into label-string sequences.

        Positions labeled with ``IGNORED_LABEL_ID`` (e.g. padding or ignored
        sub-word positions) are dropped from both predictions and references.
        """
        predictions, references = eval_pred.predictions, eval_pred.label_ids
        # Predicted class id = argmax over the label dimension of the logits.
        all_predicted_ids = np.argmax(predictions, axis=2)
        all_label_ids = references

        actual_predictions = []
        actual_labels = []
        for predicted_ids, label_ids in zip(all_predicted_ids, all_label_ids):
            actual_prediction = []
            actual_label = []
            for (p, l) in zip(predicted_ids, label_ids):
                if l != IGNORED_LABEL_ID:
                    actual_prediction.append(self._id_to_label(p))
                    actual_label.append(self._id_to_label(l))
            actual_predictions.append(actual_prediction)
            actual_labels.append(actual_label)

        # Bug fix: the converted sequences were previously computed and then
        # discarded (the raw logits/ids were returned unchanged). Return the
        # converted label strings so downstream metrics receive them.
        processed_eval_pred = EvalPrediction(
            predictions=actual_predictions, label_ids=actual_labels
        )
        return processed_eval_pred
acf04a09948bf0b32b7ecf747336e1af918bf6fb | 9,499 | py | Python | scripts/data_loader/data_preprocessor.py | er1ca/Gesture-Generation-from-Trimodal-Context | 6e0d9bb5ce29c49ad398fa80c6a4e00c3076d3e3 | [
"MIT"
] | 126 | 2020-09-07T05:54:30.000Z | 2022-03-31T04:53:52.000Z | scripts/data_loader/data_preprocessor.py | MaxCodeXTC/Gesture-Generation-from-Trimodal-Context | 6d988a7211a4d8294e1ef4b45c45ee25d12455d2 | [
"MIT"
] | 31 | 2020-09-09T10:23:36.000Z | 2022-03-13T07:03:32.000Z | scripts/data_loader/data_preprocessor.py | MaxCodeXTC/Gesture-Generation-from-Trimodal-Context | 6d988a7211a4d8294e1ef4b45c45ee25d12455d2 | [
"MIT"
] | 22 | 2020-09-07T08:19:55.000Z | 2022-02-15T13:26:15.000Z | """ create data samples """
import logging
from collections import defaultdict
import lmdb
import math
import numpy as np
import pyarrow
import tqdm
from sklearn.preprocessing import normalize
import utils.data_utils
from data_loader.motion_preprocessor import MotionPreprocessor
class DataPreprocessor:
    """Cuts fixed-length training samples out of an LMDB of annotated clips.

    Each source clip (skeletons + audio features + raw audio + word
    timestamps) is resampled and divided into overlapping windows of
    ``n_poses`` frames; windows that pass filtering are written to an output
    LMDB as pyarrow-serialized records.
    """
    def __init__(self, clip_lmdb_dir, out_lmdb_dir, n_poses, subdivision_stride,
                 pose_resampling_fps, mean_pose, mean_dir_vec, disable_filtering=False):
        """
        :param clip_lmdb_dir: path of the source LMDB (read-only).
        :param out_lmdb_dir: path of the output LMDB to create/fill.
        :param n_poses: number of pose frames per output sample.
        :param subdivision_stride: stride (in frames) between sample windows.
        :param pose_resampling_fps: target fps for skeleton resampling.
        :param mean_pose: mean pose used by the motion filter.
        :param mean_dir_vec: mean direction vector subtracted for normalization.
        :param disable_filtering: keep samples even if the motion filter
            rejects them.
        """
        self.n_poses = n_poses
        self.subdivision_stride = subdivision_stride
        self.skeleton_resampling_fps = pose_resampling_fps
        self.mean_pose = mean_pose
        self.mean_dir_vec = mean_dir_vec
        self.disable_filtering = disable_filtering

        self.src_lmdb_env = lmdb.open(clip_lmdb_dir, readonly=True, lock=False)
        with self.src_lmdb_env.begin() as txn:
            self.n_videos = txn.stat()['entries']

        # Lengths of the audio windows matching one n_poses motion window.
        # NOTE(review): the raw-audio length assumes a 16 kHz sample rate —
        # confirm against the dataset preparation code.
        self.spectrogram_sample_length = utils.data_utils.calc_spectrogram_length_from_motion_length(self.n_poses, self.skeleton_resampling_fps)
        self.audio_sample_length = int(self.n_poses / self.skeleton_resampling_fps * 16000)

        # create db for samples
        map_size = 1024 * 50  # in MB
        map_size <<= 20  # in B
        self.dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size=map_size)
        self.n_out_samples = 0

    def run(self):
        """Process every clip of every video, print stats, and close the DBs."""
        # Counts of rejected samples keyed by the filter's rejection message.
        n_filtered_out = defaultdict(int)
        src_txn = self.src_lmdb_env.begin(write=False)

        # sampling and normalization
        cursor = src_txn.cursor()
        for key, value in cursor:
            video = pyarrow.deserialize(value)
            vid = video['vid']
            clips = video['clips']
            for clip_idx, clip in enumerate(clips):
                filtered_result = self._sample_from_clip(vid, clip)
                for type in filtered_result.keys():
                    n_filtered_out[type] += filtered_result[type]

        # print stats
        with self.dst_lmdb_env.begin() as txn:
            print('no. of samples: ', txn.stat()['entries'])
            n_total_filtered = 0
            for type, n_filtered in n_filtered_out.items():
                print('{}: {}'.format(type, n_filtered))
                n_total_filtered += n_filtered
            print('no. of excluded samples: {} ({:.1f}%)'.format(
                n_total_filtered, 100 * n_total_filtered / (txn.stat()['entries'] + n_total_filtered)))

        # close db
        self.src_lmdb_env.close()
        self.dst_lmdb_env.sync()
        self.dst_lmdb_env.close()

    def _sample_from_clip(self, vid, clip):
        """Cut one clip into windows and write the accepted ones to the DB.

        Returns a dict mapping filter rejection message -> count of windows
        rejected for that reason.
        """
        clip_skeleton = clip['skeletons_3d']
        clip_audio = clip['audio_feat']
        clip_audio_raw = clip['audio_raw']
        clip_word_list = clip['words']
        clip_s_f, clip_e_f = clip['start_frame_no'], clip['end_frame_no']
        clip_s_t, clip_e_t = clip['start_time'], clip['end_time']

        n_filtered_out = defaultdict(int)

        # skeleton resampling
        clip_skeleton = utils.data_utils.resample_pose_seq(clip_skeleton, clip_e_t - clip_s_t, self.skeleton_resampling_fps)

        # divide
        aux_info = []
        sample_skeletons_list = []
        sample_words_list = []
        sample_audio_list = []
        sample_spectrogram_list = []

        num_subdivision = math.floor(
            (len(clip_skeleton) - self.n_poses)
            / self.subdivision_stride) + 1  # floor((K - (N+M)) / S) + 1

        # Sanity check: the precomputed spectrogram must match the motion
        # length (small tolerance for rounding at clip boundaries).
        expected_audio_length = utils.data_utils.calc_spectrogram_length_from_motion_length(len(clip_skeleton), self.skeleton_resampling_fps)
        assert abs(expected_audio_length - clip_audio.shape[1]) <= 5, 'audio and skeleton lengths are different'

        for i in range(num_subdivision):
            start_idx = i * self.subdivision_stride
            fin_idx = start_idx + self.n_poses

            sample_skeletons = clip_skeleton[start_idx:fin_idx]
            subdivision_start_time = clip_s_t + start_idx / self.skeleton_resampling_fps
            subdivision_end_time = clip_s_t + fin_idx / self.skeleton_resampling_fps
            sample_words = self.get_words_in_time_range(word_list=clip_word_list,
                                                        start_time=subdivision_start_time,
                                                        end_time=subdivision_end_time)

            # spectrogram: locate the audio columns aligned with this window
            audio_start = math.floor(start_idx / len(clip_skeleton) * clip_audio.shape[1])
            audio_end = audio_start + self.spectrogram_sample_length
            if audio_end > clip_audio.shape[1]:  # correct size mismatch between poses and audio
                # logging.info('expanding audio array, audio start={}, end={}, clip_length={}'.format(
                #     audio_start, audio_end, clip_audio.shape[1]))
                # Pad symmetrically so the last window is still full length.
                n_padding = audio_end - clip_audio.shape[1]
                padded_data = np.pad(clip_audio, ((0, 0), (0, n_padding)), mode='symmetric')
                sample_spectrogram = padded_data[:, audio_start:audio_end]
            else:
                sample_spectrogram = clip_audio[:, audio_start:audio_end]

            # raw audio: same alignment, in waveform samples
            audio_start = math.floor(start_idx / len(clip_skeleton) * len(clip_audio_raw))
            audio_end = audio_start + self.audio_sample_length
            if audio_end > len(clip_audio_raw):  # correct size mismatch between poses and audio
                # logging.info('expanding audio array, audio start={}, end={}, clip_length={}'.format(
                #     audio_start, audio_end, len(clip_audio_raw)))
                n_padding = audio_end - len(clip_audio_raw)
                padded_data = np.pad(clip_audio_raw, (0, n_padding), mode='symmetric')
                sample_audio = padded_data[audio_start:audio_end]
            else:
                sample_audio = clip_audio_raw[audio_start:audio_end]

            # Windows with fewer than two spoken words are skipped entirely.
            if len(sample_words) >= 2:
                # filtering motion skeleton data
                sample_skeletons, filtering_message = MotionPreprocessor(sample_skeletons, self.mean_pose).get()
                is_correct_motion = (sample_skeletons != [])
                motion_info = {'vid': vid,
                               'start_frame_no': clip_s_f + start_idx,
                               'end_frame_no': clip_s_f + fin_idx,
                               'start_time': subdivision_start_time,
                               'end_time': subdivision_end_time,
                               'is_correct_motion': is_correct_motion, 'filtering_message': filtering_message}

                if is_correct_motion or self.disable_filtering:
                    sample_skeletons_list.append(sample_skeletons)
                    sample_words_list.append(sample_words)
                    sample_audio_list.append(sample_audio)
                    sample_spectrogram_list.append(sample_spectrogram)
                    aux_info.append(motion_info)
                else:
                    n_filtered_out[filtering_message] += 1

        if len(sample_skeletons_list) > 0:
            with self.dst_lmdb_env.begin(write=True) as txn:
                for words, poses, audio, spectrogram, aux in zip(sample_words_list, sample_skeletons_list,
                                                                 sample_audio_list, sample_spectrogram_list,
                                                                 aux_info):
                    # preprocessing for poses: convert to normalized direction vectors
                    poses = np.asarray(poses)
                    dir_vec = utils.data_utils.convert_pose_seq_to_dir_vec(poses)
                    normalized_dir_vec = self.normalize_dir_vec(dir_vec, self.mean_dir_vec)

                    # save under a zero-padded sequential key
                    k = '{:010}'.format(self.n_out_samples).encode('ascii')
                    v = [words, poses, normalized_dir_vec, audio, spectrogram, aux]
                    v = pyarrow.serialize(v).to_buffer()
                    txn.put(k, v)
                    self.n_out_samples += 1

        return n_filtered_out

    @staticmethod
    def normalize_dir_vec(dir_vec, mean_dir_vec):
        """Mean-center direction vectors."""
        return dir_vec - mean_dir_vec

    @staticmethod
    def get_words_in_time_range(word_list, start_time, end_time):
        """Return the (word, start, end) tuples overlapping [start_time, end_time).

        Assumes ``word_list`` is sorted by word start time (the early break
        relies on it).
        """
        words = []

        for word in word_list:
            _, word_s, word_e = word[0], word[1], word[2]

            if word_s >= end_time:
                break

            if word_e <= start_time:
                continue

            words.append(word)

        return words

    @staticmethod
    def unnormalize_data(normalized_data, data_mean, data_std, dimensions_to_ignore):
        """
        this method is from https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/generateMotionData.py#L12
        """
        T = normalized_data.shape[0]
        D = data_mean.shape[0]

        origData = np.zeros((T, D), dtype=np.float32)
        dimensions_to_use = []
        for i in range(D):
            if i in dimensions_to_ignore:
                continue
            dimensions_to_use.append(i)
        dimensions_to_use = np.array(dimensions_to_use)

        origData[:, dimensions_to_use] = normalized_data

        # potentially inefficient, but only done once per experiment
        stdMat = data_std.reshape((1, D))
        stdMat = np.repeat(stdMat, T, axis=0)
        meanMat = data_mean.reshape((1, D))
        meanMat = np.repeat(meanMat, T, axis=0)
        origData = np.multiply(origData, stdMat) + meanMat
        return origData
| 43.976852 | 144 | 0.607327 |
acf04af2efe30e768b9f8c138c900f44bbacc26a | 347 | py | Python | beetsplug/printimport.py | Schweinepriester/beets-printimport | 3501b6bbf307e8ea6a65e4b545063434540f4e75 | [
"MIT"
] | null | null | null | beetsplug/printimport.py | Schweinepriester/beets-printimport | 3501b6bbf307e8ea6a65e4b545063434540f4e75 | [
"MIT"
] | null | null | null | beetsplug/printimport.py | Schweinepriester/beets-printimport | 3501b6bbf307e8ea6a65e4b545063434540f4e75 | [
"MIT"
] | null | null | null | from beets.plugins import BeetsPlugin
class PrintImport(BeetsPlugin):
    """Beets plugin that prints a one-line summary for each imported album."""

    def __init__(self):
        super(PrintImport, self).__init__()
        self.register_listener('album_imported', self.printimport)

    def printimport(self, library, album):
        """Handle the ``album_imported`` event.

        Fixed: this is registered as a *bound* method, so the listener must
        accept ``self``; without it every callback raised a TypeError.
        The parenthesized ``print`` works under both Python 2 and 3.
        """
        print("Musik: {albumartist} - {album} ({genre} - {year})".format(**album))  # @ {bitrate} kBit/s
acf04b786b738ce66153b24ab2e57eb0186292b5 | 29,192 | py | Python | sdks/python/apache_beam/runners/portability/fn_api_runner/execution.py | ibzib/beam | f98104a22b69972744a13378e17af5f2361fbb3e | [
"Apache-2.0"
] | 1 | 2020-06-10T10:38:02.000Z | 2020-06-10T10:38:02.000Z | sdks/python/apache_beam/runners/portability/fn_api_runner/execution.py | ibzib/beam | f98104a22b69972744a13378e17af5f2361fbb3e | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/portability/fn_api_runner/execution.py | ibzib/beam | f98104a22b69972744a13378e17af5f2361fbb3e | [
"Apache-2.0"
] | 1 | 2020-06-25T23:45:11.000Z | 2020-06-25T23:45:11.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Set of utilities for execution of a pipeline by the FnApiRunner."""
from __future__ import absolute_import
import collections
import copy
import itertools
from typing import TYPE_CHECKING
from typing import Any
from typing import DefaultDict
from typing import Dict
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing_extensions import Protocol
from apache_beam import coders
from apache_beam.coders import BytesCoder
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.coders.coders import GlobalWindowCoder
from apache_beam.coders.coders import WindowedValueCoder
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners.portability.fn_api_runner import translations
from apache_beam.runners.portability.fn_api_runner.translations import create_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import only_element
from apache_beam.runners.portability.fn_api_runner.translations import split_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.transforms import trigger
from apache_beam.transforms import window
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
from apache_beam.utils import windowed_value
if TYPE_CHECKING:
from apache_beam.coders.coder_impl import CoderImpl
from apache_beam.runners.portability.fn_api_runner import worker_handlers
from apache_beam.runners.portability.fn_api_runner.translations import DataSideInput
from apache_beam.transforms.window import BoundedWindow
# A single empty byte-string element in the global window, encoded once up
# front so Impulse transforms can feed it directly into data channels.
ENCODED_IMPULSE_VALUE = WindowedValueCoder(
    BytesCoder(), GlobalWindowCoder()).get_impl().encode_nested(
        GlobalWindows.windowed_value(b''))

# WindowFn URNs the runner can interpret directly; opaque pickled Python
# WindowFns are excluded.
SAFE_WINDOW_FNS = set(window.WindowFn._known_urns.keys()) - set(
    [python_urns.PICKLED_WINDOWFN])
class Buffer(Protocol):
    """Structural interface for a container of encoded-element pages."""

    def __iter__(self):
        # type: () -> Iterator[bytes]
        """Iterate over the buffered pages of encoded elements."""
        pass

    def append(self, item):
        # type: (bytes) -> None
        """Add one page of encoded elements."""
        pass
class PartitionableBuffer(Buffer, Protocol):
    """A :class:`Buffer` whose contents can be split into ``n`` partitions."""

    def partition(self, n):
        # type: (int) -> List[List[bytes]]
        """Split the buffered pages into ``n`` lists for parallel processing."""
        pass
class ListBuffer(object):
    """Holds appended pages of encoded elements and supports splitting them
    into ``n`` partitions for parallel processing."""

    def __init__(self, coder_impl):
        self._coder_impl = coder_impl
        self._inputs = []  # type: List[bytes]
        self._grouped_output = None
        self.cleared = False

    def append(self, element):
        # type: (bytes) -> None
        """Add one page; disallowed once cleared or re-grouped by partition()."""
        if self.cleared:
            raise RuntimeError('Trying to append to a cleared ListBuffer.')
        if self._grouped_output:
            raise RuntimeError('ListBuffer append after read.')
        self._inputs.append(element)

    def partition(self, n):
        # type: (int) -> List[List[bytes]]
        """Split the buffered pages into ``n`` partitions.

        With at least ``n`` pages (or none at all) the existing pages are
        striped across partitions without re-encoding. Otherwise elements are
        re-decoded and dealt out round-robin; that result is cached, so a
        later call with a different ``n`` is not supported.
        """
        if self.cleared:
            raise RuntimeError('Trying to partition a cleared ListBuffer.')
        num_pages = len(self._inputs)
        if num_pages >= n or num_pages == 0:
            return [self._inputs[k::n] for k in range(n)]
        if not self._grouped_output:
            streams = [create_OutputStream() for _ in range(n)]
            target = 0
            for page in self._inputs:
                in_stream = create_InputStream(page)
                while in_stream.size() > 0:
                    element = self._coder_impl.decode_from_stream(in_stream, True)
                    self._coder_impl.encode_to_stream(element, streams[target], True)
                    target = (target + 1) % n
            self._grouped_output = [[stream.get()] for stream in streams]
        return self._grouped_output

    def __iter__(self):
        # type: () -> Iterator[bytes]
        """Iterate over the raw appended pages."""
        if self.cleared:
            raise RuntimeError('Trying to iterate through a cleared ListBuffer.')
        return iter(self._inputs)

    def clear(self):
        # type: () -> None
        """Discard all contents; the buffer stays unusable until reset()."""
        self.cleared = True
        self._inputs = []
        self._grouped_output = None

    def reset(self):
        """Resets a cleared buffer for reuse."""
        if not self.cleared:
            raise RuntimeError('Trying to reset a non-cleared ListBuffer.')
        self.cleared = False
class GroupingBuffer(object):
    """Used to accumulate grouped (shuffled) results.

    Acts as an in-memory GroupByKey: appended pre-grouped key-value pages are
    accumulated per encoded key, and :meth:`partition` emits the grouped,
    windowed/triggered output encoded with the post-grouping coder.
    """
    def __init__(self,
                 pre_grouped_coder,  # type: coders.Coder
                 post_grouped_coder,  # type: coders.Coder
                 windowing
                ):
        # type: (...) -> None
        self._key_coder = pre_grouped_coder.key_coder()
        self._pre_grouped_coder = pre_grouped_coder
        self._post_grouped_coder = post_grouped_coder
        # Maps encoded key -> list of values (windowed values when windowing
        # is non-trivial).
        self._table = collections.defaultdict(
            list)  # type: DefaultDict[bytes, List[Any]]
        self._windowing = windowing
        self._grouped_output = None  # type: Optional[List[List[bytes]]]

    def append(self, elements_data):
        # type: (bytes) -> None
        """Decode a page of key-value pairs and accumulate them per key."""
        if self._grouped_output:
            raise RuntimeError('Grouping table append after read.')
        input_stream = create_InputStream(elements_data)
        coder_impl = self._pre_grouped_coder.get_impl()
        key_coder_impl = self._key_coder.get_impl()
        # TODO(robertwb): We could optimize this even more by using a
        # window-dropping coder for the data plane.
        is_trivial_windowing = self._windowing.is_default()
        while input_stream.size() > 0:
            windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
            key, value = windowed_key_value.value
            self._table[key_coder_impl.encode(key)].append(
                value if is_trivial_windowing else windowed_key_value.
                with_value(value))

    def partition(self, n):
        # type: (int) -> List[List[bytes]]
        """ It is used to partition _GroupingBuffer to N parts. Once it is
        partitioned, it would not be re-partitioned with diff N. Re-partition
        is not supported now.
        """
        if not self._grouped_output:
            if self._windowing.is_default():
                # Trivial windowing: wrap each (key, values) in a single
                # final ON_TIME pane in the global window.
                globally_window = GlobalWindows.windowed_value(
                    None,
                    timestamp=GlobalWindow().max_timestamp(),
                    pane_info=windowed_value.PaneInfo(
                        is_first=True,
                        is_last=True,
                        timing=windowed_value.PaneInfoTiming.ON_TIME,
                        index=0,
                        nonspeculative_index=0)).with_value
                windowed_key_values = lambda key, values: [
                    globally_window((key, values))]
            else:
                # TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
                # note that this only comes through if windowing is default - but what
                # about having multiple firings on the global window.
                # May need to revise.
                trigger_driver = trigger.create_trigger_driver(self._windowing, True)
                windowed_key_values = trigger_driver.process_entire_key
            coder_impl = self._post_grouped_coder.get_impl()
            key_coder_impl = self._key_coder.get_impl()
            self._grouped_output = [[] for _ in range(n)]
            output_stream_list = [create_OutputStream() for _ in range(n)]
            # Distribute keys round-robin (by table-iteration order) over the
            # n output streams.
            for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
                key = key_coder_impl.decode(encoded_key)
                for wkvs in windowed_key_values(key, windowed_values):
                    coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True)
            for ix, output_stream in enumerate(output_stream_list):
                self._grouped_output[ix] = [output_stream.get()]
            self._table.clear()
        return self._grouped_output

    def __iter__(self):
        # type: () -> Iterator[bytes]
        """ Since partition() returns a list of lists, add this __iter__ to return
        a list to simplify code when we need to iterate through ALL elements of
        _GroupingBuffer.
        """
        return itertools.chain(*self.partition(1))
class WindowGroupingBuffer(object):
    """Used to partition windowed side inputs.

    Accumulates windowed values keyed by (key, window) — the key is a fixed
    '' for ITERABLE side inputs — and emits one encoded entry per (key,
    window) pair via :meth:`encoded_items`.
    """
    def __init__(
        self,
        access_pattern,
        coder  # type: WindowedValueCoder
    ):
        # type: (...) -> None
        # Here's where we would use a different type of partitioning
        # (e.g. also by key) for a different access pattern.
        if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
            self._kv_extractor = lambda value: ('', value)
            self._key_coder = coders.SingletonCoder('')  # type: coders.Coder
            self._value_coder = coder.wrapped_value_coder
        elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
            self._kv_extractor = lambda value: value
            self._key_coder = coder.wrapped_value_coder.key_coder()
            self._value_coder = (coder.wrapped_value_coder.value_coder())
        else:
            raise ValueError("Unknown access pattern: '%s'" % access_pattern.urn)
        self._windowed_value_coder = coder
        self._window_coder = coder.window_coder
        self._values_by_window = collections.defaultdict(
            list)  # type: DefaultDict[Tuple[str, BoundedWindow], List[Any]]

    def append(self, elements_data):
        # type: (bytes) -> None
        """Decode a page of windowed values, bucketing each value once per
        window it belongs to."""
        input_stream = create_InputStream(elements_data)
        while input_stream.size() > 0:
            windowed_val_coder_impl = self._windowed_value_coder.get_impl(
            )  # type: WindowedValueCoderImpl
            # NOTE: this local intentionally shadows the module-level
            # `windowed_value` import within this loop.
            windowed_value = windowed_val_coder_impl.decode_from_stream(
                input_stream, True)
            key, value = self._kv_extractor(windowed_value.value)
            for window in windowed_value.windows:
                self._values_by_window[key, window].append(value)

    def encoded_items(self):
        # type: () -> Iterator[Tuple[bytes, bytes, bytes]]
        """Yield (encoded_window, encoded_key, encoded_values) per bucket."""
        value_coder_impl = self._value_coder.get_impl()
        key_coder_impl = self._key_coder.get_impl()
        for (key, window), values in self._values_by_window.items():
            encoded_window = self._window_coder.encode(window)
            encoded_key = key_coder_impl.encode_nested(key)
            output_stream = create_OutputStream()
            for value in values:
                value_coder_impl.encode_to_stream(value, output_stream, True)
            yield encoded_key, encoded_window, output_stream.get()
class GenericNonMergingWindowFn(window.NonMergingWindowFn):
  """Placeholder non-merging WindowFn that only carries a window coder.

  Substituted for windowing strategies whose real WindowFn the runner does
  not recognize, preserving the window encoding while never being asked to
  assign windows itself.
  """
  URN = 'internal-generic-non-merging'
  def __init__(self, coder):
    # coder: the coder for windows produced by the original WindowFn.
    self._coder = coder
  def assign(self, assign_context):
    """Never called here: windows were already assigned upstream."""
    raise NotImplementedError()
  def get_window_coder(self):
    """Return the coder used to encode/decode this strategy's windows."""
    return self._coder
  @staticmethod
  @window.urns.RunnerApiFn.register_urn(URN, bytes)
  def from_runner_api_parameter(window_coder_id, context):
    # The payload is the utf-8 encoded id of the window coder to look up in
    # the pipeline context.
    return GenericNonMergingWindowFn(
        context.coders[window_coder_id.decode('utf-8')])
class FnApiRunnerExecutionContext(object):
  """Shared, pipeline-wide state for a single FnApiRunner execution.

  :var pcoll_buffers: (dict): Mapping of
      PCollection IDs to list that functions as buffer for the
      ``beam.PCollection``.
  """
  def __init__(self,
               stages, # type: List[translations.Stage]
               worker_handler_manager, # type: worker_handlers.WorkerHandlerManager
               pipeline_components, # type: beam_runner_api_pb2.Components
               safe_coders,
               data_channel_coders,
              ):
    """
    :param worker_handler_manager: This class manages the set of worker
        handlers, and the communication with state / control APIs.
    :param pipeline_components: (beam_runner_api_pb2.Components): TODO
    :param safe_coders: mapping from a coder id to the id of a runner-safe
        equivalent coder.
    :param data_channel_coders: mapping from PCollection id to the id of the
        coder used on its data channel.
    """
    self.stages = stages
    self.side_input_descriptors_by_stage = (
        self._build_data_side_inputs_map(stages))
    self.pcoll_buffers = {}  # type: MutableMapping[bytes, PartitionableBuffer]
    self.timer_buffers = {}  # type: MutableMapping[bytes, ListBuffer]
    self.worker_handler_manager = worker_handler_manager
    self.pipeline_components = pipeline_components
    self.safe_coders = safe_coders
    self.data_channel_coders = data_channel_coders

    self.pipeline_context = pipeline_context.PipelineContext(
        self.pipeline_components,
        iterable_state_write=self._iterable_state_write)
    # Monotonically increasing counter backing next_uid().
    self._last_uid = -1
    # Replace windowing strategies the runner cannot evaluate directly with
    # coder-preserving stand-ins (see _make_safe_windowing_strategy).
    self.safe_windowing_strategies = {
        id: self._make_safe_windowing_strategy(id)
        for id in self.pipeline_components.windowing_strategies.keys()
    }

  @staticmethod
  def _build_data_side_inputs_map(stages):
    # type: (Iterable[translations.Stage]) -> MutableMapping[str, DataSideInput]
    """Builds an index mapping stages to side input descriptors.

    A side input descriptor is a map of side input IDs to side input access
    patterns for all of the outputs of a stage that will be consumed as a
    side input.
    """
    transform_consumers = collections.defaultdict(
        list)  # type: DefaultDict[str, List[beam_runner_api_pb2.PTransform]]
    stage_consumers = collections.defaultdict(
        list)  # type: DefaultDict[str, List[translations.Stage]]

    def get_all_side_inputs():
      # type: () -> Set[str]
      all_side_inputs = set()  # type: Set[str]
      for stage in stages:
        for transform in stage.transforms:
          for input in transform.inputs.values():
            transform_consumers[input].append(transform)
            stage_consumers[input].append(stage)
        for si in stage.side_inputs():
          all_side_inputs.add(si)
      return all_side_inputs

    all_side_inputs = frozenset(get_all_side_inputs())

    # Map each PCollection to the stage that produces it.  A PCollection a
    # stage consumes as its own side input is not considered produced by it.
    data_side_inputs_by_producing_stage = {}
    producing_stages_by_pcoll = {}
    for s in stages:
      data_side_inputs_by_producing_stage[s.name] = {}
      for transform in s.transforms:
        for o in transform.outputs.values():
          if o in s.side_inputs():
            continue
          producing_stages_by_pcoll[o] = s

    # For every side-input PCollection, record (buffer id, access pattern)
    # under its producing stage, keyed by (consuming transform, tag).
    for side_pc in all_side_inputs:
      for consuming_transform in transform_consumers[side_pc]:
        if consuming_transform.spec.urn not in translations.PAR_DO_URNS:
          continue
        producing_stage = producing_stages_by_pcoll[side_pc]
        payload = proto_utils.parse_Bytes(
            consuming_transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
        for si_tag in payload.side_inputs:
          if consuming_transform.inputs[si_tag] == side_pc:
            side_input_id = (consuming_transform.unique_name, si_tag)
            data_side_inputs_by_producing_stage[
                producing_stage.name][side_input_id] = (
                    translations.create_buffer_id(side_pc),
                    payload.side_inputs[si_tag].access_pattern)

    return data_side_inputs_by_producing_stage

  def _make_safe_windowing_strategy(self, id):
    # type: (str) -> str
    """Return the id of a windowing strategy this runner can evaluate.

    Known-safe strategies are returned unchanged; non-merging strategies are
    cloned with their WindowFn replaced by a GenericNonMergingWindowFn that
    carries the original window coder.
    """
    windowing_strategy_proto = self.pipeline_components.windowing_strategies[id]
    if windowing_strategy_proto.window_fn.urn in SAFE_WINDOW_FNS:
      return id
    elif (windowing_strategy_proto.merge_status ==
          beam_runner_api_pb2.MergeStatus.NON_MERGING):
      # BUGFIX: this condition previously had a stray ``or True`` appended,
      # which forced this branch for every non-safe strategy and made the
      # two branches below unreachable.
      safe_id = id + '_safe'
      while safe_id in self.pipeline_components.windowing_strategies:
        safe_id += '_'
      # NOTE(review): copy.copy of a protobuf message — presumably a full
      # message copy here; confirm mutation below cannot leak into the
      # original proto.
      safe_proto = copy.copy(windowing_strategy_proto)
      safe_proto.window_fn.urn = GenericNonMergingWindowFn.URN
      safe_proto.window_fn.payload = (
          windowing_strategy_proto.window_coder_id.encode('utf-8'))
      self.pipeline_context.windowing_strategies.put_proto(safe_id, safe_proto)
      return safe_id
    elif windowing_strategy_proto.window_fn.urn == python_urns.PICKLED_WINDOWFN:
      return id
    else:
      raise NotImplementedError(
          '[BEAM-10119] Unknown merging WindowFn: %s' %
          windowing_strategy_proto)

  @property
  def state_servicer(self):
    # TODO(BEAM-9625): Ensure FnApiRunnerExecutionContext owns StateServicer
    return self.worker_handler_manager.state_servicer

  def next_uid(self):
    # type: () -> str
    """Return the next unique string id for this execution context."""
    self._last_uid += 1
    return str(self._last_uid)

  def _iterable_state_write(self, values, element_coder_impl):
    # type: (...) -> bytes
    """Encode ``values`` into runner state; return the token to fetch them."""
    token = unique_name(None, 'iter').encode('ascii')
    out = create_OutputStream()
    for element in values:
      element_coder_impl.encode_to_stream(element, out, True)
    self.worker_handler_manager.state_servicer.append_raw(
        beam_fn_api_pb2.StateKey(
            runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
        out.get())
    return token

  def commit_side_inputs_to_state(
      self,
      data_side_input, # type: DataSideInput
  ):
    # type: (...) -> None
    """Write buffered side-input PCollections into the state servicer.

    For each (consuming transform, tag) side input, the buffered elements
    are regrouped by window (and key, for multimaps) and appended to the
    corresponding state cells.
    """
    for (consuming_transform_id, tag), (buffer_id,
                                        func_spec) in data_side_input.items():
      _, pcoll_id = split_buffer_id(buffer_id)
      value_coder = self.pipeline_context.coders[self.safe_coders[
          self.data_channel_coders[pcoll_id]]]
      elements_by_window = WindowGroupingBuffer(func_spec, value_coder)

      if buffer_id not in self.pcoll_buffers:
        self.pcoll_buffers[buffer_id] = ListBuffer(
            coder_impl=value_coder.get_impl())
      for element_data in self.pcoll_buffers[buffer_id]:
        elements_by_window.append(element_data)

      if func_spec.urn == common_urns.side_inputs.ITERABLE.urn:
        for _, window, elements_data in elements_by_window.encoded_items():
          state_key = beam_fn_api_pb2.StateKey(
              iterable_side_input=beam_fn_api_pb2.StateKey.IterableSideInput(
                  transform_id=consuming_transform_id,
                  side_input_id=tag,
                  window=window))
          self.state_servicer.append_raw(state_key, elements_data)
      elif func_spec.urn == common_urns.side_inputs.MULTIMAP.urn:
        for key, window, elements_data in elements_by_window.encoded_items():
          state_key = beam_fn_api_pb2.StateKey(
              multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
                  transform_id=consuming_transform_id,
                  side_input_id=tag,
                  window=window,
                  key=key))
          self.state_servicer.append_raw(state_key, elements_data)
      else:
        raise ValueError("Unknown access pattern: '%s'" % func_spec.urn)
class BundleContextManager(object):
  """Holds the per-stage context needed to execute bundles.

  Couples an execution context with a single stage and lazily builds the
  artifacts (worker handlers, process bundle descriptor, timer coder map)
  shared by that stage's bundles.
  """

  def __init__(self,
               execution_context, # type: FnApiRunnerExecutionContext
               stage, # type: translations.Stage
               num_workers, # type: int
              ):
    self.execution_context = execution_context
    self.stage = stage
    # Also used as the id of the ProcessBundleDescriptor built for this
    # bundle (see _build_process_bundle_descriptor).
    self.bundle_uid = self.execution_context.next_uid()
    self.num_workers = num_workers

    # Properties that are lazily initialized
    self._process_bundle_descriptor = None
    self._worker_handlers = None
    # a mapping of {(transform_id, timer_family_id): timer_coder_id}. The map
    # is built after self._process_bundle_descriptor is initialized.
    # This field can be used to tell whether current bundle has timers.
    self._timer_coder_ids = None
@property
def worker_handlers(self):
if self._worker_handlers is None:
self._worker_handlers = (
self.execution_context.worker_handler_manager.get_worker_handlers(
self.stage.environment, self.num_workers))
return self._worker_handlers
def data_api_service_descriptor(self):
# All worker_handlers share the same grpc server, so we can read grpc server
# info from any worker_handler and read from the first worker_handler.
return self.worker_handlers[0].data_api_service_descriptor()
def state_api_service_descriptor(self):
# All worker_handlers share the same grpc server, so we can read grpc server
# info from any worker_handler and read from the first worker_handler.
return self.worker_handlers[0].state_api_service_descriptor()
@property
def process_bundle_descriptor(self):
if self._process_bundle_descriptor is None:
self._process_bundle_descriptor = self._build_process_bundle_descriptor()
self._timer_coder_ids = self._build_timer_coders_id_map()
return self._process_bundle_descriptor
  def _build_process_bundle_descriptor(self):
    """Assemble the ProcessBundleDescriptor proto for this bundle.

    Copies the stage's transforms plus the pipeline-level components
    (pcollections, coders, windowing strategies, environments) and points
    the state and timer APIs at this bundle's service endpoints.
    """
    # Cannot be invoked until *after* _extract_endpoints is called.
    # Always populate the timer_api_service_descriptor.
    return beam_fn_api_pb2.ProcessBundleDescriptor(
        id=self.bundle_uid,
        transforms={
            transform.unique_name: transform
            for transform in self.stage.transforms
        },
        pcollections=dict(
            self.execution_context.pipeline_components.pcollections.items()),
        coders=dict(self.execution_context.pipeline_components.coders.items()),
        windowing_strategies=dict(
            self.execution_context.pipeline_components.windowing_strategies.
            items()),
        environments=dict(
            self.execution_context.pipeline_components.environments.items()),
        state_api_service_descriptor=self.state_api_service_descriptor(),
        # Timers flow over the data plane, so the timer API uses the data
        # API's service descriptor.
        timer_api_service_descriptor=self.data_api_service_descriptor())
  def extract_bundle_inputs_and_outputs(self):
    # type: (...) -> Tuple[Dict[str, PartitionableBuffer], DataOutput, Dict[Tuple[str, str], str]]
    """Returns maps of transform names to PCollection identifiers.

    Also mutates IO stages to point to the data ApiServiceDescriptor.

    Returns:
      A tuple of (data_input, data_output, expected_timer_output) dictionaries.
        `data_input` is a dictionary mapping (transform_name, output_name) to a
        PCollection buffer; `data_output` is a dictionary mapping
        (transform_name, output_name) to a PCollection ID.
        `expected_timer_output` is a dictionary mapping transform_id and
        timer family ID to a buffer id for timers.
    """
    data_input = {}  # type: Dict[str, PartitionableBuffer]
    data_output = {}  # type: DataOutput
    # A mapping of {(transform_id, timer_family_id) : buffer_id}
    expected_timer_output = {}  # type: Dict[Tuple[str, str], str]
    for transform in self.stage.transforms:
      if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
                                bundle_processor.DATA_OUTPUT_URN):
        # The payload of an IO transform holds the buffer/PCollection id
        # until it is rewritten to a RemoteGrpcPort spec below.
        pcoll_id = transform.spec.payload
        if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
          coder_id = self.execution_context.data_channel_coders[only_element(
              transform.outputs.values())]
          coder = self.execution_context.pipeline_context.coders[
              self.execution_context.safe_coders.get(coder_id, coder_id)]
          if pcoll_id == translations.IMPULSE_BUFFER:
            # Impulse inputs get a fresh buffer pre-loaded with the single
            # encoded impulse element.
            data_input[transform.unique_name] = ListBuffer(
                coder_impl=coder.get_impl())
            data_input[transform.unique_name].append(ENCODED_IMPULSE_VALUE)
          else:
            # Reuse (or create) the shared buffer for this PCollection.
            if pcoll_id not in self.execution_context.pcoll_buffers:
              self.execution_context.pcoll_buffers[pcoll_id] = ListBuffer(
                  coder_impl=coder.get_impl())
            data_input[transform.unique_name] = (
                self.execution_context.pcoll_buffers[pcoll_id])
        elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
          data_output[transform.unique_name] = pcoll_id
          coder_id = self.execution_context.data_channel_coders[only_element(
              transform.inputs.values())]
        else:
          raise NotImplementedError
        # Rewrite the IO transform's payload to point at the grpc data
        # channel (coder_id was set by whichever branch ran above).
        data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
        data_api_service_descriptor = self.data_api_service_descriptor()
        if data_api_service_descriptor:
          data_spec.api_service_descriptor.url = (
              data_api_service_descriptor.url)
        transform.spec.payload = data_spec.SerializeToString()
      elif transform.spec.urn in translations.PAR_DO_URNS:
        # Record a timer buffer id for every timer family declared by the
        # ParDo, so the runner knows which timer outputs to expect.
        payload = proto_utils.parse_Bytes(
            transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
        for timer_family_id in payload.timer_family_specs.keys():
          expected_timer_output[(transform.unique_name, timer_family_id)] = (
              create_buffer_id(timer_family_id, 'timers'))
    return data_input, data_output, expected_timer_output
def get_input_coder_impl(self, transform_id):
# type: (str) -> CoderImpl
coder_id = beam_fn_api_pb2.RemoteGrpcPort.FromString(
self.process_bundle_descriptor.transforms[transform_id].spec.payload
).coder_id
assert coder_id
return self.get_coder_impl(coder_id)
def _build_timer_coders_id_map(self):
timer_coder_ids = {}
for transform_id, transform_proto in (self._process_bundle_descriptor
.transforms.items()):
if transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn:
pardo_payload = proto_utils.parse_Bytes(
transform_proto.spec.payload, beam_runner_api_pb2.ParDoPayload)
for id, timer_family_spec in pardo_payload.timer_family_specs.items():
timer_coder_ids[(transform_id, id)] = (
timer_family_spec.timer_family_coder_id)
return timer_coder_ids
def get_coder_impl(self, coder_id):
if coder_id in self.execution_context.safe_coders:
return self.execution_context.pipeline_context.coders[
self.execution_context.safe_coders[coder_id]].get_impl()
else:
return self.execution_context.pipeline_context.coders[coder_id].get_impl()
def get_timer_coder_impl(self, transform_id, timer_family_id):
return self.get_coder_impl(
self._timer_coder_ids[(transform_id, timer_family_id)])
  def get_buffer(self, buffer_id, transform_id):
    # type: (bytes, str) -> PartitionableBuffer
    """Returns the buffer for a given (operation_type, PCollection ID).

    For grouping-typed operations, we produce a ``GroupingBuffer``. For
    others, we produce a ``ListBuffer``.
    """
    # buffer_id encodes 'kind:name'; kind selects the buffer type below.
    kind, name = split_buffer_id(buffer_id)
    if kind == 'materialize':
      # Plain PCollection materialization: a ListBuffer keyed by buffer id,
      # using the coder of the transform that reads it.
      if buffer_id not in self.execution_context.pcoll_buffers:
        self.execution_context.pcoll_buffers[buffer_id] = ListBuffer(
            coder_impl=self.get_input_coder_impl(transform_id))
      return self.execution_context.pcoll_buffers[buffer_id]
    # For timer buffer, name = timer_family_id
    elif kind == 'timers':
      if buffer_id not in self.execution_context.timer_buffers:
        timer_coder_impl = self.get_timer_coder_impl(transform_id, name)
        self.execution_context.timer_buffers[buffer_id] = ListBuffer(
            timer_coder_impl)
      return self.execution_context.timer_buffers[buffer_id]
    elif kind == 'group':
      # This is a grouping write, create a grouping buffer if needed.
      if buffer_id not in self.execution_context.pcoll_buffers:
        # name is the id of the original GroupByKey transform; derive the
        # pre/post-GBK coders and windowing from its input/output pcolls.
        original_gbk_transform = name
        transform_proto = self.execution_context.pipeline_components.transforms[
            original_gbk_transform]
        input_pcoll = only_element(list(transform_proto.inputs.values()))
        output_pcoll = only_element(list(transform_proto.outputs.values()))
        pre_gbk_coder = self.execution_context.pipeline_context.coders[
            self.execution_context.safe_coders[
                self.execution_context.data_channel_coders[input_pcoll]]]
        post_gbk_coder = self.execution_context.pipeline_context.coders[
            self.execution_context.safe_coders[
                self.execution_context.data_channel_coders[output_pcoll]]]
        windowing_strategy = (
            self.execution_context.pipeline_context.windowing_strategies[
                self.execution_context.safe_windowing_strategies[
                    self.execution_context.pipeline_components.
                    pcollections[output_pcoll].windowing_strategy_id]])
        self.execution_context.pcoll_buffers[buffer_id] = GroupingBuffer(
            pre_gbk_coder, post_gbk_coder, windowing_strategy)
    else:
      # These should be the only two identifiers we produce for now,
      # but special side input writes may go here.
      raise NotImplementedError(buffer_id)
    return self.execution_context.pcoll_buffers[buffer_id]
def input_for(self, transform_id, input_id):
# type: (str, str) -> str
input_pcoll = self.process_bundle_descriptor.transforms[
transform_id].inputs[input_id]
for read_id, proto in self.process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN and
input_pcoll in proto.outputs.values()):
return read_id
raise RuntimeError('No IO transform feeds %s' % transform_id)
| 42.430233 | 98 | 0.713552 |
acf04b819f4c54a459fedb669ef5f133c6bc69d8 | 172 | py | Python | app/ext/cli/__init__.py | castilhoin/keep-it | 5971dc6f948a79530adc94306b3e82fef6518385 | [
"MIT"
] | null | null | null | app/ext/cli/__init__.py | castilhoin/keep-it | 5971dc6f948a79530adc94306b3e82fef6518385 | [
"MIT"
] | null | null | null | app/ext/cli/__init__.py | castilhoin/keep-it | 5971dc6f948a79530adc94306b3e82fef6518385 | [
"MIT"
] | null | null | null | from app.ext.db import db, models
def init_app(app):
@app.cli.command()
def create_db():
""" This command creates the database """
db.create_all()
| 21.5 | 49 | 0.616279 |
acf04bbaf7d741f79e3a549b763c9a09e67de08e | 1,592 | py | Python | back-end/success_roadmap/urls.py | sepydev/success-roadmap | a92d95349ea2a3dbd0f484e79a6079620681f56f | [
"MIT"
] | 1 | 2022-01-08T17:57:13.000Z | 2022-01-08T17:57:13.000Z | back-end/success_roadmap/urls.py | mrprocs/success-roadmap | a92d95349ea2a3dbd0f484e79a6079620681f56f | [
"MIT"
] | null | null | null | back-end/success_roadmap/urls.py | mrprocs/success-roadmap | a92d95349ea2a3dbd0f484e79a6079620681f56f | [
"MIT"
] | null | null | null | """success_roadmap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
schema_view = get_schema_view(
openapi.Info(
title="Success Roadmap API",
default_version='v1',
description="This documentation was generated to help front-end developers.",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="sepydev@gmail.com"),
license=openapi.License(name="MIT License"),
),
public=True,
permission_classes=[permissions.AllowAny],
)
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('users.urls')),
path('personal-to-dos/', include('personal_to_dos.urls')),
path('core/', include('core.urls')),
re_path(r'^doc/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
]
| 37.023256 | 98 | 0.705402 |
acf04d707b4896c7902442dbc21789821e5c857d | 384 | py | Python | midterm/problem4/load.py | haochunchang/Bioinformatics_Course | c060c1cfe2ccc3c6f0adbe3e414b3c7ce8a5385c | [
"MIT"
] | null | null | null | midterm/problem4/load.py | haochunchang/Bioinformatics_Course | c060c1cfe2ccc3c6f0adbe3e414b3c7ce8a5385c | [
"MIT"
] | null | null | null | midterm/problem4/load.py | haochunchang/Bioinformatics_Course | c060c1cfe2ccc3c6f0adbe3e414b3c7ce8a5385c | [
"MIT"
] | 3 | 2020-04-12T04:43:24.000Z | 2021-11-30T02:01:02.000Z | """
Functions for loading testing data
"""
def load_data(filepath):
test_ans = []
with open(filepath, "r") as f:
for line in f:
name, ans = line.split("\t")
test_ans.append(ans)
return test_ans
def load_answer(filepath):
ans = []
with open(filepath, "r") as f:
for line in f:
ans.append(line)
return ans
| 18.285714 | 40 | 0.554688 |
acf04d9c70203f3147c581ce5821008f136e7096 | 3,474 | py | Python | section3/src/utils/utils.py | ahmedokasha000/Hippocampal-Volume-Quantification-in-Alzheimer-s-Progression | 9783e547e88dc5d27bd77b7e3cc6d147faac5159 | [
"MIT"
] | null | null | null | section3/src/utils/utils.py | ahmedokasha000/Hippocampal-Volume-Quantification-in-Alzheimer-s-Progression | 9783e547e88dc5d27bd77b7e3cc6d147faac5159 | [
"MIT"
] | null | null | null | section3/src/utils/utils.py | ahmedokasha000/Hippocampal-Volume-Quantification-in-Alzheimer-s-Progression | 9783e547e88dc5d27bd77b7e3cc6d147faac5159 | [
"MIT"
] | null | null | null | """
Various utility methods in this module
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import torch
from PIL import Image
# Tell Matplotlib to not try and use interactive backend
mpl.use("agg")
def mpl_image_grid(images):
"""
Create an image grid from an array of images. Show up to 16 images in one figure
Arguments:
image {Torch tensor} -- NxWxH array of images
Returns:
Matplotlib figure
"""
# Create a figure to contain the plot.
n = min(images.shape[0], 16) # no more than 16 thumbnails
rows = 4
cols = (n // 4) + (1 if (n % 4) != 0 else 0)
figure = plt.figure(figsize=(2*rows, 2*cols))
plt.subplots_adjust(0, 0, 1, 1, 0.001, 0.001)
for i in range(n):
# Start next subplot.
plt.subplot(cols, rows, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
if images.shape[1] == 3:
# this is specifically for 3 softmax'd classes with 0 being bg
# We are building a probability map from our three classes using
# fractional probabilities contained in the mask
vol = images[i].detach().numpy()
img = [[[(1-vol[0,x,y])*vol[1,x,y], (1-vol[0,x,y])*vol[2,x,y], 0] \
for y in range(vol.shape[2])] \
for x in range(vol.shape[1])]
plt.imshow(img)
else: # plotting only 1st channel
plt.imshow((images[i, 0]*255).int(), cmap= "gray")
return figure
def log_to_tensorboard(writer, loss, data, target, prediction_softmax, prediction, counter):
"""Logs data to Tensorboard
Arguments:
writer {SummaryWriter} -- PyTorch Tensorboard wrapper to use for logging
loss {float} -- loss
data {tensor} -- image data
target {tensor} -- ground truth label
prediction_softmax {tensor} -- softmax'd prediction
prediction {tensor} -- raw prediction (to be used in argmax)
counter {int} -- batch and epoch counter
"""
writer.add_scalar("Loss",\
loss, counter)
writer.add_figure("Image Data",\
mpl_image_grid(data.float().cpu()), global_step=counter)
writer.add_figure("Mask",\
mpl_image_grid(target.float().cpu()), global_step=counter)
writer.add_figure("Probability map",\
mpl_image_grid(prediction_softmax.cpu()), global_step=counter)
writer.add_figure("Prediction",\
mpl_image_grid(torch.argmax(prediction.cpu(), dim=1, keepdim=True)), global_step=counter)
def save_numpy_as_image(arr, path):
"""
This saves image (2D array) as a file using matplotlib
Arguments:
arr {array} -- 2D array of pixels
path {string} -- path to file
"""
plt.imshow(arr, cmap="gray") #Needs to be in row,col order
plt.savefig(path)
def med_reshape(image, new_shape):
"""
This function reshapes 3D data to new dimension padding with zeros
and leaving the content in the top-left corner
Arguments:
image {array} -- 3D array of pixel data
new_shape {3-tuple} -- expected output shape
Returns:
3D array of desired shape, padded with zeroes
"""
reshaped_image = np.zeros(new_shape)
# TASK: write your original image into the reshaped image
# <CODE GOES HERE>
im_shape=image.shape
reshaped_image[:im_shape[0],:im_shape[1],:im_shape[2]]=image
return reshaped_image
| 33.085714 | 97 | 0.624352 |
acf04fde8a6a630c10b5f0cb6fdcc56680189030 | 3,367 | py | Python | bb-master/sandbox/lib/python3.5/site-packages/buildbot/configurators/janitor.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | 2 | 2017-07-11T18:56:27.000Z | 2017-07-28T14:01:12.000Z | bb-master/sandbox/lib/python3.5/site-packages/buildbot/configurators/janitor.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | 1 | 2017-07-28T13:53:41.000Z | 2017-07-31T15:30:40.000Z | bb-master/sandbox/lib/python3.5/site-packages/buildbot/configurators/janitor.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | null | null | null | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
#
from __future__ import absolute_import
from __future__ import print_function
import datetime
from twisted.internet import defer
from buildbot.config import BuilderConfig
from buildbot.configurators import ConfiguratorBase
from buildbot.process.buildstep import BuildStep
from buildbot.process.factory import BuildFactory
from buildbot.process.results import SUCCESS
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.schedulers.timed import Nightly
from buildbot.util import datetime2epoch
from buildbot.worker.local import LocalWorker
""" Janitor is a configurator which create a Janitor Builder with all needed Janitor steps
"""
JANITOR_NAME = "__Janitor" # If you read this code, you may want to patch this name.
def now():
"""patchable now (datetime is not patchable as builtin)"""
return datetime.datetime.utcnow()
class LogChunksJanitor(BuildStep):
name = 'LogChunksJanitor'
renderables = ["logHorizon"]
def __init__(self, logHorizon):
BuildStep.__init__(self)
self.logHorizon = logHorizon
@defer.inlineCallbacks
def run(self):
older_than_timestamp = datetime2epoch(now() - self.logHorizon)
deleted = yield self.master.db.logs.deleteOldLogChunks(older_than_timestamp)
self.descriptionDone = ["deleted", str(deleted), "logchunks"]
defer.returnValue(SUCCESS)
class JanitorConfigurator(ConfiguratorBase):
def __init__(self, logHorizon=None, hour=0, **kwargs):
ConfiguratorBase.__init__(self)
self.logHorizon = logHorizon
self.hour = hour
self.kwargs = kwargs
def configure(self, config_dict):
if self.logHorizon is None:
return
logHorizon = self.logHorizon
hour = self.hour
kwargs = self.kwargs
ConfiguratorBase.configure(self, config_dict)
nightly_kwargs = {}
# we take the defaults of Nightly, except for hour
for arg in ('minute', 'dayOfMonth', 'month', 'dayOfWeek'):
if arg in kwargs:
nightly_kwargs[arg] = kwargs[arg]
self.schedulers.append(Nightly(
name=JANITOR_NAME, builderNames=[JANITOR_NAME], hour=hour, **nightly_kwargs))
self.schedulers.append(ForceScheduler(
name=JANITOR_NAME + "_force",
builderNames=[JANITOR_NAME]))
self.builders.append(BuilderConfig(
name=JANITOR_NAME, workername=JANITOR_NAME, factory=BuildFactory(steps=[
LogChunksJanitor(logHorizon=logHorizon)
])
))
self.protocols.setdefault('null', {})
self.workers.append(LocalWorker(JANITOR_NAME))
| 35.072917 | 90 | 0.71488 |
acf05065c9ed47ff7c5d9c7c7dc9a49035494f67 | 2,896 | py | Python | backend/yummyfood_28480/urls.py | crowdbotics-apps/yummyfood-28480 | 012239c80a589e0e8390bd68ee13248c26baebba | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/yummyfood_28480/urls.py | crowdbotics-apps/yummyfood-28480 | 012239c80a589e0e8390bd68ee13248c26baebba | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/yummyfood_28480/urls.py | crowdbotics-apps/yummyfood-28480 | 012239c80a589e0e8390bd68ee13248c26baebba | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """yummyfood_28480 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("task.api.v1.urls")),
path("task/", include("task.urls")),
path("api/v1/", include("task_profile.api.v1.urls")),
path("task_profile/", include("task_profile.urls")),
path("api/v1/", include("tasker_business.api.v1.urls")),
path("tasker_business/", include("tasker_business.urls")),
path("api/v1/", include("location.api.v1.urls")),
path("location/", include("location.urls")),
path("api/v1/", include("wallet.api.v1.urls")),
path("wallet/", include("wallet.urls")),
path("api/v1/", include("task_category.api.v1.urls")),
path("task_category/", include("task_category.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "Yummyfood"
admin.site.site_title = "Yummyfood Admin Portal"
admin.site.index_title = "Yummyfood Admin"
# swagger
api_info = openapi.Info(
title="Yummyfood API",
default_version="v1",
description="API documentation for Yummyfood App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name="index.html"))]
urlpatterns += [
re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name="index.html"))
]
| 37.61039 | 87 | 0.696823 |
acf050675c1b10f4b8471c1d662fd99084958d5b | 5,686 | py | Python | examples/11_task_input_folder.py | lsawade/radical.pilot | b430f5c53a7cfdeb124ef81a8c0272d4dbe4987e | [
"MIT"
] | null | null | null | examples/11_task_input_folder.py | lsawade/radical.pilot | b430f5c53a7cfdeb124ef81a8c0272d4dbe4987e | [
"MIT"
] | null | null | null | examples/11_task_input_folder.py | lsawade/radical.pilot | b430f5c53a7cfdeb124ef81a8c0272d4dbe4987e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
__copyright__ = 'Copyright 2013-2014, http://radical.rutgers.edu'
__license__ = 'MIT'
import os
import sys
verbose = os.environ.get('RADICAL_PILOT_VERBOSE', 'REPORT')
os.environ['RADICAL_PILOT_VERBOSE'] = verbose
import radical.pilot as rp
import radical.utils as ru
# ------------------------------------------------------------------------------
#
# READ the RADICAL-Pilot documentation: https://radicalpilot.readthedocs.io/
#
# ------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
#
if __name__ == '__main__':
    # we use a reporter class for nicer output
    report = ru.Reporter(name='radical.pilot')
    report.title('Getting Started (RP version %s)' % rp.version)
    # use the resource specified as argument, fall back to localhost
    if len(sys.argv) > 2: report.exit('Usage:\t%s [resource]\n\n' % sys.argv[0])
    elif len(sys.argv) == 2: resource = sys.argv[1]
    else : resource = 'local.localhost'
    # Create a new session. No need to try/except this: if session creation
    # fails, there is not much we can do anyways...
    session = rp.Session()
    # all other pilot code is now tried/excepted. If an exception is caught, we
    # can rely on the session object to exist and be valid, and we can thus tear
    # the whole RP stack down via a 'session.close()' call in the 'finally'
    # clause...
    try:
        # read the config used for resource details
        report.info('read config')
        config = ru.read_json('%s/config.json' % os.path.dirname(os.path.abspath(__file__)))
        report.ok('>>ok\n')
        report.header('submit pilots')
        # Add a Pilot Manager. Pilot managers manage one or more Pilots.
        pmgr = rp.PilotManager(session=session)
        # Define an [n]-core local pilot that runs for [x] minutes
        # Here we use a dict to initialize the description object
        pd_init = {
            'resource' : resource,
            'runtime' : 15, # pilot runtime (min)
            'exit_on_error' : True,
            'project' : config[resource].get('project', None),
            'queue' : config[resource].get('queue', None),
            'access_schema' : config[resource].get('schema', None),
            'cores' : config[resource].get('cores', 1),
            'gpus' : config[resource].get('gpus', 0),
        }
        pdesc = rp.PilotDescription(pd_init)
        pilot = pmgr.submit_pilots(pdesc)
        report.header('submit tasks')
        # Register the Pilot in a TaskManager object.
        tmgr = rp.TaskManager(session=session)
        tmgr.add_pilots(pilot)
        n = 4 # number of tasks to run
        # create a folder to the remote machine
        # (make_folders.py is staged along and creates /tmp/stage_in_folder_<i>/
        # with an input file for each of the n tasks below)
        t = rp.TaskDescription()
        t.executable = 'python'
        t.arguments = ['make_folders.py', n]
        t.input_staging = ['make_folders.py']
        print("Creating dummy folder")
        tmgr.submit_tasks([t])
        tmgr.wait_tasks()
        print('Dummy folder created')
        report.info('create %d task description(s)\n\t' % n)
        tds = list()
        for i in range(0, n):
            path = '/tmp/stage_in_folder_%d/' % i
            fname = 'input_file.dat'
            full = '%s/%s' % (path, fname)
            # create a new Task description, and fill it.
            # Here we don't use dict initialization.
            # Each task word-counts its staged-in file; input is MOVEd into the
            # task sandbox and MOVEd back out to the pilot sandbox afterwards.
            td = rp.TaskDescription()
            td.executable = '/usr/bin/wc'
            td.arguments = ['-c', fname]
            td.input_staging = {'source': full,
                                'target': 'task:///%s' % fname,
                                'action': rp.MOVE
                                }
            td.output_staging = {'source': 'task:///%s' % fname,
                                 'target': 'pilot:///input_%d_moved' % i,
                                 'action': rp.MOVE
                                 }
            tds.append(td)
            report.progress()
        report.ok('>>ok\n')
        # Submit the previously created Task descriptions to the
        # PilotManager. This will trigger the selected scheduler to start
        # assigning Tasks to the Pilots.
        tasks = tmgr.submit_tasks(tds)
        # Wait for all tasks to reach a final state (DONE, CANCELED or FAILED).
        report.header('gather results')
        tmgr.wait_tasks()
        report.info('\n')
        for task in tasks:
            report.plain(' * %s: %s, exit: %3s, out: %s\n'
                % (task.uid, task.state[:4],
                task.exit_code, task.stdout.strip()[:35]))
        # delete sample files and folders
        # TODO:
    except Exception as e:
        # Something unexpected happened in the pilot code above
        report.error('caught Exception: %s\n' % e)
        raise
    except (KeyboardInterrupt, SystemExit):
        # the callback called sys.exit(), and we can here catch the
        # corresponding KeyboardInterrupt exception for shutdown. We also catch
        # SystemExit (which gets raised if the main threads exits for some other
        # reason).
        report.warn('exit requested\n')
    finally:
        # always clean up the session, no matter if we caught an exception or
        # not. This will kill all remaining pilots.
        report.header('finalize')
        session.close()
    report.header()
| 35.761006 | 92 | 0.528315 |
acf0508b9d4e28d0f223e440de51dce94e2d388a | 5,764 | py | Python | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyperclip.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyperclip.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyperclip.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | # Pyperclip v1.0
# A cross-platform clipboard module for Python. (only handles plain text for now)
# By Al Sweigart al@coffeeghost.net
# Usage:
# import pyperclip
# pyperclip.setcb('The text to be copied to the clipboard.')
# spam = pyperclip.getcb()
# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
# Copyright (c) 2010, Albert Sweigart
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the pyperclip nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
def winGetClipboard():
    """Return the Windows clipboard contents (CF_TEXT) via ctypes, as bytes."""
    # OpenClipboard takes an HWND; None works on some Python builds, 0 on others.
    try:
        ctypes.windll.user32.OpenClipboard(None)
    except:
        ctypes.windll.user32.OpenClipboard(0)
    pcontents = ctypes.windll.user32.GetClipboardData(1) # 1 is CF_TEXT
    data = ctypes.c_char_p(pcontents).value
    # ctypes.windll.kernel32.GlobalUnlock(pcontents)
    ctypes.windll.user32.CloseClipboard()
    return data
def winSetClipboard(text):
    """Copy *text* onto the Windows clipboard (CF_TEXT) via ctypes.

    Allocates a movable global memory block, copies the NUL-terminated text
    into it, and hands ownership of the block to the clipboard.
    """
    GMEM_DDESHARE = 0x2000
    # OpenClipboard takes an HWND; None works on some Python builds, 0 on others.
    try:
        ctypes.windll.user32.OpenClipboard(None)
    except:
        ctypes.windll.user32.OpenClipboard(0)
    ctypes.windll.user32.EmptyClipboard()
    try:
        # works on Python 2 (bytes() only takes one argument)
        hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text)) + 1)
    except TypeError:
        # works on Python 3 (bytes() requires an encoding)
        hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii')) + 1)
    pchData = ctypes.windll.kernel32.GlobalLock(hCd)
    try:
        # works on Python 2 (bytes() only takes one argument)
        ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
    except TypeError:
        # works on Python 3 (bytes() requires an encoding)
        ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
    ctypes.windll.kernel32.GlobalUnlock(hCd)
    # 1 is CF_TEXT; the clipboard now owns hCd, so it is not freed here.
    ctypes.windll.user32.SetClipboardData(1, hCd)
    ctypes.windll.user32.CloseClipboard()
def macSetClipboard(text):
    """Copy *text* to the macOS clipboard by piping it into ``pbcopy``."""
    pipe = os.popen('pbcopy', 'w')
    pipe.write(text)
    pipe.close()
def macGetClipboard():
    """Return the macOS clipboard contents by reading from ``pbpaste``."""
    pipe = os.popen('pbpaste', 'r')
    text = pipe.read()
    pipe.close()
    return text
def gtkGetClipboard():
    """Return the clipboard text via the GTK clipboard API."""
    clipboard = gtk.Clipboard()
    return clipboard.wait_for_text()
def gtkSetClipboard(text):
    """Copy *text* to the clipboard via the GTK clipboard API and persist it."""
    clipboard = gtk.Clipboard()
    clipboard.set_text(text)
    clipboard.store()
def qtGetClipboard():
    """Return the clipboard text via the module-level Qt clipboard object ``cb``."""
    return str(cb.text())
def qtSetClipboard(text):
    """Copy *text* to the clipboard via the module-level Qt clipboard object ``cb``."""
    cb.setText(text)
def xclipSetClipboard(text):
    """Copy *text* to the X11 CLIPBOARD selection by piping it into ``xclip``."""
    pipe = os.popen('xclip -selection c', 'w')
    pipe.write(text)
    pipe.close()
def xclipGetClipboard():
    """Return the X11 CLIPBOARD selection contents by reading from ``xclip``."""
    pipe = os.popen('xclip -selection c -o', 'r')
    text = pipe.read()
    pipe.close()
    return text
def xselSetClipboard(text):
    """Copy *text* to the X11 CLIPBOARD selection by piping it into ``xsel``."""
    pipe = os.popen('xsel -i -b', 'w')
    pipe.write(text)
    pipe.close()
def xselGetClipboard():
    """Return the X11 CLIPBOARD selection contents by reading from ``xsel``."""
    pipe = os.popen('xsel -o -b', 'r')
    text = pipe.read()
    pipe.close()
    return text
# Pick the platform-appropriate get/set implementations at import time:
# Windows -> ctypes, macOS -> pb{copy,paste}, POSIX -> xsel, then xclip,
# then GTK, then PyQt4, else fail with an explanatory exception.
if os.name == 'nt':
    import ctypes
    getcb = winGetClipboard
    setcb = winSetClipboard
elif os.uname()[0].lower() == 'darwin' or os.name == 'mac':
    getcb = macGetClipboard
    setcb = macSetClipboard
elif os.name == 'posix':
    xselExists = False
    try:
        # NOTE(review): check_output returns bytes on Python 3, so the != ''
        # comparison is always True there; effectively this tests only that
        # `which xsel` did not raise — confirm intent.
        xselExists = (subprocess.check_output(['which', 'xsel']) != '')
    except:
        pass
    if xselExists:
        getcb = xselGetClipboard
        setcb = xselSetClipboard
    else:
        xclipExists = False
        try:
            xclipExists = (subprocess.check_output(['which', 'xclip']) != '')
        except:
            pass
        if xclipExists:
            getcb = xclipGetClipboard
            setcb = xclipSetClipboard
        try:
            import gtk
            # NOTE(review): `signal` is never imported in this module, so this
            # line raises NameError; the bare `except:` below swallows it and
            # control falls through to the PyQt4 branch — confirm intent.
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            getcb = gtkGetClipboard
            setcb = gtkSetClipboard
        except:
            try:
                import PyQt4.QtCore
                import PyQt4.QtGui
                # NOTE(review): QApplication is not bound as a bare name here
                # (only PyQt4.QtGui is imported), so this raises NameError and
                # the final Exception below is raised instead — confirm intent.
                app = QApplication([])
                cb = PyQt4.QtGui.QApplication.clipboard()
                getcb = qtGetClipboard
                setcb = qtSetClipboard
            except:
                raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')
# Public aliases for the selected backend.
copy = setcb
paste = getcb
| 30.989247 | 129 | 0.672623 |
acf051fad585c6f140c95f2a0ad0eb81871ccad3 | 510 | py | Python | spotifyapi/Oauth.py | abh80/spotify-api.py | 6c0e1117e245e68e34e7a718c6659101cd42cd00 | [
"MIT"
] | 9 | 2020-09-25T09:15:01.000Z | 2021-05-26T15:31:19.000Z | spotifyapi/Oauth.py | Scientific-Guy/spotify-api.py | c19b4dff8da71c1d265070796e4b9c43ed07c677 | [
"MIT"
] | 8 | 2020-09-25T20:26:22.000Z | 2021-04-29T11:16:08.000Z | spotifyapi/Oauth.py | Scientific-Guy/spotify-api.py | c19b4dff8da71c1d265070796e4b9c43ed07c677 | [
"MIT"
] | 6 | 2020-09-25T09:15:26.000Z | 2021-04-29T07:49:27.000Z | # Import Packages
import requests
from .Util import b64
# Read Class
class Auth():
    """Holds a Spotify OAuth token and can fetch a client-credentials token."""

    def __init__(self, oauth: str):
        """Store the provided OAuth token string on the instance."""
        self.token = oauth

    def get(self, client_id: str, client_secret: str):
        """POST to the Spotify accounts API and return the parsed JSON token
        response, authenticating with base64-encoded client credentials."""
        credentials = b64(str(client_id) + ':' + str(client_secret))
        response = requests.request(
            'POST',
            'https://accounts.spotify.com/api/token',
            data={
                'grant_type': 'client_credentials'
            },
            headers={'Authorization': 'Basic ' + credentials}
        )
        return response.json()
| 25.5 | 96 | 0.560784 |
acf05276af593c8c50f3e6e8cc2a2f606c8ac981 | 9,211 | py | Python | congress/policy/tests/test_compiler.py | aaronorosen/congress | 2f74410c93a4d761a6fb3d913ea6bec87fd3085c | [
"Apache-2.0"
] | 1 | 2016-02-10T00:59:31.000Z | 2016-02-10T00:59:31.000Z | congress/policy/tests/test_compiler.py | aaronorosen/congress | 2f74410c93a4d761a6fb3d913ea6bec87fd3085c | [
"Apache-2.0"
] | null | null | null | congress/policy/tests/test_compiler.py | aaronorosen/congress | 2f74410c93a4d761a6fb3d913ea6bec87fd3085c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import unittest
from congress.policy import compile
class TestCompiler(unittest.TestCase):
    """Unit tests for the policy compiler's type checks, rule validation,
    recursion detection, and stratification analysis."""
    def setUp(self):
        # No shared fixtures needed; each test builds its own rules.
        pass
    def test_foo(self):
        """Sanity check that the test harness itself runs."""
        self.assertTrue("a" in "abc", "'a' is a substring of 'abc'")
    def test_type_checkers(self):
        """Test the type checkers, e.g. is_atom, is_rule."""
        atom = compile.Literal("p", [])
        atom2 = compile.Literal("q", [])
        atom3 = compile.Literal("r", [])
        lit = compile.Literal("r", [], negated=True)
        regular_rule = compile.Rule(atom, [atom2, atom3])
        regular_rule2 = compile.Rule(atom, [lit, atom2])
        multi_rule = compile.Rule([atom, atom2], [atom3])
        # "fake" rules embed a non-AST value (1) and must fail every checker.
        fake_rule = compile.Rule([atom, 1], [atom2])
        fake_rule2 = compile.Rule(atom, [atom2, 1])
        # is_atom
        self.assertTrue(compile.is_atom(atom))
        self.assertTrue(compile.is_atom(atom2))
        self.assertTrue(compile.is_atom(atom3))
        self.assertFalse(compile.is_atom(lit))
        self.assertFalse(compile.is_atom(regular_rule))
        self.assertFalse(compile.is_atom(regular_rule2))
        self.assertFalse(compile.is_atom(multi_rule))
        self.assertFalse(compile.is_atom(fake_rule))
        self.assertFalse(compile.is_atom(fake_rule2))
        self.assertFalse(compile.is_atom("a string"))
        # is_literal
        self.assertTrue(compile.is_literal(atom))
        self.assertTrue(compile.is_literal(atom2))
        self.assertTrue(compile.is_literal(atom3))
        self.assertTrue(compile.is_literal(lit))
        self.assertFalse(compile.is_literal(regular_rule))
        self.assertFalse(compile.is_literal(regular_rule2))
        self.assertFalse(compile.is_literal(multi_rule))
        self.assertFalse(compile.is_literal(fake_rule))
        self.assertFalse(compile.is_literal(fake_rule2))
        self.assertFalse(compile.is_literal("a string"))
        # is_regular_rule
        self.assertFalse(compile.is_regular_rule(atom))
        self.assertFalse(compile.is_regular_rule(atom2))
        self.assertFalse(compile.is_regular_rule(atom3))
        self.assertFalse(compile.is_regular_rule(lit))
        self.assertTrue(compile.is_regular_rule(regular_rule))
        self.assertTrue(compile.is_regular_rule(regular_rule2))
        self.assertFalse(compile.is_regular_rule(multi_rule))
        self.assertFalse(compile.is_regular_rule(fake_rule))
        self.assertFalse(compile.is_regular_rule(fake_rule2))
        self.assertFalse(compile.is_regular_rule("a string"))
        # is_multi_rule
        self.assertFalse(compile.is_multi_rule(atom))
        self.assertFalse(compile.is_multi_rule(atom2))
        self.assertFalse(compile.is_multi_rule(atom3))
        self.assertFalse(compile.is_multi_rule(lit))
        self.assertFalse(compile.is_multi_rule(regular_rule))
        self.assertFalse(compile.is_multi_rule(regular_rule2))
        self.assertTrue(compile.is_multi_rule(multi_rule))
        self.assertFalse(compile.is_multi_rule(fake_rule))
        self.assertFalse(compile.is_multi_rule(fake_rule2))
        self.assertFalse(compile.is_multi_rule("a string"))
        # is_rule
        self.assertFalse(compile.is_rule(atom))
        self.assertFalse(compile.is_rule(atom2))
        self.assertFalse(compile.is_rule(atom3))
        self.assertFalse(compile.is_rule(lit))
        self.assertTrue(compile.is_rule(regular_rule))
        self.assertTrue(compile.is_rule(regular_rule2))
        self.assertTrue(compile.is_rule(multi_rule))
        self.assertFalse(compile.is_rule(fake_rule))
        self.assertFalse(compile.is_rule(fake_rule2))
        self.assertFalse(compile.is_rule("a string"))
        # is_datalog
        self.assertTrue(compile.is_datalog(atom))
        self.assertTrue(compile.is_datalog(atom2))
        self.assertTrue(compile.is_datalog(atom3))
        self.assertFalse(compile.is_datalog(lit))
        self.assertTrue(compile.is_datalog(regular_rule))
        self.assertTrue(compile.is_datalog(regular_rule2))
        self.assertFalse(compile.is_datalog(multi_rule))
        self.assertFalse(compile.is_datalog(fake_rule))
        self.assertFalse(compile.is_datalog(fake_rule2))
        self.assertFalse(compile.is_datalog("a string"))
        # is_extended_datalog
        self.assertTrue(compile.is_extended_datalog(atom))
        self.assertTrue(compile.is_extended_datalog(atom2))
        self.assertTrue(compile.is_extended_datalog(atom3))
        self.assertFalse(compile.is_extended_datalog(lit))
        self.assertTrue(compile.is_extended_datalog(regular_rule))
        self.assertTrue(compile.is_extended_datalog(regular_rule2))
        self.assertTrue(compile.is_extended_datalog(multi_rule))
        self.assertFalse(compile.is_extended_datalog(fake_rule))
        self.assertFalse(compile.is_extended_datalog(fake_rule2))
        self.assertFalse(compile.is_extended_datalog("a string"))
    def test_rule_validation(self):
        """Test that rules are properly validated."""
        # unsafe var in head
        rule = compile.parse1('p(x) :- q(y)')
        errs = compile.rule_errors(rule)
        self.assertEqual(len(errs), 1)
        # multiple unsafe vars in head
        rule = compile.parse1('p(x,y,z) :- q(w)')
        errs = compile.rule_errors(rule)
        self.assertEqual(len(set([str(x) for x in errs])), 3)
        # unsafe var in negative literal:
        rule = compile.parse1('p(x) :- q(x), not r(y)')
        errs = compile.rule_errors(rule)
        self.assertEqual(len(set([str(x) for x in errs])), 1)
        # unsafe var in negative literal: ensure head doesn't make safe
        rule = compile.parse1('p(x) :- not q(x)')
        errs = compile.rule_errors(rule)
        self.assertEqual(len(set([str(x) for x in errs])), 1)
        # unsafe var in negative literal:
        # ensure partial safety not total safety
        rule = compile.parse1('p(x) :- q(x), not r(x,y)')
        errs = compile.rule_errors(rule)
        self.assertEqual(len(set([str(x) for x in errs])), 1)
        # unsafe var in negative literal: ensure double negs doesn't make safe
        rule = compile.parse1('p(x) :- q(x), not r(x,y), not s(x, y)')
        errs = compile.rule_errors(rule)
        self.assertEqual(len(set([str(x) for x in errs])), 1)
    def test_rule_recursion(self):
        """Test detection of (mutually) recursive rule sets."""
        rules = compile.parse('p(x) :- q(x), r(x) q(x) :- r(x) r(x) :- t(x)')
        self.assertFalse(compile.is_recursive(rules))
        rules = compile.parse('p(x) :- p(x)')
        self.assertTrue(compile.is_recursive(rules))
        rules = compile.parse('p(x) :- q(x) q(x) :- r(x) r(x) :- p(x)')
        self.assertTrue(compile.is_recursive(rules))
        # recursion through negation still counts as recursion
        rules = compile.parse('p(x) :- q(x) q(x) :- not p(x)')
        self.assertTrue(compile.is_recursive(rules))
        rules = compile.parse('p(x) :- q(x), s(x) q(x) :- t(x) s(x) :- p(x)')
        self.assertTrue(compile.is_recursive(rules))
    def test_rule_stratification(self):
        """Test stratification: recursion is fine unless it passes through
        a negated literal somewhere on the cycle."""
        rules = compile.parse('p(x) :- not q(x)')
        self.assertTrue(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- p(x)')
        self.assertTrue(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- q(x) q(x) :- p(x)')
        self.assertTrue(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- q(x) q(x) :- not r(x)')
        self.assertTrue(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- not q(x) q(x) :- not r(x)')
        self.assertTrue(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- not q(x) '
                              'q(x) :- not r(x) '
                              'r(x) :- not s(x)')
        self.assertTrue(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- q(x), r(x) '
                              'q(x) :- not t(x) '
                              'r(x) :- not s(x)')
        self.assertTrue(compile.is_stratified(rules))
        # negation on a cycle -> unstratified
        rules = compile.parse('p(x) :- not p(x)')
        self.assertFalse(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- q(x) q(x) :- not p(x)')
        self.assertFalse(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- q(x),r(x) r(x) :- not p(x)')
        self.assertFalse(compile.is_stratified(rules))
        rules = compile.parse('p(x) :- q(x), r(x) '
                              'q(x) :- not t(x) '
                              'r(x) :- not s(x) '
                              't(x) :- p(x)')
        self.assertFalse(compile.is_stratified(rules))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 42.447005 | 79 | 0.635653 |
acf0532b0100331b5ea7748fb4b13db4079b9a26 | 5,822 | py | Python | CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py | yuweijiang/CLIP-ViL | 007b3311eb160dbc243fce38f43e7c0161c742b5 | [
"MIT"
] | 1 | 2021-07-15T04:30:04.000Z | 2021-07-15T04:30:04.000Z | CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py | yuweijiang/CLIP-ViL | 007b3311eb160dbc243fce38f43e7c0161c742b5 | [
"MIT"
] | null | null | null | CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py | yuweijiang/CLIP-ViL | 007b3311eb160dbc243fce38f43e7c0161c742b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Grid features extraction script.
"""
import argparse
import os
import torch
import tqdm
from fvcore.common.file_io import PathManager
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from detectron2.evaluation import inference_context
from detectron2.modeling import build_model
import numpy as np
from clip.clip import load
import torch.nn as nn
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from grid_feats import (
add_attribute_config,
build_detection_test_loader_with_attributes,
)
from timm.models.vision_transformer import resize_pos_embed
# A simple mapper from object detection dataset to VQA dataset names
dataset_to_folder_mapper = {}
dataset_to_folder_mapper['coco_2014_train'] = 'train2014'
dataset_to_folder_mapper['coco_2014_val'] = 'val2014'
#dataset_to_folder_mapper['coco_2014_val'] = 'trainval2014'
#dataset_to_folder_mapper['coco_2014_train'] = 'trainval2014'
# One may need to change the Detectron2 code to support coco_2015_test
# insert "coco_2015_test": ("coco/test2015", "coco/annotations/image_info_test2015.json"),
# at: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/builtin.py#L36
dataset_to_folder_mapper['coco_2015_test'] = 'test2015'
dataset_to_folder_mapper['coco_2015_test-dev'] = 'test-dev2015'
def extract_grid_feature_argument_parser():
    """Build the command-line parser for grid-feature extraction.

    Returns:
        argparse.ArgumentParser exposing ``--config-file``, ``--dataset``,
        ``--model_type`` and a trailing ``opts`` remainder used to override
        config options on the command line.
    """
    parser = argparse.ArgumentParser(description="Grid feature extraction")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--dataset",
        default="coco_2014_train",
        choices=["coco_2014_train", "coco_2014_val", "coco_2015_test", "coco_2015_test-dev"],
        help="name of the dataset",
    )
    parser.add_argument(
        "--model_type",
        default="RN50",
        type=str,
        help="RN50, RN101, RN50x4, ViT-B/32, vit_base_patch32_224_in21k",
    )
    parser.add_argument(
        "opts",
        default=None,
        nargs=argparse.REMAINDER,
        help="Modify config options using the command-line",
    )
    return parser
def extract_grid_feature_on_dataset(model, data_loader, dump_folder):
    """Extract conv5 grid features for every image in *data_loader* and save
    one channels-last ``.npy`` array per image under *dump_folder*.

    Args:
        model: detectron2 model exposing preprocess_image / backbone /
            roi_heads.get_conv5_features.
        data_loader: yields batches of dicts with 'image_id' and 'file_name'.
        dump_folder: destination directory (opened through fvcore PathManager).
    """
    for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
        with torch.no_grad():
            # compute features
            images = model.preprocess_image(inputs)
            features = model.backbone(images.tensor)
            outputs = model.roi_heads.get_conv5_features(features)
            # Save under the source image's basename with a .npy extension.
            # (Bug fix: the original assigned '%d.pth' % image_id first — dead
            # code — and called exit() after the first image, aborting the run
            # before anything was written.)
            file_name = inputs[0]['file_name'].split("/")[-1].replace("jpg", "npy")
            # NCHW -> NHWC so downstream consumers get (H, W, C) grids.
            outputs = outputs.permute(0, 2, 3, 1)
            with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
                np.save(f, outputs.cpu().numpy())
def do_feature_extraction(cfg, model, dataset_name, args):
    """Run CLIP feature extraction over *dataset_name*, dumping results under
    ``<OUTPUT_DIR>/features/<split>`` (split derived via dataset_to_folder_mapper).
    """
    with inference_context(model):
        dump_folder = os.path.join(cfg.OUTPUT_DIR, "features", dataset_to_folder_mapper[dataset_name])
        PathManager.mkdirs(dump_folder)
        # Bug fix: the original passed `args.model_type='clip'` as a call
        # argument, which is a SyntaxError; pass only the valid arguments.
        data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name)
        extract_clip_feature_on_dataset(model, data_loader, dump_folder, args)
def setup(args):
    """
    Create configs and perform basic setups.

    Merges the config file and command-line overrides from *args* into a
    frozen detectron2 config, after adding the attribute-head options.
    """
    cfg = get_cfg()
    add_attribute_config(cfg)
    cfg.merge_from_file(args.config_file)
    # Remaining CLI tokens (argparse REMAINDER) are key/value config overrides.
    cfg.merge_from_list(args.opts)
    # force the final residual block to have dilations 1
    cfg.MODEL.RESNETS.RES5_DILATION = 1
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def extract_clip_feature_on_dataset(model, data_loader, dump_folder, args):
    """Extract CLIP image features for every image in *data_loader* and save
    one ``.pth`` tensor per image under a model-specific dump folder.

    Args:
        model: a loaded CLIP model with a ``visual`` tower / ``encode_image``.
        data_loader: yields batches of dicts with 'image_id' and 'image' (CHW
            uint8 tensor).
        dump_folder: split folder name; only its basename is kept and nested
            under a hard-coded clip/<family>/ prefix.
        args: parsed CLI args; ``args.model_type`` selects the CLIP variant.
    """
    # Model family without the size suffix, e.g. "ViT-B/32" -> "ViT".
    # (Bug fix: the original wrote to the undefined name `save_args`, which
    # raised NameError on the first call.)
    model_name = args.model_type.split("-")[0]
    # CLIP's published normalization constants.
    mean = torch.Tensor([0.48145466, 0.4578275, 0.40821073]).to("cuda").reshape(3, 1, 1)
    std = torch.Tensor([0.26862954, 0.26130258, 0.27577711]).to("cuda").reshape(3, 1, 1)
    dump_folder = f"clip/{model_name}/" + dump_folder.split("/")[-1]
    if args.model_type == "ViT-B/32":
        num_patches = 558  # 600 * 1000 // 32 // 32
        print(num_patches)
        pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768, device='cuda'),)
        # NOTE(review): the resized embedding is attached as a `.weight`
        # attribute while the zero-initialized tensor itself is installed as
        # the positional embedding below — this looks unintended; confirm
        # whether `pos_embed` should be built from the resized values.
        pos_embed.weight = resize_pos_embed(model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0))
        model.visual.positional_embedding = pos_embed
        print(model.visual.positional_embedding.device)
    # Bug fix: the original called str.replace without assigning the result
    # (a no-op); keep the evident intent of redirecting rscratch -> dnn.
    dump_folder = dump_folder.replace("rscratch", "dnn")
    dump_folder = "/dnn/sheng.s/clip_boi/grid-feats-vqa/" + dump_folder
    if not os.path.exists(dump_folder):
        os.makedirs(dump_folder)
    for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
        with torch.no_grad():
            image_id = inputs[0]['image_id']
            file_name = '%d.pth' % image_id
            # Normalize the uint8 CHW image into CLIP's expected input range.
            image = inputs[0]['image'].to("cuda").float() / 255.0
            image = (image - mean) / std
            image = image.unsqueeze(0)
            outputs = model.encode_image(image)
            if "RN" in args.model_type:
                # ResNet towers return NCHW feature maps -> store as NHWC.
                outputs = outputs.permute(0, 2, 3, 1)
            else:
                # ViT tokens reshaped into the 13x43 patch grid (559 patches).
                outputs = outputs[:, :, :].reshape(1, 13, 43, 768)
            with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
                # save as CPU tensors
                torch.save(outputs.cpu(), f)
def main(args):
    """Build the config, load the CLIP model named by ``args.model_type``
    (jit disabled), and run feature extraction over ``args.dataset``."""
    cfg = setup(args)
    model, transform = load(args.model_type, jit=False)
    do_feature_extraction(cfg, model, args.dataset, args)
# Script entry point: parse CLI arguments and run the extraction pipeline.
if __name__ == "__main__":
    args = extract_grid_feature_argument_parser().parse_args()
    print("Command Line Args:", args)
    main(args)
| 41 | 131 | 0.687564 |
acf0549d410b9f69c8b24ab91b2e7cedeae0fa7e | 1,481 | py | Python | ice/persistence/config_file_backing_store.py | reavessm/Ice | e78d046abfd6006b1a81d1cbdb516b7c3e141ac9 | [
"MIT"
] | 578 | 2015-01-02T12:43:52.000Z | 2022-03-27T23:45:32.000Z | ice/persistence/config_file_backing_store.py | raphaelcastaneda/Ice | b380de7fc7830251b883fb55c46fea894058afa3 | [
"MIT"
] | 271 | 2015-01-05T01:56:38.000Z | 2021-08-14T02:51:24.000Z | ice/persistence/config_file_backing_store.py | raphaelcastaneda/Ice | b380de7fc7830251b883fb55c46fea894058afa3 | [
"MIT"
] | 156 | 2015-01-07T15:43:20.000Z | 2021-12-11T19:10:44.000Z | # encoding: utf-8
"""
config_file_backing_store.py
Created by Scott on 2014-08-12.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
import ConfigParser
import backing_store
class ConfigFileBackingStore(backing_store.BackingStore):
    """BackingStore implementation that persists data to an INI-style file.

    Identifiers map to config-file sections; keys are stored lowercased, so
    lookups are case-insensitive.
    """

    def __init__(self, path):
        super(ConfigFileBackingStore, self).__init__(path)
        self.configParser = ConfigParser.RawConfigParser()
        self.configParser.read(self.path)

    def identifiers(self):
        """Return the list of known identifiers (config sections)."""
        return self.configParser.sections()

    def add_identifier(self, ident):
        """Create a new identifier; raise ValueError if it already exists."""
        try:
            self.configParser.add_section(ident)
        except ConfigParser.DuplicateSectionError:
            raise ValueError("The identifier `%s` already exists" % str(ident))

    def remove_identifier(self, ident):
        """Remove an identifier (silently a no-op if it does not exist)."""
        self.configParser.remove_section(ident)

    def keys(self, ident):
        """Return the keys stored under *ident*; raise ValueError if unknown."""
        try:
            return self.configParser.options(ident)
        except ConfigParser.NoSectionError:
            raise ValueError("No identifier named `%s` exists" % str(ident))

    def get(self, ident, key, default=None):
        """Return the value for (ident, key), or *default* when missing."""
        try:
            val = self.configParser.get(ident, key.lower())
            return val
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            return default

    def set(self, ident, key, value):
        """Store *value* under (ident, key); the key is lowercased."""
        self.configParser.set(ident, key.lower(), value)

    def save(self):
        """Write the current data back to ``self.path``.

        Raises:
            IOError: if the file cannot be written.
        """
        try:
            with open(self.path, "w") as configFile:
                self.configParser.write(configFile)
        except IOError:
            # Bug fix: the original raised a message containing a literal,
            # unformatted `%s` placeholder; interpolate the actual path.
            raise IOError("Cannot save data to `%s`. Permission Denied" % self.path)
| 26.927273 | 73 | 0.713707 |
acf05682b3561c325c865b177baae136f84a176f | 412 | py | Python | server/daemon.py | CameronEx/whatsmyip | e27040a42e9c62e622b707f1b8cf2bc5105b131e | [
"MIT"
] | null | null | null | server/daemon.py | CameronEx/whatsmyip | e27040a42e9c62e622b707f1b8cf2bc5105b131e | [
"MIT"
] | null | null | null | server/daemon.py | CameronEx/whatsmyip | e27040a42e9c62e622b707f1b8cf2bc5105b131e | [
"MIT"
] | null | null | null | from flask import Flask, request, Response
# Initialize the Flask application
app = Flask(__name__)
# Default route, print user's IP
@app.route('/')
def index():
    """Return the requesting client's IP address as the response body."""
    ip = request.remote_addr
    data = { "user_ip":ip }
    # NOTE(review): the body is the bare IP string, not a JSON document,
    # despite the application/json mimetype — confirm what clients expect.
    resp = Response(response=data['user_ip'], status=200, mimetype="application/json")
    return(resp)
# Run the development server on all interfaces, port 8081.
if __name__ == '__main__':
    app.run(
        host="0.0.0.0",
        port=int("8081")
    )
| 19.619048 | 84 | 0.652913 |
acf0574f4f3db826b9c5875ae669f22486fc464f | 79,195 | py | Python | src/transformers/models/megatron_bert/modeling_megatron_bert.py | OllieBroadhurst/transformers | 12428f0ef15bb3631e7a5f04672ddb05f363de97 | [
"Apache-2.0"
] | 1 | 2019-10-01T17:42:02.000Z | 2019-10-01T17:42:02.000Z | src/transformers/models/megatron_bert/modeling_megatron_bert.py | OllieBroadhurst/transformers | 12428f0ef15bb3631e7a5f04672ddb05f363de97 | [
"Apache-2.0"
] | 1 | 2022-03-23T19:49:13.000Z | 2022-03-23T19:49:13.000Z | src/transformers/models/megatron_bert/modeling_megatron_bert.py | erichan1/transformers | 12428f0ef15bb3631e7a5f04672ddb05f363de97 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MegatronBERT model."""
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_megatron_bert import MegatronBertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MegatronBertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
_CHECKPOINT_FOR_DOC = "nvidia/megatron-bert-cased-345m"
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"nvidia/megatron-bert-cased-345m",
# See all MegatronBERT models at https://huggingface.co/models?filter=megatron_bert
]
def load_tf_weights_in_megatron_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its '/'-separated scope
    path onto the PyTorch module tree, and copies the weights in place.
    Optimizer-only variables are skipped. Returns the populated model.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            # Scopes like "layer_3" are split into ["layer", "3"] so the
            # numeric suffix can index into a ModuleList below.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Translate TF variable names to the PyTorch attribute names.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to nn.Linear.weight.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class MegatronBertEmbeddings(nn.Module):
    """Sum word, token-type and (if absolute) position embeddings, then apply dropout.

    Unlike vanilla BERT embeddings, no LayerNorm is applied here: Megatron moves
    layer normalization into each transformer layer, after the first dropout.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # NOTE: deliberately no self.LayerNorm here (see class docstring). The
        # TF-style attribute naming is kept elsewhere so TensorFlow checkpoints load.
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Buffer of shape (1, max_position_embeddings); contiguous and serialized.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """Embed inputs; exactly one of `input_ids` / `inputs_embeds` must be provided."""
        input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings = embeddings + self.position_embeddings(position_ids)
        # Megatron BERT applies its layer norm later, inside each layer.
        return self.dropout(embeddings)
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MegatronBert
class MegatronBertSelfAttention(nn.Module):
    """Multi-head scaled-dot-product attention.

    Serves as encoder self-attention, decoder (cached) self-attention, or
    cross-attention depending on the arguments passed to `forward`.
    """
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Explicit constructor argument wins over the config attribute; default "absolute".
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            # One embedding per possible query-key distance in [-(L-1), L-1].
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder
    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head_size) into (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple:
        """Return `(context, [attention_probs if requested], [present_key_value if decoder])`."""
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            # Concatenate along the sequence axis so cached positions come first.
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            # Shift distances to be non-negative so they can index the embedding table.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in MegatronBertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
# Based transformers.models.bert.modeling_bert.BertSelfOutput. Moved LayerNorm to MegatronBertAttention below.
class MegatronBertSelfOutput(nn.Module):
    """Project the attention output back to hidden size, dropout, add the residual.

    LayerNorm intentionally lives in `MegatronBertAttention`, not here.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, residual):
        projected = self.dropout(self.dense(hidden_states))
        return residual + projected
# Based transformers.models.bert.modeling_bert.BertAttention. Added LayerNorm.
class MegatronBertAttention(nn.Module):
    """Pre-LN attention block: LayerNorm -> self-attention -> residual output."""

    def __init__(self, config):
        super().__init__()
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.self = MegatronBertSelfAttention(config)
        self.output = MegatronBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from this module (no-op for an empty list)."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Shrink the projection matrices so the pruned heads disappear.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Keep the bookkeeping consistent with the new head count.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # Normalize first (Megatron-style pre-LN), attend over the normalized input.
        normed = self.ln(hidden_states)
        self_outputs = self.self(
            normed,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # The residual connection uses the *un-normalized* hidden states.
        attention_output = self.output(self_outputs[0], hidden_states)
        return (attention_output,) + self_outputs[1:]  # add attentions if we output them
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->MegatronBert
class MegatronBertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size followed by the activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a string key into ACT2FN or a callable activation.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
# Based on transformers.models.bert.modeling_bert.BertOutput. Moved LayerNorm to MegatronBertLayer below.
class MegatronBertOutput(nn.Module):
    """Project intermediate activations back to hidden size and add the residual."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # No LayerNorm here; `MegatronBertLayer` normalizes before the feed-forward.
        projected = self.dropout(self.dense(hidden_states))
        return input_tensor + projected
# Based on transformers.models.bert.modeling_bert.BertLayer. Added LayerNorm.
class MegatronBertLayer(nn.Module):
    """A single transformer layer: (cross-)attention plus feed-forward, with the
    feed-forward's LayerNorm applied to the attention output (pre-LN placement)."""
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunking for the feed-forward happens along the sequence dimension.
        self.seq_len_dim = 1
        self.attention = MegatronBertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = MegatronBertAttention(config)
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.intermediate = MegatronBertIntermediate(config)
        self.output = MegatronBertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Run self-attention, optional cross-attention, then the chunked feed-forward."""
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs
    def feed_forward_chunk(self, attention_output):
        """Feed-forward applied per chunk: LN -> intermediate -> output (with residual)."""
        ln_output = self.ln(attention_output)
        intermediate_output = self.intermediate(ln_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class MegatronBertEncoder(nn.Module):
    """Stack of `MegatronBertLayer`s followed by one final LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([MegatronBertLayer(config) for _ in range(config.num_hidden_layers)])
        # The final layer norm. We removed the 1st LN, moved LN to each hidden layer and this one
        # is simply the final LN (Transformer's BERT has it attached to each hidden layer).
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run every layer, optionally collecting hidden states, attentions and KV caches."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                # Closure captures `past_key_value` / `output_attentions`, which the
                # checkpoint API cannot pass through as tensor arguments.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            # Because we moved the layer-norm at the end of the hidden layer, we have non-normali-
            # zed data here. If that's really needed, we must apply LN to match Transformer's BERT.
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        # Finalize the hidden states.
        hidden_states = self.ln(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->MegatronBert
class MegatronBertPooler(nn.Module):
    """Pool the sequence by projecting the first ([CLS]) token through a tanh layer."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # "Pooling" here is simply taking the hidden state of the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MegatronBert
class MegatronBertPredictionHeadTransform(nn.Module):
    """Dense projection + activation + LayerNorm applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Accept either an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MegatronBert
class MegatronBertLMPredictionHead(nn.Module):
    """Transform hidden states and decode them into vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = MegatronBertPredictionHeadTransform(config)
        # The decoder weights are tied to the input embeddings elsewhere; only the
        # per-token output bias is specific to this head.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two so that `resize_token_embeddings` resizes the bias as well.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->MegatronBert
class MegatronBertOnlyMLMHead(nn.Module):
    """Masked-LM head only (no next-sentence head)."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MegatronBertLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        """Map sequence hidden states to vocabulary logits."""
        return self.predictions(sequence_output)
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->MegatronBert
class MegatronBertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        """Return (batch, 2) logits for the is-next / not-next decision."""
        return self.seq_relationship(pooled_output)
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->MegatronBert
class MegatronBertPreTrainingHeads(nn.Module):
    """Both pre-training heads: masked-LM over the sequence and NSP over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MegatronBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        """Return `(mlm_logits, nsp_logits)`."""
        mlm_logits = self.predictions(sequence_output)
        nsp_logits = self.seq_relationship(pooled_output)
        return mlm_logits, nsp_logits
class MegatronBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = MegatronBertConfig
    load_tf_weights = load_tf_weights_in_megatron_bert
    base_model_prefix = "bert"
    supports_gradient_checkpointing = True
    # `position_ids` is a registered buffer re-created at init; missing it in a checkpoint is fine.
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Linear layers additionally get a zeroed bias (separate check: Linear also matched above).
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is toggled on the encoder, which owns the layer stack.
        if isinstance(module, MegatronBertEncoder):
            module.gradient_checkpointing = value
@dataclass
# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->MegatronBert
class MegatronBertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`MegatronBertForPreTraining`].
    Args:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # Combined MLM + NSP loss; only populated when labels are supplied.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
MEGATRON_BERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`MegatronBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MEGATRON_BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MegatronBert Model transformer outputting raw hidden-states without any specific head on top.",
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertModel(MegatronBertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = MegatronBertEmbeddings(config)
        self.encoder = MegatronBertEncoder(config)
        # The pooler is optional so headless usages can avoid allocating its parameters.
        self.pooler = MegatronBertPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        """Return the word-embedding module (used for weight tying / resizing)."""
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        """Replace the word-embedding module."""
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # KV caching only makes sense for a decoder; force it off otherwise.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
    `next sentence prediction (classification)` head.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForPreTraining(MegatronBertPreTrainedModel):
    def __init__(self, config, add_binary_head=True):
        # NOTE(review): `add_binary_head` is never read in this body — presumably kept
        # for signature compatibility with the Megatron reference implementation; confirm.
        super().__init__(config)

        self.bert = MegatronBertModel(config)
        # Joint pretraining heads: MLM over `sequence_output`, NSP over `pooled_output`.
        self.cls = MegatronBertPreTrainingHeads(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        # The MLM decoder projection doubles as the output embedding matrix.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MegatronBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring) Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (`Dict[str, any]`, optional, defaults to *{}*):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example:

        ```python
        >>> from transformers import BertTokenizer, MegatronBertForPreTraining
        >>> import torch

        >>> tokenizer = BertTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> model = MegatronBertForPreTraining.from_pretrained("nvidia/megatron-bert-cased-345m")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        # Per-token vocab logits (MLM) and per-sequence 2-way logits (NSP).
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        # The combined pretraining loss is only computed when BOTH label sets are given.
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return MegatronBertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """MegatronBert Model with a `language modeling` head on top for CLM fine-tuning.""",
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForCausalLM(MegatronBertPreTrainedModel):
    # Checkpoint keys that may legitimately be extra / missing when loading weights.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `MegatronBertForCausalLM` as a standalone, add `is_decoder=True.`")

        # No pooler: causal LM only needs the per-token hidden states.
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.cls = MegatronBertOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import BertTokenizer, MegatronBertForCausalLM, MegatronBertConfig
        >>> import torch

        >>> tokenizer = BertTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> model = MegatronBertForCausalLM.from_pretrained("nvidia/megatron-bert-cased-345m", is_decoder=True)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Caching key/value states is pointless (and memory-hungry) during training.
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape

        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}

    def _reorder_cache(self, past, beam_idx):
        # Reorder every layer's cached key/value tensors along the batch axis so the
        # cache follows beam-search hypothesis reordering.
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings("""MegatronBert Model with a `language modeling` head on top.""", MEGATRON_BERT_START_DOCSTRING)
class MegatronBertForMaskedLM(MegatronBertPreTrainedModel):
    # Checkpoint keys that may legitimately be extra / missing when loading weights.
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"seq_relationship"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `MegatronBertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        # No pooler: MLM only needs the per-token hidden states.
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.cls = MegatronBertOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        # The MLM decoder projection doubles as the output embedding matrix.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # The PAD token is required for the dummy token appended below.
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"

        # Fix: `attention_mask` defaults to None but was previously concatenated
        # unconditionally, raising a TypeError when the caller omitted it. Build a
        # full-ones mask in that case, consistent with
        # `MegatronBertForCausalLM.prepare_inputs_for_generation`.
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # add a dummy token
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """MegatronBert Model with a `next sentence prediction (classification)` head on top.""",
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForNextSentencePrediction(MegatronBertPreTrainedModel):
    # The MLM prediction head is not part of this model's checkpoint.
    _keys_to_ignore_on_load_unexpected = [r"predictions"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = MegatronBertModel(config)
        self.cls = MegatronBertOnlyNSPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example:

        ```python
        >>> from transformers import BertTokenizer, MegatronBertForNextSentencePrediction
        >>> import torch

        >>> tokenizer = BertTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> model = MegatronBertForNextSentencePrediction.from_pretrained("nvidia/megatron-bert-cased-345m")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```"""
        # Legacy keyword support: `next_sentence_label` was renamed to `labels`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # NSP classifies from the pooled [CLS] representation.
        pooled_output = outputs[1]

        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForSequenceClassification(MegatronBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = MegatronBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the pooled [CLS] representation.
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once and persist it on the config: a single label
            # means regression; integer labels mean single-label classification;
            # anything else (e.g. float multi-hot targets) means multi-label.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output
    and a softmax) e.g. for RocStories/SWAG tasks.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForMultipleChoice(MegatronBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.bert = MegatronBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One score per choice; choices are compared via softmax over the choice axis.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(
        MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, num_choices, ...) into (batch * num_choices, ...) so all
        # choices are encoded in a single pass through the model.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten scores back to (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForTokenClassification(MegatronBertPreTrainedModel):
    # The pooler is unused for per-token classification.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify every token position independently.
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForQuestionAnswering(MegatronBertPreTrainedModel):
    # The pooler is unused for span extraction.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        # Projects each token's hidden state to (start_logit, end_logit).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Split the 2-channel projection into separate start/end logit tensors.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms:
            # clamping out-of-range positions to `ignored_index` makes CrossEntropyLoss skip them.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 42.924119 | 198 | 0.672113 |
acf0577e3b2b69b7bb30f27a2f46271d87db6a3f | 66 | py | Python | backend/ibutsu_server/errors.py | rsnyman/ibutsu-server | 3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc | [
"MIT"
] | 10 | 2020-07-07T07:00:00.000Z | 2022-03-30T12:21:44.000Z | backend/ibutsu_server/errors.py | rsnyman/ibutsu-server | 3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc | [
"MIT"
] | 133 | 2020-07-06T20:10:45.000Z | 2022-03-31T15:19:19.000Z | backend/ibutsu_server/errors.py | rsnyman/ibutsu-server | 3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc | [
"MIT"
] | 9 | 2020-07-06T17:33:29.000Z | 2022-03-07T00:08:00.000Z | class IbutsuError(Exception):
"""Base exception for Ibutsu"""
| 22 | 35 | 0.712121 |
acf05975d5f57328596fd2b8cb67c7d82b7a591b | 128 | py | Python | src/section4/quotesapi/quotes/api/pagination.py | Kayofpimentel/django_course | f4408bb02319d3fa8580f41e62bd9910609442eb | [
"MIT"
] | null | null | null | src/section4/quotesapi/quotes/api/pagination.py | Kayofpimentel/django_course | f4408bb02319d3fa8580f41e62bd9910609442eb | [
"MIT"
] | null | null | null | src/section4/quotesapi/quotes/api/pagination.py | Kayofpimentel/django_course | f4408bb02319d3fa8580f41e62bd9910609442eb | [
"MIT"
] | null | null | null | from rest_framework.pagination import PageNumberPagination
class SmallSetPagination(PageNumberPagination):
    """Page-number pagination that serves result sets in pages of 30 items."""

    page_size = 30
| 21.333333 | 58 | 0.84375 |
acf05a256b7e38f8b57a6b957af9e17999a1abb5 | 36,296 | py | Python | mindspore/train/serialization.py | HappyKL/mindspore | 479cb89e8b5c9d859130891567038bb849a30bce | [
"Apache-2.0"
] | 1 | 2020-10-18T12:27:45.000Z | 2020-10-18T12:27:45.000Z | mindspore/train/serialization.py | ReIadnSan/mindspore | c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5 | [
"Apache-2.0"
] | null | null | null | mindspore/train/serialization.py | ReIadnSan/mindspore | c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model and parameters serialization."""
import os
import stat
import math
from threading import Thread, Lock
import numpy as np
import mindspore.nn as nn
from mindspore import log as logger
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.print_pb2 import Print
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.api import _executor
from mindspore.common import dtype as mstype
from mindspore._checkparam import check_input_data
from mindspore.train.quant import quant
import mindspore.context as context
from .._checkparam import Validator
__all__ = ["save_checkpoint", "load_checkpoint", "load_param_into_net", "export", "parse_print",
           "build_searched_strategy", "merge_sliced_parameter"]

# Checkpoint tensor type-name -> MindSpore dtype, used when rebuilding parameters.
tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16,
                     "Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64,
                     "Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
                     "Bool": mstype.bool_}

# Checkpoint tensor type-name -> NumPy dtype, used when decoding raw tensor bytes.
tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16,
                     "Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64,
                     "Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}

# NOTE(review): presumably serializes concurrent checkpoint writes from the save
# thread — confirm against the checkpoint-saving code further down this file.
_ckpt_mutex = Lock()

# Maximum byte size of a single tensor slice when splitting large tensors: 512 MiB.
SLICE_SIZE = 512 * 1024 * 1024
def _set_pb_env():
    """Force the pure-Python protobuf backend to prevent memory overflow.

    The C++ protobuf implementation enforces a hard message-size limit, so very
    large checkpoints can fail to parse under it. If the user explicitly chose
    ``cpp``, only warn; otherwise select ``python``.
    """
    if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION") == "cpp":
        # Fix: the original backslash-continued literal embedded long runs of
        # source indentation inside the logged message; use implicit string
        # concatenation so the warning renders as a single clean sentence.
        logger.warning("Current env variable `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp`. "
                       "When the parameter is too large, it may cause memory limit error. "
                       "This can be solved by set env `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python`.")
    else:
        os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
        logger.debug("Set the `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python`.")
def _special_process_par(par, new_par):
"""
Processes the special condition.
Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor.
"""
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
delta_len = new_par_shape_len - par_shape_len
delta_i = 0
for delta_i in range(delta_len):
if new_par.data.shape[par_shape_len + delta_i] != 1:
break
if delta_i == delta_len - 1:
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape)
par.set_data(Tensor(new_val, par.data.dtype))
return True
return False
def _update_param(param, new_param):
    """Updates param's data from new_param's data.

    Handles all four Tensor/scalar combinations of the two sides and raises
    RuntimeError when dtype or shape cannot be reconciled.
    """
    if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
        if param.data.dtype != new_param.data.dtype:
            logger.error("Failed to combine the net and the parameters for param %s.", param.name)
            msg = ("Net parameters {} type({}) different from parameter_dict's({})"
                   .format(param.name, param.data.dtype, new_param.data.dtype))
            raise RuntimeError(msg)
        if param.data.shape != new_param.data.shape:
            # Shapes differ: only legal when the loaded value merely carries
            # extra trailing axes of size 1 (handled by _special_process_par).
            if not _special_process_par(param, new_param):
                logger.error("Failed to combine the net and the parameters for param %s.", param.name)
                msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
                       .format(param.name, param.data.shape, new_param.data.shape))
                raise RuntimeError(msg)
            return
        param.set_data(new_param.data)
        return
    if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
        # Loaded value is a scalar: the net parameter must be scalar-shaped too.
        if param.data.shape != (1,) and param.data.shape != ():
            logger.error("Failed to combine the net and the parameters for param %s.", param.name)
            msg = ("Net parameters {} shape({}) is not (1,), inconsitent with parameter_dict's(scalar)."
                   .format(param.name, param.data.shape))
            raise RuntimeError(msg)
        param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))
    elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
        # Net side holds a scalar but the checkpoint holds a Tensor: cannot combine.
        logger.error("Failed to combine the net and the parameters for param %s.", param.name)
        msg = ("Net parameters {} type({}) different from parameter_dict's({})"
               .format(param.name, type(param.data), type(new_param.data)))
        raise RuntimeError(msg)
    else:
        # Both sides are scalars: coerce the new value to the net parameter's Python type.
        param.set_data(type(param.data)(new_param.data))
def _exec_save(ckpt_file_name, data_list):
    """Execute save checkpoint into file process.

    Args:
        ckpt_file_name (str): Destination checkpoint path; an existing file is replaced.
        data_list (dict): Maps parameter name to ``[dims, tensor_type, flat numpy data]``
            as built by ``save_checkpoint``.

    Raises:
        BaseException: Re-raised after logging when writing the file fails.
    """
    try:
        with _ckpt_mutex:
            if os.path.exists(ckpt_file_name):
                os.remove(ckpt_file_name)
            with open(ckpt_file_name, "ab") as f:
                for name, value in data_list.items():
                    data_size = value[2].nbytes
                    # Oversized parameters are split so a single protobuf
                    # message never exceeds SLICE_SIZE bytes.
                    if data_size > SLICE_SIZE:
                        slice_count = math.ceil(data_size / SLICE_SIZE)
                        param_slice_list = np.array_split(value[2], slice_count)
                    else:
                        param_slice_list = [value[2]]
                    for param_slice in param_slice_list:
                        checkpoint_list = Checkpoint()
                        param_value = checkpoint_list.value.add()
                        param_value.tag = name
                        param_tensor = param_value.tensor
                        param_tensor.dims.extend(value[0])
                        param_tensor.tensor_type = value[1]
                        # Fix: tobytes() replaces ndarray.tostring(), which is
                        # deprecated and removed in recent NumPy releases.
                        param_tensor.tensor_content = param_slice.tobytes()
                        f.write(checkpoint_list.SerializeToString())
        # Make the finished checkpoint read-only for its owner.
        os.chmod(ckpt_file_name, stat.S_IRUSR)
    except BaseException as e:
        logger.error("Failed to save the checkpoint file %s.", ckpt_file_name)
        raise e
def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True, async_save=False):
    """
    Saves checkpoint info to a specified file.

    Args:
        save_obj (nn.Cell or list): The cell object or data list(each element is a dictionary, like
                                    [{"name": param_name, "data": param_data},...], the type of param_name would
                                    be string, and the type of param_data would be parameter or tensor).
        ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
        integrated_save (bool): Whether to integrated save in automatic model parallel scene. Default: True
        async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False

    Raises:
        TypeError: If the parameter save_obj is not nn.Cell or list type.And if the parameter integrated_save and
                   async_save are not bool type.
    """
    if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):
        raise TypeError("The parameter save_obj should be nn.Cell or list, but got {}".format(type(save_obj)))
    if not isinstance(integrated_save, bool):
        raise TypeError("The parameter integrated_save should be bool, but got {}".format(type(integrated_save)))
    if not isinstance(async_save, bool):
        raise TypeError("The parameter async_save should be bool, but got {}".format(type(async_save)))
    logger.info("Execute save checkpoint process.")
    if isinstance(save_obj, nn.Cell):
        # Normalize the Cell into the list-of-dicts form handled below.
        save_obj.init_parameters_data()
        param_dict = {}
        for _, param in save_obj.parameters_and_names():
            param_dict[param.name] = param
        param_list = []
        for (key, value) in param_dict.items():
            each_param = {"name": key}
            param_data = Tensor(value.data)
            # in automatic model parallel scenario, some parameters were split to all the devices,
            # which should be combined before saving
            if integrated_save and key in save_obj.parameter_layout_dict:
                param_data = _get_merged_param_data(save_obj, key, param_data)
            each_param["data"] = param_data
            param_list.append(each_param)
        save_obj = param_list
    data_list = {}
    # Snapshot the data under the checkpoint mutex so an async save thread
    # never observes a half-built data_list.
    with _ckpt_mutex:
        for param in save_obj:
            key = param["name"]
            data_list[key] = []
            if isinstance(param["data"], Parameter):
                param["data"].init_data()
            dims = []
            # A scalar parameter (shape ()) is recorded with the sentinel dims [0];
            # load_checkpoint recognizes this sentinel when rebuilding.
            if param['data'].shape == ():
                dims.append(0)
            else:
                for dim in param['data'].shape:
                    dims.append(dim)
            data_list[key].append(dims)
            tensor_type = str(param["data"].dtype)
            data_list[key].append(tensor_type)
            data = param["data"].asnumpy().reshape(-1)
            data_list[key].append(data)
    if async_save:
        thr = Thread(target=_exec_save, args=(ckpt_file_name, data_list), name="asyn_save_ckpt")
        thr.start()
    else:
        _exec_save(ckpt_file_name, data_list)
    logger.info("Save checkpoint process finish.")
def load_checkpoint(ckpt_file_name, net=None):
    """
    Loads checkpoint info from a specified file.

    Args:
        ckpt_file_name (str): Checkpoint file name. Must end with ".ckpt".
        net (Cell): Cell network. If given, the loaded parameters are also
            applied to it via load_param_into_net. Default: None

    Returns:
        Dict, key is parameter name, value is a Parameter.

    Raises:
        ValueError: Checkpoint file is incorrect.
    """
    if not isinstance(ckpt_file_name, str):
        raise ValueError("The ckpt_file_name must be string.")
    if not os.path.exists(ckpt_file_name):
        raise ValueError("The checkpoint file is not exist.")
    if ckpt_file_name[-5:] != ".ckpt":
        raise ValueError("Please input the correct checkpoint file name.")
    if os.path.getsize(ckpt_file_name) == 0:
        raise ValueError("The checkpoint file may be empty, please make sure enter the correct file name.")
    logger.info("Execute load checkpoint process.")
    checkpoint_list = Checkpoint()
    try:
        with open(ckpt_file_name, "rb") as f:
            pb_content = f.read()
        checkpoint_list.ParseFromString(pb_content)
    except BaseException as e:
        logger.error("Failed to read the checkpoint file `%s`, please check the correct of the file.", ckpt_file_name)
        raise ValueError(e.__str__())
    parameter_dict = {}
    try:
        element_id = 0
        param_data_list = []
        for element in checkpoint_list.value:
            data = element.tensor.tensor_content
            data_type = element.tensor.tensor_type
            np_type = tensor_to_np_type[data_type]
            ms_type = tensor_to_ms_type[data_type]
            element_data = np.frombuffer(data, np_type)
            param_data_list.append(element_data)
            # Oversized parameters were saved as consecutive slices sharing one
            # tag; concatenate once the last slice of that tag is reached.
            if (element_id == len(checkpoint_list.value) - 1) or \
                    (element.tag != checkpoint_list.value[element_id + 1].tag):
                param_data = np.concatenate((param_data_list), axis=0)
                param_data_list.clear()
                dims = element.tensor.dims
                if dims == [0]:
                    # dims [0] is the scalar sentinel written by save_checkpoint.
                    if 'Float' in data_type:
                        param_data = float(param_data[0])
                    elif 'Int' in data_type:
                        param_data = int(param_data[0])
                    parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
                elif dims == [1]:
                    parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
                else:
                    param_dim = []
                    for dim in dims:
                        param_dim.append(dim)
                    param_value = param_data.reshape(param_dim)
                    parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)
            element_id += 1
        logger.info("Load checkpoint process finish.")
    except BaseException as e:
        logger.error("Failed to load the checkpoint file `%s`.", ckpt_file_name)
        raise RuntimeError(e.__str__())
    if net is not None:
        load_param_into_net(net, parameter_dict)
    return parameter_dict
def load_param_into_net(net, parameter_dict):
    """
    Loads parameters into network.

    Args:
        net (Cell): Cell network.
        parameter_dict (dict): Parameter dictionary, mapping parameter name to Parameter.

    Returns:
        list[str], names of net parameters that could not be matched in parameter_dict.

    Raises:
        TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.
    """
    if not isinstance(net, nn.Cell):
        logger.error("Failed to combine the net and the parameters.")
        msg = ("Argument net should be a Cell, but got {}.".format(type(net)))
        raise TypeError(msg)
    if not isinstance(parameter_dict, dict):
        logger.error("Failed to combine the net and the parameters.")
        msg = ("Argument parameter_dict should be a dict, but got {}.".format(type(parameter_dict)))
        raise TypeError(msg)
    logger.info("Execute load parameter into net process.")
    net.init_parameters_data()
    param_not_load = []
    for _, param in net.parameters_and_names():
        if param.name in parameter_dict:
            new_param = parameter_dict[param.name]
            if not isinstance(new_param, Parameter):
                logger.error("Failed to combine the net and the parameters.")
                msg = ("Argument parameter_dict element should be a Parameter, but got {}.".format(type(new_param)))
                raise TypeError(msg)
            _update_param(param, new_param)
        else:
            param_not_load.append(param.name)
    if param_not_load:
        # Retry unmatched parameters, tolerating a common name prefix in the dict.
        _load_dismatch_prefix_params(net, parameter_dict, param_not_load)
    logger.debug("Params not matched(in net but not in parameter_dict):")
    for param_name in param_not_load:
        logger.debug("%s", param_name)
    logger.info("Load parameter into net finish, {} parameters has not been loaded.".format(len(param_not_load)))
    return param_not_load
def _load_dismatch_prefix_params(net, parameter_dict, param_not_load):
    """When some net parameter did not load, try to continue load.

    Repeatedly searches parameter_dict for a name prefix (e.g. a wrapper
    prefix) such that `prefix + net parameter name` matches a dict key,
    then loads those matches. `param_not_load` is mutated in place: each
    successfully loaded name is removed from it.
    """
    prefix_name = ""
    longest_name = param_not_load[0]
    # The outer loop runs until a pass finds no new usable prefix
    # (prefix_name stays equal to the sentinel longest_name).
    while prefix_name != longest_name and param_not_load:
        logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load)))
        prefix_name = longest_name
        for net_param_name in param_not_load:
            for dict_name in parameter_dict:
                if dict_name.endswith(net_param_name):
                    # Candidate prefix: whatever precedes the net name in the dict key.
                    prefix_name = dict_name[:-len(net_param_name)]
                    break
            if prefix_name != longest_name:
                break
        if prefix_name != longest_name:
            logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
            for _, param in net.parameters_and_names():
                new_param_name = prefix_name + param.name
                if param.name in param_not_load and new_param_name in parameter_dict:
                    new_param = parameter_dict[new_param_name]
                    _update_param(param, new_param)
                    param_not_load.remove(param.name)
def _save_graph(network, file_name):
    """
    Saves the graph of network to a file.

    Args:
        network (Cell): Obtain a pipeline through network for saving graph.
        file_name (str): Graph file name into which the graph will be saved.
    """
    logger.info("Execute save the graph process.")
    graph_proto = network.get_func_graph_proto()
    if not graph_proto:
        # Nothing to persist when the network yields no graph proto.
        return
    with open(file_name, "wb") as f:
        f.write(graph_proto)
    # Leave the written graph file owner-read-only.
    os.chmod(file_name, stat.S_IRUSR)
def _get_merged_param_data(net, param_name, param_data):
    """
    Gets the merged data(tensor) from tensor slice, by device arrangement and tensor map.

    Args:
        net (Cell): MindSpore network.
        param_name (str): The parameter name, which to be combined.
        param_data (Tensor): The parameter data on the local device,
            It was a slice of the whole parameter data.
    Returns:
        Tensor, the combined tensor which with the whole data value.
    """
    layout = net.parameter_layout_dict[param_name]
    if len(layout) < 6:
        logger.info("layout dict does not contain the key %s", param_name)
        return param_data
    # Layout fields used here: [0] device matrix, [1] tensor map, [3] field size,
    # [4] uniform-split flag, [5] optimizer shard group. (Index 2 is unused here.)
    dev_mat = layout[0]
    tensor_map = layout[1]
    field_size = layout[3]
    uniform_split = layout[4]
    opt_shard_group = layout[5]
    if uniform_split == 0:
        raise RuntimeError("Save checkpoint only support uniform split tensor now.")
    from mindspore.parallel._cell_wrapper import get_allgather_cell
    from mindspore.parallel._tensor import _reshape_param_data, _reshape_param_data_with_weight
    # while any dim is not equal to -1, means param is split and needs to be merged
    # pipeline parallel need to be supported here later
    for dim in tensor_map:
        if dim != -1 or opt_shard_group:
            # Gather the slices from all devices, then reshape the gathered
            # buffer back into the full parameter layout.
            allgather_net = get_allgather_cell(opt_shard_group)
            param_data = allgather_net(param_data)
            if field_size:
                return _reshape_param_data_with_weight(param_data, dev_mat, field_size)
            return _reshape_param_data(param_data, dev_mat, tensor_map)
    return param_data
def _fill_param_into_net(net, parameter_list):
    """
    Fills parameter_list into net.

    Args:
        net (Cell): train network.
        parameter_list (list): parameters list from ge callback; each element is a
            dict with "name" and "data" keys.
    """
    parameter_dict = {}
    for each_param in parameter_list:
        param_name = each_param["name"]
        if isinstance(each_param["data"], Parameter):
            each_param["data"].init_data()
        np_val = each_param["data"].asnumpy()
        if np_val.shape == (1,):
            parameter_dict[param_name] = Parameter(np_val, name=param_name)
        elif np_val.shape == ():
            # 0-d array: wrap the Python scalar back into a typed Tensor.
            parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
                                                   name=param_name)
        else:
            parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
    load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='AIR', **kwargs):
    """
    Export the MindSpore prediction model to a file in the specified format.

    Args:
        net (Cell): MindSpore network.
        inputs (Tensor): Inputs of the `net`.
        file_name (str): File name of the model to be exported.
        file_format (str): MindSpore currently supports 'AIR', 'ONNX' and 'MINDIR' format for exported model.

            - AIR: Ascend Intermidiate Representation. An intermidiate representation format of Ascend model.
              Recommended suffix for output file is '.air'.
            - ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
              Recommended suffix for output file is '.onnx'.
            - MINDIR: MindSpore Native Intermidiate Representation for Anf. An intermidiate representation format
              for MindSpore models.
              Recommended suffix for output file is '.mindir'.

        kwargs (dict): Configuration options dictionary.

            - quant_mode: The mode of quant.
            - mean: Input data mean. Default: 127.5.
            - std_dev: Input data variance. Default: 127.5.
    """
    logger.info("exporting model file:%s format:%s.", file_name, file_format)
    check_input_data(*inputs, data_class=Tensor)
    # Fix: forward the caller's `file_format` to the quantization exporter.
    # The original hard-coded file_format='AIR', so quantized models requested
    # as MINDIR were prepared with the AIR exporter configuration.
    net = _quant_export(net, *inputs, file_format=file_format, **kwargs)
    _export(net, file_name, file_format, *inputs)
def _export(net, file_name, file_format, *inputs):
    """
    It is an internal conversion function. Export the MindSpore prediction model to a file in the specified format.

    Args:
        net (Cell): MindSpore network.
        file_name (str): File name of the model to be exported.
        file_format (str): One of 'AIR', 'ONNX', 'MINDIR'; 'GEIR' is accepted as a
            deprecated alias of 'AIR'.
        inputs (Tensor): Inputs of the `net`, used to compile the export graph.
    """
    logger.info("exporting model file:%s format:%s.", file_name, file_format)
    check_input_data(*inputs, data_class=Tensor)
    if file_format == 'GEIR':
        logger.warning(f"Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.")
        file_format = 'AIR'
    supported_formats = ['AIR', 'ONNX', 'MINDIR']
    if file_format not in supported_formats:
        raise ValueError(f'Illegal file format {file_format}, it must be one of {supported_formats}')
    # When dumping ONNX file, switch network mode to infer when it is training(NOTE: ONNX only designed for prediction)
    is_dump_onnx_in_training = net.training and file_format == 'ONNX'
    if is_dump_onnx_in_training:
        net.set_train(mode=False)
    # export model
    net.init_parameters_data()
    if file_format == 'AIR':
        phase_name = 'export.air'
        graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)
        _executor.export(file_name, graph_id)
    elif file_format == 'ONNX':  # file_format is 'ONNX'
        phase_name = 'export.onnx'
        graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
        onnx_stream = _executor._get_func_graph_proto(graph_id)
        with open(file_name, 'wb') as f:
            # Restrict the exported file to owner read/write before writing.
            os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
            f.write(onnx_stream)
    elif file_format == 'MINDIR':  # file_format is 'MINDIR'
        phase_name = 'export.mindir'
        graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
        onnx_stream = _executor._get_func_graph_proto(graph_id, 'mind_ir')
        with open(file_name, 'wb') as f:
            os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
            f.write(onnx_stream)
    # restore network training mode
    if is_dump_onnx_in_training:
        net.set_train(mode=True)
def _quant_export(network, *inputs, file_format='AIR', **kwargs):
"""
Exports MindSpore quantization predict model to deploy with AIR and MINDIR.
"""
if not kwargs.get('quant_mode', None):
return network
supported_device = ["Ascend", "GPU"]
supported_formats = ['AIR', 'MINDIR']
quant_mode_formats = ['AUTO', 'MANUAL']
mean = kwargs['mean'] if kwargs.get('mean', None) else 127.5
std_dev = kwargs['std_dev'] if kwargs.get('std_dev', None) else 127.5
quant_mode = kwargs['quant_mode']
if quant_mode not in quant_mode_formats:
raise KeyError(f'Quant_mode input is wrong, Please choose the right mode of the quant_mode.')
mean = Validator.check_type("mean", mean, (int, float))
std_dev = Validator.check_type("std_dev", std_dev, (int, float))
if context.get_context('device_target') not in supported_device:
raise KeyError("Unsupported {} device target.".format(context.get_context('device_target')))
if file_format not in supported_formats:
raise ValueError('Illegal file format {}.'.format(file_format))
network.set_train(False)
if file_format == "MINDIR":
if quant_mode == 'MANUAL':
exporter = quant.ExportManualQuantNetwork(network, mean, std_dev, *inputs, is_mindir=True)
else:
exporter = quant.ExportToQuantInferNetwork(network, mean, std_dev, *inputs, is_mindir=True)
else:
if quant_mode == 'MANUAL':
exporter = quant.ExportManualQuantNetwork(network, mean, std_dev, *inputs)
else:
exporter = quant.ExportToQuantInferNetwork(network, mean, std_dev, *inputs)
deploy_net = exporter.run()
return deploy_net
def parse_print(print_file_name):
    """
    Loads Print data from a specified file.

    Args:
        print_file_name (str): The file name of saved print data.

    Returns:
        List, element of list is Tensor (or str for description entries).

    Raises:
        ValueError: The print file may be empty, please make sure enter the correct file name.
        RuntimeError: The print file content could not be decoded.
    """
    print_file_path = os.path.realpath(print_file_name)
    if os.path.getsize(print_file_path) == 0:
        raise ValueError("The print file may be empty, please make sure enter the correct file name.")
    logger.info("Execute load print process.")
    print_list = Print()
    try:
        with open(print_file_path, "rb") as f:
            pb_content = f.read()
        print_list.ParseFromString(pb_content)
    except BaseException as e:
        logger.error("Failed to read the print file %s, please check the correct of the file.", print_file_name)
        raise ValueError(e.__str__())
    tensor_list = []
    try:
        for print_ in print_list.value:
            # String type
            if print_.HasField("desc"):
                tensor_list.append(print_.desc)
            elif print_.HasField("tensor"):
                dims = print_.tensor.dims
                data_type = print_.tensor.tensor_type
                data = print_.tensor.tensor_content
                np_type = tensor_to_np_type[data_type]
                # Fix: np.frombuffer replaces the deprecated np.fromstring,
                # which emits warnings / is removed for binary input in
                # recent NumPy releases. The decoded bytes are identical.
                param_data = np.frombuffer(data, np_type)
                ms_type = tensor_to_ms_type[data_type]
                param_dim = []
                for dim in dims:
                    param_dim.append(dim)
                if param_dim:
                    param_value = param_data.reshape(param_dim)
                    tensor_list.append(Tensor(param_value, ms_type))
                # Scale type
                else:
                    # Dimensionless entry: unwrap the single element as a Python scalar.
                    data_type_ = data_type.lower()
                    if 'float' in data_type_:
                        param_data = float(param_data[0])
                    elif 'int' in data_type_:
                        param_data = int(param_data[0])
                    elif 'bool' in data_type_:
                        param_data = bool(param_data[0])
                    tensor_list.append(Tensor(param_data, ms_type))
    except BaseException as e:
        logger.error("Failed to load the print file %s.", print_list)
        raise RuntimeError(e.__str__())
    return tensor_list
def _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):
    """
    Merge data slices to one tensor with whole data when strategy is not None.

    Args:
        sliced_data (list[numpy.ndarray]): Data slices in order of rank_id.
        parameter_name (str): Name of parameter.
        strategy (dict): Parameter slice strategy.
        is_even (bool): Slice manner that True represents slicing evenly and False represents slicing unevenly.

    Returns:
        Tensor, the merged Tensor which has the whole data.

    Raises:
        ValueError: Failed to merge.
    """
    layout = strategy.get(parameter_name)
    try:
        dev_mat = list(layout.dev_matrix[0].dim)
        tensor_map = list(layout.tensor_map[0].dim)
        param_split_shape = list(layout.param_split_shape[0].dim)
        field_size = int(layout.field)
    except BaseException as e:
        raise ValueError(f"{e.__str__()}. please make sure that strategy matches the node_strategy.proto.")
    device_count = 1
    for dim in dev_mat:
        device_count *= dim
    if len(sliced_data) != device_count:
        raise ValueError(f"The sliced_parameters length should be equal to device_count. "
                         f"the sliced_parameters length is {len(sliced_data)} but device_count is {device_count}.")
    merged_tensor = None
    if not param_split_shape:
        # Evenly-split case: every rank holds an equally-shaped slice, so a
        # plain concatenation plus a layout-aware reshape rebuilds the tensor.
        if not is_even:
            raise ValueError("The shape of every parameter in sliced_parameters should be the same "
                             "when slice manner is even.")
        all_gather_tensor = Tensor(np.concatenate(sliced_data))
        if field_size > 0:
            from mindspore.parallel._tensor import _reshape_param_data_with_weight
            merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, [field_size])
        else:
            from mindspore.parallel._tensor import _reshape_param_data
            merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
    else:
        # Unevenly-split case: first reorder slices from rank order into
        # slice-index order, then fold the slice grid axis by axis.
        from mindspore.parallel._tensor import _get_tensor_strategy, _get_tensor_slice_index
        tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
        slice_count = 1
        for dim in tensor_strategy:
            slice_count *= dim
        if len(param_split_shape) != slice_count:
            raise ValueError(f"The param_split_shape length in strategy should be {slice_count}, "
                             f"but got {len(param_split_shape)}.")
        tensor_slices_new = list(range(slice_count))
        tensor_slices = sliced_data
        for i in range(device_count):
            slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
            if tensor_slices[i].shape[0] != param_split_shape[slice_index]:
                raise ValueError(f"The slice {slice_index} is {param_split_shape[slice_index]} in 0 axis, "
                                 f"but got {tensor_slices[i].shape[0]}.")
            tensor_slices_new[slice_index] = np.array(tensor_slices[i])
        dim_len = len(tensor_strategy)
        # Concatenate groups of adjacent slices along each axis, from the last
        # strategy axis to the first, until one full tensor remains.
        for i in range(dim_len):
            ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])
            tensor_slices_new_inner = []
            for j in range(ele_count):
                new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]
                for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,
                               (j + 1) * tensor_strategy[dim_len - 1 - i]):
                    new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)
                tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))
            tensor_slices_new = tensor_slices_new_inner
        merged_tensor = Tensor(tensor_slices_new[0])
    return merged_tensor
def build_searched_strategy(strategy_filename):
    """
    Build strategy of every parameter in network.

    Args:
        strategy_filename (str): Name of strategy file.

    Returns:
        Dictionary, whose key is parameter name and value is slice strategy of this parameter.

    Raises:
        ValueError: Strategy file is incorrect.
        TypeError: Strategy_filename is not str.

    Examples:
        >>> strategy_filename = "./strategy_train.ckpt"
        >>> strategy = build_searched_strategy(strategy_filename)
    """
    if not isinstance(strategy_filename, str):
        raise TypeError(f"The strategy_filename should be str, but got {type(strategy_filename)}.")
    if not os.path.isfile(strategy_filename):
        raise ValueError(f"No such strategy file: {strategy_filename}.")
    if os.path.getsize(strategy_filename) == 0:
        raise ValueError("The strategy file should not be empty.")
    parallel_strategy_map = ParallelStrategyMap()
    with open(strategy_filename, 'rb') as strategy_file:
        parallel_strategy_map.ParseFromString(strategy_file.read())
    layout_items = parallel_strategy_map.parallel_layout_item
    if not layout_items:
        raise ValueError("The strategy file has no sliced parameter.")
    # One entry per sliced parameter: name -> its parallel layout message.
    return {item.param_name: item.parallel_layouts for item in layout_items}
def merge_sliced_parameter(sliced_parameters, strategy=None):
    """
    Merge parameter slices to one whole parameter.

    Args:
        sliced_parameters (list[Parameter]): Parameter slices in order of rank_id.
        strategy (dict): Parameter slice strategy, the default is None.
            If strategy is None, just merge parameter slices in 0 axis order.

            - key (str): Parameter name.
            - value (<class 'node_strategy_pb2.ParallelLayouts'>): Slice strategy of this parameter.

    Returns:
        Parameter, the merged parameter which has the whole data.

    Raises:
        ValueError: Failed to merge.
        TypeError: The sliced_parameters is incorrect or strategy is not dict.
        KeyError: The parameter name is not in keys of strategy.

    Examples:
        >>> strategy = build_searched_strategy("./strategy_train.ckpt")
        >>> sliced_parameters = [
        >>>                      Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),
        >>>                                "network.embedding_table"),
        >>>                      Parameter(Tensor(np.array([0.00015815, 0.00015458, -0.00012125])),
        >>>                                "network.embedding_table"),
        >>>                      Parameter(Tensor(np.array([0.00042165, 0.00029692, -0.00007941])),
        >>>                                "network.embedding_table"),
        >>>                      Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),
        >>>                                "network.embedding_table")]
        >>> merged_parameter = merge_sliced_parameter(sliced_parameters, strategy)
    """
    if not isinstance(sliced_parameters, list):
        raise TypeError(f"The sliced_parameters should be list, but got {type(sliced_parameters)}.")
    if not sliced_parameters:
        raise ValueError("The sliced_parameters should not be empty.")
    if strategy and not isinstance(strategy, dict):
        raise TypeError(f"The strategy should be dict, but got {type(strategy)}.")
    try:
        parameter_name = sliced_parameters[0].name
        parameter_shape = sliced_parameters[0].data.shape
        parameter_shape_length = len(parameter_shape)
    except BaseException as e:
        raise TypeError(f"{e.__str__()}. the element in sliced_parameters should be Parameter.")
    is_even = True
    for index, parameter in enumerate(sliced_parameters):
        if not isinstance(parameter, Parameter):
            raise TypeError(f"The element in sliced_parameters should be Parameter, "
                            f"but got {type(parameter)} at index {index}.")
        # All slices must agree on name, rank, and every axis except axis 0,
        # which is the axis the parameter is merged along.
        if parameter.name != parameter_name \
                or len(parameter.data.shape) != parameter_shape_length \
                or parameter.data.shape[1:] != parameter_shape[1:]:
            raise ValueError("Please make sure that the elements in slice_parameters have the same name, "
                             "dimension length and shape except 0 axis")
        if parameter.data.shape != parameter_shape:
            # Differing axis-0 sizes mean the parameter was sliced unevenly.
            is_even = False
    layerwise_parallel = sliced_parameters[0].layerwise_parallel
    requires_grad = sliced_parameters[0].requires_grad
    sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]
    merged_parameter = None
    if not strategy:
        # No strategy: slices are simply stacked along axis 0 in rank order.
        merged_tensor = Tensor(np.concatenate(sliced_data))
        merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
    else:
        if parameter_name not in strategy.keys():
            raise KeyError(f"The parameter name should be one key of strategy. "
                           f"the parameter name is {parameter_name}.")
        merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)
        merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
    return merged_parameter
_set_pb_env()
| 42.057937 | 119 | 0.643404 |
acf05bb416eadf35b8ebd2658a6df4e186f6a3e8 | 2,615 | py | Python | ordersapp/migrations/0001_orders.py | Uadim23/Django_GeekShop | 56c695291f952c2d0cc1935bf667b1bc683b4baa | [
"MIT"
] | null | null | null | ordersapp/migrations/0001_orders.py | Uadim23/Django_GeekShop | 56c695291f952c2d0cc1935bf667b1bc683b4baa | [
"MIT"
] | null | null | null | ordersapp/migrations/0001_orders.py | Uadim23/Django_GeekShop | 56c695291f952c2d0cc1935bf667b1bc683b4baa | [
"MIT"
] | null | null | null | # Generated by Django 3.2.11 on 2022-02-03 15:33
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("mainapp", "0006_product_is_active"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Order",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created", models.DateTimeField(auto_now_add=True, verbose_name="создан")),
("updated", models.DateTimeField(auto_now=True, verbose_name="обновлен")),
(
"status",
models.CharField(
choices=[
("FM", "формируется"),
("STP", "отправлен в обработку"),
("PD", "оплачен"),
("PRD", "обрабатывается"),
("RDY", "готов к выдаче"),
("CNC", "отменен"),
],
default="FM",
max_length=3,
verbose_name="статус",
),
),
("is_active", models.BooleanField(default=True, verbose_name="активен")),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
"verbose_name": "заказ",
"verbose_name_plural": "заказы",
"ordering": ("-created",),
},
),
migrations.CreateModel(
name="OrderItem",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("quantity", models.PositiveIntegerField(default=0, verbose_name="количество")),
(
"order",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="orderitems", to="ordersapp.order"
),
),
(
"product",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="mainapp.product", verbose_name="продукт"
),
),
],
),
]
| 37.898551 | 118 | 0.473423 |
acf05d00a37ac8d572971c67e5c926a38cf65a1b | 12,890 | py | Python | sdk/python/pulumi_aws/storagegateway/upload_buffer.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | 260 | 2018-06-18T14:57:00.000Z | 2022-03-29T11:41:03.000Z | sdk/python/pulumi_aws/storagegateway/upload_buffer.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | 1,154 | 2018-06-19T20:38:20.000Z | 2022-03-31T19:48:16.000Z | sdk/python/pulumi_aws/storagegateway/upload_buffer.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | [
"ECL-2.0",
"Apache-2.0"
] | 115 | 2018-06-28T03:20:27.000Z | 2022-03-29T11:41:06.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['UploadBufferArgs', 'UploadBuffer']
@pulumi.input_type
class UploadBufferArgs:
    """Input arguments for constructing an `UploadBuffer` resource.

    NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen); keep
    code in sync with the generator — documentation only is added here.
    """
    def __init__(__self__, *,
                 gateway_arn: pulumi.Input[str],
                 disk_id: Optional[pulumi.Input[str]] = None,
                 disk_path: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a UploadBuffer resource.
        :param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
        :param pulumi.Input[str] disk_id: Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`.
        :param pulumi.Input[str] disk_path: Local disk path. For example, `/dev/nvme1n1`.
        """
        pulumi.set(__self__, "gateway_arn", gateway_arn)
        # Only set the optional disk identifiers when provided, so unset values
        # stay absent from the resource inputs.
        if disk_id is not None:
            pulumi.set(__self__, "disk_id", disk_id)
        if disk_path is not None:
            pulumi.set(__self__, "disk_path", disk_path)
    @property
    @pulumi.getter(name="gatewayArn")
    def gateway_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the gateway.
        """
        return pulumi.get(self, "gateway_arn")
    @gateway_arn.setter
    def gateway_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "gateway_arn", value)
    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> Optional[pulumi.Input[str]]:
        """
        Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`.
        """
        return pulumi.get(self, "disk_id")
    @disk_id.setter
    def disk_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "disk_id", value)
    @property
    @pulumi.getter(name="diskPath")
    def disk_path(self) -> Optional[pulumi.Input[str]]:
        """
        Local disk path. For example, `/dev/nvme1n1`.
        """
        return pulumi.get(self, "disk_path")
    @disk_path.setter
    def disk_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "disk_path", value)
@pulumi.input_type
class _UploadBufferState:
    # tfgen-generated state container used by UploadBuffer.get() lookups;
    # unlike UploadBufferArgs, every field (including gateway_arn) is optional
    # because state filters may be partial.
    def __init__(__self__, *,
                 disk_id: Optional[pulumi.Input[str]] = None,
                 disk_path: Optional[pulumi.Input[str]] = None,
                 gateway_arn: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering UploadBuffer resources.
        :param pulumi.Input[str] disk_id: Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`.
        :param pulumi.Input[str] disk_path: Local disk path. For example, `/dev/nvme1n1`.
        :param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
        """
        if disk_id is not None:
            pulumi.set(__self__, "disk_id", disk_id)
        if disk_path is not None:
            pulumi.set(__self__, "disk_path", disk_path)
        if gateway_arn is not None:
            pulumi.set(__self__, "gateway_arn", gateway_arn)
    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> Optional[pulumi.Input[str]]:
        """
        Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`.
        """
        return pulumi.get(self, "disk_id")
    @disk_id.setter
    def disk_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "disk_id", value)
    @property
    @pulumi.getter(name="diskPath")
    def disk_path(self) -> Optional[pulumi.Input[str]]:
        """
        Local disk path. For example, `/dev/nvme1n1`.
        """
        return pulumi.get(self, "disk_path")
    @disk_path.setter
    def disk_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "disk_path", value)
    @property
    @pulumi.getter(name="gatewayArn")
    def gateway_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of the gateway.
        """
        return pulumi.get(self, "gateway_arn")
    @gateway_arn.setter
    def gateway_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "gateway_arn", value)
class UploadBuffer(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 disk_id: Optional[pulumi.Input[str]] = None,
                 disk_path: Optional[pulumi.Input[str]] = None,
                 gateway_arn: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages an AWS Storage Gateway upload buffer.
        > **NOTE:** The Storage Gateway API provides no method to remove an upload buffer disk. Destroying this resource does not perform any Storage Gateway actions.
        ## Example Usage
        ### Cached and VTL Gateway Type
        ```python
        import pulumi
        import pulumi_aws as aws
        test_local_disk = aws.storagegateway.get_local_disk(disk_node=aws_volume_attachment["test"]["device_name"],
            gateway_arn=aws_storagegateway_gateway["test"]["arn"])
        test_upload_buffer = aws.storagegateway.UploadBuffer("testUploadBuffer",
            disk_path=test_local_disk.disk_path,
            gateway_arn=aws_storagegateway_gateway["test"]["arn"])
        ```
        ### Stored Gateway Type
        ```python
        import pulumi
        import pulumi_aws as aws
        test = aws.storagegateway.get_local_disk(disk_node=aws_volume_attachment["test"]["device_name"],
            gateway_arn=aws_storagegateway_gateway["test"]["arn"])
        example = aws.storagegateway.UploadBuffer("example",
            disk_id=data["aws_storagegateway_local_disk"]["example"]["id"],
            gateway_arn=aws_storagegateway_gateway["example"]["arn"])
        ```
        ## Import
        `aws_storagegateway_upload_buffer` can be imported by using the gateway Amazon Resource Name (ARN) and local disk identifier separated with a colon (`:`), e.g.
        ```sh
        $ pulumi import aws:storagegateway/uploadBuffer:UploadBuffer example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] disk_id: Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`.
        :param pulumi.Input[str] disk_path: Local disk path. For example, `/dev/nvme1n1`.
        :param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UploadBufferArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages an AWS Storage Gateway upload buffer.
        > **NOTE:** The Storage Gateway API provides no method to remove an upload buffer disk. Destroying this resource does not perform any Storage Gateway actions.
        ## Example Usage
        ### Cached and VTL Gateway Type
        ```python
        import pulumi
        import pulumi_aws as aws
        test_local_disk = aws.storagegateway.get_local_disk(disk_node=aws_volume_attachment["test"]["device_name"],
            gateway_arn=aws_storagegateway_gateway["test"]["arn"])
        test_upload_buffer = aws.storagegateway.UploadBuffer("testUploadBuffer",
            disk_path=test_local_disk.disk_path,
            gateway_arn=aws_storagegateway_gateway["test"]["arn"])
        ```
        ### Stored Gateway Type
        ```python
        import pulumi
        import pulumi_aws as aws
        test = aws.storagegateway.get_local_disk(disk_node=aws_volume_attachment["test"]["device_name"],
            gateway_arn=aws_storagegateway_gateway["test"]["arn"])
        example = aws.storagegateway.UploadBuffer("example",
            disk_id=data["aws_storagegateway_local_disk"]["example"]["id"],
            gateway_arn=aws_storagegateway_gateway["example"]["arn"])
        ```
        ## Import
        `aws_storagegateway_upload_buffer` can be imported by using the gateway Amazon Resource Name (ARN) and local disk identifier separated with a colon (`:`), e.g.
        ```sh
        $ pulumi import aws:storagegateway/uploadBuffer:UploadBuffer example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0
        ```
        :param str resource_name: The name of the resource.
        :param UploadBufferArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher for the two overloads above: a typed UploadBufferArgs
        # object, or loose keyword arguments. Both funnel into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(UploadBufferArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       disk_id: Optional[pulumi.Input[str]] = None,
                       disk_path: Optional[pulumi.Input[str]] = None,
                       gateway_arn: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means this is a "get existing resource" call, in which
        # case a pre-built __props__ is expected instead of field values.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = UploadBufferArgs.__new__(UploadBufferArgs)
            __props__.__dict__["disk_id"] = disk_id
            __props__.__dict__["disk_path"] = disk_path
            if gateway_arn is None and not opts.urn:
                raise TypeError("Missing required property 'gateway_arn'")
            __props__.__dict__["gateway_arn"] = gateway_arn
        super(UploadBuffer, __self__).__init__(
            'aws:storagegateway/uploadBuffer:UploadBuffer',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            disk_id: Optional[pulumi.Input[str]] = None,
            disk_path: Optional[pulumi.Input[str]] = None,
            gateway_arn: Optional[pulumi.Input[str]] = None) -> 'UploadBuffer':
        """
        Get an existing UploadBuffer resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] disk_id: Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`.
        :param pulumi.Input[str] disk_path: Local disk path. For example, `/dev/nvme1n1`.
        :param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _UploadBufferState.__new__(_UploadBufferState)
        __props__.__dict__["disk_id"] = disk_id
        __props__.__dict__["disk_path"] = disk_path
        __props__.__dict__["gateway_arn"] = gateway_arn
        return UploadBuffer(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> pulumi.Output[str]:
        """
        Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`.
        """
        return pulumi.get(self, "disk_id")
    @property
    @pulumi.getter(name="diskPath")
    def disk_path(self) -> pulumi.Output[str]:
        """
        Local disk path. For example, `/dev/nvme1n1`.
        """
        return pulumi.get(self, "disk_path")
    @property
    @pulumi.getter(name="gatewayArn")
    def gateway_arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the gateway.
        """
        return pulumi.get(self, "gateway_arn")
| 40.920635 | 174 | 0.637083 |
acf05f9dd8018a3bf3cbd8a56fb16882a9c92471 | 2,064 | py | Python | coastl/stl_toolkit/stl_constraints_helpers.py | prathgan/COASTL | 2ee009964f8bafc2d108aba6554f230549cb09e3 | [
"MIT"
] | null | null | null | coastl/stl_toolkit/stl_constraints_helpers.py | prathgan/COASTL | 2ee009964f8bafc2d108aba6554f230549cb09e3 | [
"MIT"
] | null | null | null | coastl/stl_toolkit/stl_constraints_helpers.py | prathgan/COASTL | 2ee009964f8bafc2d108aba6554f230549cb09e3 | [
"MIT"
] | null | null | null | from .utilities.simple_utilities import list_to_str
import re
def get_bin_name(inp):
    """Return a sanitized binary-variable name for an stl_node or raw string.

    Characters that would be disruptive in a solver variable name (brackets,
    comparison/logic operators, arithmetic symbols, dots, leading digits) are
    rewritten to alphanumeric tokens.
    """
    digit_words = {'0': 'zero', '1': 'one', '2': 'two', '3': 'three',
                   '4': 'four', '5': 'five', '6': 'six', '7': 'seven',
                   '8': 'eight', '9': 'nine'}
    name = inp if isinstance(inp, str) else "b_" + inp.string_rep
    # Order matters: the two-character operators must be rewritten before the
    # single-character ones they contain ('<=' before '<', '>=' before '>').
    substitutions = (
        ('[', 'ob'), (']', 'cb'), (',', '_'),
        ('<=', 'leq'), ('>=', 'geq'), ('<', 'l'), ('>', 'g'),
        ('&&', 'and'), ('||', 'or'), ('~', 'not'),
        ('(', '_op_'), (')', '_cp_'),
        ('+', '_p_'), ('-', '_m_'), ('*', '_t_'), ('/', '_d_'),
        ('.', '_dot_'),
    )
    for old, new in substitutions:
        name = name.replace(old, new)
    # Identifiers must not start with a digit: spell it out instead.
    if name[0].isdigit():
        name = digit_words[name[0]] + "_" + name[1:]
    return name
def replace_operators(str):
    """Substitute the placeholder tokens back to real arithmetic operators."""
    result = str
    for token, op in (('_p_', '+'), ('_m_', '-'), ('_t_', '*'), ('_d_', '/')):
        result = result.replace(token, op)
    return result
def remove_operators(str):
    """Strip the arithmetic operator characters +, -, * and / from a string."""
    return ''.join(ch for ch in str if ch not in '+-*/')
def handle_no_range(node):
    """Default an stl_node's time range to [0, 0] when either bound is unset.

    Mutates ``node`` in place and returns it for convenience.
    """
    # PEP 8: compare to None with identity, not equality -- `== None` invokes
    # __eq__, which arbitrary node classes may override.
    if node.range_start is None or node.range_end is None:
        node.range_start = 0
        node.range_end = 0
    return node
def isolate_0(node):
    """Returns string s where 0<s is found from some expression n<k"""
    # NOTE(review): despite the docstring's "n<k", the split token is '<=',
    # so only expressions of the form `a <= b` are handled -- confirm scope.
    exp = node.string_rep
    parts = re.split("<=", exp)
    # Heuristic: the side of '<=' holding the variable is decided by whether
    # the expression's last character is a letter. Which operand is subtracted
    # from which then flips accordingly -- verify the sign convention matches
    # the caller's intended `0 <= s` encoding.
    if exp[-1].isalpha():
        return parts[0]+" - ("+parts[1]+")"
    else:
        return parts[1]+" - ("+parts[0]+")"
class SwitchDict(dict):
    """Dictionary subclass whose item access never raises KeyError.

    ``d[key]`` behaves like ``d.get(key)``: missing keys yield ``None``.
    """
    def __getitem__(self, key):
        # dict.get performs the lookup without re-entering __getitem__, so
        # missing keys simply fall through to its None default.
        return self.get(key)
| 29.485714 | 124 | 0.625969 |
acf05fbc4874aa20783c8f892fa95cfb9422e389 | 3,731 | py | Python | flink-ml-tensorflow-2.x/src/test/python/csv_input_only.py | rohankumardubey/dl-on-flink | 60646aa9520f49619b64e9ff03ce73959e8a3858 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2022-02-03T23:54:10.000Z | 2022-02-03T23:54:10.000Z | flink-ml-tensorflow-2.x/src/test/python/csv_input_only.py | rohankumardubey/dl-on-flink | 60646aa9520f49619b64e9ff03ce73959e8a3858 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | flink-ml-tensorflow-2.x/src/test/python/csv_input_only.py | rohankumardubey/dl-on-flink | 60646aa9520f49619b64e9ff03ce73959e8a3858 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# A quick fix to run TF 1.X code in TF 2.X, we may want to properly migrate the Python script to TF 2.X API.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import sys
import time
import json
from tensorflow.python.summary.writer.writer_cache import FileWriterCache as SummaryWriterCache
import tensorflow_on_flink.tensorflow_on_flink_ops as tff_ops
import traceback
from flink_ml_tensorflow.tensorflow_context import TFContext
def map_fun(context):
    """Flink-ML task entry point: builds a distributed TF1 graph that decodes
    CSV records streamed from Flink, re-encodes selected columns, and runs
    forever inside a MonitoredTrainingSession.
    """
    print(tf.__version__)
    sys.stdout.flush()
    # Role ('ps'/'worker'), task index and cluster layout come from Flink.
    tf_context = TFContext(context)
    job_name = tf_context.get_role_name()
    index = tf_context.get_index()
    cluster_json = tf_context.get_tf_cluster()
    print (cluster_json)
    sys.stdout.flush()
    cluster = tf.train.ClusterSpec(cluster=cluster_json)
    server = tf.train.Server(cluster, job_name=job_name, task_index=index)
    # Restrict device visibility to the parameter servers and this worker.
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                 device_filters=["/job:ps", "/job:worker/task:%d" % index])
    if 'ps' == job_name:
        # Parameter servers just host state; park the Python thread forever.
        from time import sleep
        while True:
            sleep(1)
    else:
        with tf.device(tf.train.replica_device_setter(worker_device='/job:worker/task:' + str(index), cluster=cluster)):
            # The defaults double as the CSV column schema:
            # int32, int64, float32, float64, string.
            record_defaults = [[9], [tf.constant(value=9, dtype=tf.int64)], [9.0],
                               [tf.constant(value=9.0, dtype=tf.float64)], ["9.0"]]
            dataset = context.flinkStreamDataSet(buffer_size=0)
            dataset = dataset.map(lambda record: tf.decode_csv(record, record_defaults=record_defaults))
            dataset = dataset.batch(3)
            iterator = dataset.make_one_shot_iterator()
            input_records = iterator.get_next()
            global_step = tf.train.get_or_create_global_step()
            global_step_inc = tf.assign_add(global_step, 1)
            # Re-encode columns 0, 2 and 4 of each record back into CSV text.
            out_list = [input_records[0], input_records[2], input_records[4]]
            out = tff_ops.encode_csv(input_list=out_list)
        is_chief = (index == 0)
        t = time.time()
        try:
            # Timestamped checkpoint dir keeps repeated test runs isolated.
            with tf.train.MonitoredTrainingSession(master=server.target, is_chief=is_chief, config=sess_config,
                                                   checkpoint_dir="./target/tmp/input_output/" + str(
                                                           t)) as mon_sess:
                # while not mon_sess.should_stop():
                while True:
                    print (index, mon_sess.run([global_step_inc, out]))
                    sys.stdout.flush()
                    # time.sleep(1)
        except Exception as e:
            print('traceback.print_exc():')
            traceback.print_exc()
            sys.stdout.flush()
        finally:
            # Flush cached summary writers so event files are complete.
            SummaryWriterCache.clear()
if __name__ == "__main__":
    # NOTE(review): `context` is not defined anywhere in this module -- it
    # appears to be injected into the script's globals by the Flink runner.
    # Running this file directly with plain Python would raise NameError;
    # confirm against the runner's execution model.
    map_fun(context)
| 45.5 | 120 | 0.645671 |
acf0614602692fcd6664f23574e5286eca9358be | 10,325 | py | Python | napari/utils/events/evented_model.py | Mishrasubha/napari | c4d1038fc3ed30dc228949cbdedf12826ec2efc2 | [
"BSD-3-Clause"
] | 2 | 2020-06-18T20:15:41.000Z | 2021-08-11T02:10:58.000Z | napari/utils/events/evented_model.py | Mishrasubha/napari | c4d1038fc3ed30dc228949cbdedf12826ec2efc2 | [
"BSD-3-Clause"
] | 7 | 2020-04-11T03:37:54.000Z | 2021-01-31T22:41:35.000Z | napari/utils/events/evented_model.py | Mishrasubha/napari | c4d1038fc3ed30dc228949cbdedf12826ec2efc2 | [
"BSD-3-Clause"
] | 3 | 2020-08-29T21:07:38.000Z | 2022-01-10T15:36:16.000Z | import operator
import sys
import warnings
from contextlib import contextmanager
from typing import Any, Callable, ClassVar, Dict, Set
import numpy as np
from pydantic import BaseModel, PrivateAttr, main, utils
from ...utils.misc import pick_equality_operator
from ..translations import trans
from .event import EmitterGroup, Event
# encoders for non-napari specific field types. To declare a custom encoder
# for a napari type, add a `_json_encode` method to the class itself.
# it will be added to the model json_encoders in :func:`EventedMetaclass.__new__`
_BASE_JSON_ENCODERS = {np.ndarray: lambda arr: arr.tolist()}
@contextmanager
def no_class_attributes():
    """Context in which pydantic.main.ClassAttribute just passes value 2.
    Due to a very annoying decision by PySide2, all class ``__signature__``
    attributes may only be assigned **once**. (This seems to be regardless of
    whether the class has anything to do with PySide2 or not). Furthermore,
    the PySide2 ``__signature__`` attribute seems to break the python
    descriptor protocol, which means that class attributes that have a
    ``__get__`` method will not be able to successfully retrieve their value
    (instead, the descriptor object itself will be accessed).
    This plays terribly with Pydantic, which assigns a ``ClassAttribute``
    object to the value of ``cls.__signature__`` in ``ModelMetaclass.__new__``
    in order to avoid masking the call signature of object instances that have
    a ``__call__`` method (https://github.com/samuelcolvin/pydantic/pull/1466).
    So, because we only get to set the ``__signature__`` once, this context
    manager basically "opts-out" of pydantic's ``ClassAttribute`` strategy,
    thereby directly setting the ``cls.__signature__`` to an instance of
    ``inspect.Signature``.
    For additional context, see:
    - https://github.com/napari/napari/issues/2264
    - https://github.com/napari/napari/pull/2265
    - https://bugreports.qt.io/browse/PYSIDE-1004
    - https://codereview.qt-project.org/c/pyside/pyside-setup/+/261411
    """
    # Fast path: without PySide2 loaded there is nothing to work around.
    if "PySide2" not in sys.modules:
        yield
        return
    # monkey patch the pydantic ClassAttribute object
    # the second argument to ClassAttribute is the inspect.Signature object
    main.ClassAttribute = lambda x, y: y
    try:
        yield
    finally:
        # undo our monkey patch
        main.ClassAttribute = utils.ClassAttribute
class EventedMetaclass(main.ModelMetaclass):
    """pydantic ModelMetaclass that preps "equality checking" operations.
    A metaclass is the thing that "constructs" a class, and ``ModelMetaclass``
    is where pydantic puts a lot of it's type introspection and ``ModelField``
    creation logic. Here, we simply tack on one more function, that builds a
    ``cls.__eq_operators__`` dict which is mapping of field name to a function
    that can be called to check equality of the value of that field with some
    other object. (used in ``EventedModel.__eq__``)
    This happens only once, when an ``EventedModel`` class is created (and not
    when each instance of an ``EventedModel`` is instantiated).
    """
    def __new__(mcs, name, bases, namespace, **kwargs):
        # Build the class inside no_class_attributes() so pydantic's
        # ClassAttribute wrapper is bypassed (PySide2 __signature__ issue).
        with no_class_attributes():
            cls = super().__new__(mcs, name, bases, namespace, **kwargs)
        cls.__eq_operators__ = {}
        for n, f in cls.__fields__.items():
            cls.__eq_operators__[n] = pick_equality_operator(f.type_)
            # If a field type has a _json_encode method, add it to the json
            # encoders for this model.
            # NOTE: a _json_encode field must return an object that can be
            # passed to json.dumps ... but it needn't return a string.
            if hasattr(f.type_, '_json_encode'):
                encoder = f.type_._json_encode
                cls.__config__.json_encoders[f.type_] = encoder
                # also add it to the base config
                # required for pydantic>=1.8.0 due to:
                # https://github.com/samuelcolvin/pydantic/pull/2064
                EventedModel.__config__.json_encoders[f.type_] = encoder
        return cls
class EventedModel(BaseModel, metaclass=EventedMetaclass):
    """pydantic BaseModel whose mutable fields emit events on change.

    Each mutable field gets an emitter in ``self.events``; assigning a new
    value fires ``events.<field>(value=new)`` only when the value actually
    differs (per the field-specific equality operator chosen by
    ``EventedMetaclass``).
    """
    # add private attributes for event emission
    _events: EmitterGroup = PrivateAttr(default_factory=EmitterGroup)
    __eq_operators__: ClassVar[Dict[str, Callable[[Any, Any], bool]]]
    __slots__: ClassVar[Set[str]] = {"__weakref__"}  # type: ignore
    # pydantic BaseModel configuration. see:
    # https://pydantic-docs.helpmanual.io/usage/model_config/
    class Config:
        # whether to allow arbitrary user types for fields (they are validated
        # simply by checking if the value is an instance of the type). If
        # False, RuntimeError will be raised on model declaration
        arbitrary_types_allowed = True
        # whether to perform validation on assignment to attributes
        validate_assignment = True
        # whether to treat any underscore non-class var attrs as private
        # https://pydantic-docs.helpmanual.io/usage/models/#private-model-attributes
        underscore_attrs_are_private = True
        # whether to validate field defaults (default: False)
        validate_all = True
        # https://pydantic-docs.helpmanual.io/usage/exporting_models/#modeljson
        # NOTE: json_encoders are also added EventedMetaclass.__new__ if the
        # field declares a _json_encode method.
        json_encoders = _BASE_JSON_ENCODERS
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._events.source = self
        # add event emitters for each field which is mutable
        fields = []
        for name, field in self.__fields__.items():
            if field.field_info.allow_mutation:
                fields.append(name)
        self._events.add(**dict.fromkeys(fields))
    def __setattr__(self, name, value):
        # Fields without an emitter (immutable fields, private attrs) use the
        # plain pydantic setter and never emit.
        if name not in getattr(self, 'events', {}):
            # fallback to default behavior
            super().__setattr__(name, value)
            return
        # grab current value
        before = getattr(self, name, object())
        # set value using original setter
        super().__setattr__(name, value)
        # if different we emit the event with new value
        after = getattr(self, name)
        are_equal = self.__eq_operators__.get(name, operator.eq)
        if not are_equal(after, before):
            getattr(self.events, name)(value=after)  # emit event
    # expose the private EmitterGroup publically
    @property
    def events(self):
        return self._events
    @property
    def _defaults(self):
        # Possibly-nested dict of every field's default value.
        return get_defaults(self)
    def reset(self):
        """Reset the state of the model to default values."""
        for name, value in self._defaults.items():
            if isinstance(value, EventedModel):
                getattr(self, name).reset()
            elif (
                self.__config__.allow_mutation
                and self.__fields__[name].field_info.allow_mutation
            ):
                setattr(self, name, value)
    def asdict(self):
        """Convert a model to a dictionary.

        .. deprecated:: use :meth:`dict` instead.
        """
        warnings.warn(
            trans._(
                "The `asdict` method has been renamed `dict` and is now deprecated. It will be removed in 0.4.7",
                deferred=True,
            ),
            category=FutureWarning,
            stacklevel=2,
        )
        return self.dict()
    def update(self, values):
        """Update a model in place.
        Parameters
        ----------
        values : dict, napari.utils.events.EventedModel
            Values to update the model with. If an EventedModel is passed it is
            first converted to a dictionary. The keys of this dictionary must
            be found as attributes on the current model.
        """
        if isinstance(values, self.__class__):
            values = values.dict()
        if not isinstance(values, dict):
            raise ValueError(
                trans._(
                    "Unsupported update from {values}",
                    deferred=True,
                    values=type(values),
                )
            )
        # Per-field events are suppressed during the bulk update; a single
        # group-level event is emitted afterwards if anything changed.
        with self.events.blocker() as block:
            for key, value in values.items():
                field = getattr(self, key)
                if isinstance(field, EventedModel):
                    field.update(value)
                else:
                    setattr(self, key, value)
        if block.count:
            self.events(Event(self))
    def __eq__(self, other) -> bool:
        """Check equality with another object.
        We override the pydantic approach (which just checks
        ``self.dict() == other.dict()``) to accommodate more complicated types
        like arrays, whose truth value is often ambiguous. ``__eq_operators__``
        is constructed in ``EqualityMetaclass.__new__``
        """
        if isinstance(other, EventedModel):
            for f_name, eq in self.__eq_operators__.items():
                if f_name not in other.__eq_operators__:
                    return False
                if not eq(getattr(self, f_name), getattr(other, f_name)):
                    return False
            return True
        else:
            return self.dict() == other
    @contextmanager
    def enums_as_values(self, as_values: bool = True):
        """Temporarily override how enums are retrieved.
        Parameters
        ----------
        as_values : bool, optional
            Whether enums should be shown as values (or as enum objects),
            by default `True`
        """
        # Sentinel distinguishes "Config had no use_enum_values attribute"
        # from "it was explicitly set", so teardown restores exactly.
        null = object()
        before = getattr(self.Config, 'use_enum_values', null)
        self.Config.use_enum_values = as_values
        try:
            yield
        finally:
            if before is not null:
                self.Config.use_enum_values = before
            else:
                delattr(self.Config, 'use_enum_values')
def get_defaults(obj: BaseModel):
    """Get possibly nested default values for a Model object."""
    defaults = {}
    for field_name, model_field in obj.__fields__.items():
        default = model_field.get_default()
        # Nested model fields have no scalar default; recurse into the model
        # class to build their default dict instead.
        if default is None and isinstance(model_field.type_, main.ModelMetaclass):
            default = get_defaults(model_field.type_)
        defaults[field_name] = default
    return defaults
| 38.815789 | 113 | 0.635738 |
acf06162d66847787e57b73cbd3976e4d5c82cf6 | 799 | py | Python | SirIsaac/linalgTools.py | sidambhire/SirIsaac | 105f3deb3bc5c2941281caa311ed97df6c57e6d9 | [
"MIT"
] | 39 | 2015-08-24T14:55:02.000Z | 2022-03-14T20:58:14.000Z | SirIsaac/linalgTools.py | sidambhire/SirIsaac | 105f3deb3bc5c2941281caa311ed97df6c57e6d9 | [
"MIT"
] | 20 | 2015-09-10T17:36:32.000Z | 2021-03-03T19:55:15.000Z | SirIsaac/linalgTools.py | sidambhire/SirIsaac | 105f3deb3bc5c2941281caa311ed97df6c57e6d9 | [
"MIT"
] | 14 | 2015-08-24T14:55:06.000Z | 2020-12-08T10:54:56.000Z | # linalgTools.py
#
# Bryan Daniels
# 9.7.2012
#
#
#
import pylab
import scipy
import scipy.linalg
# 7.10.2012
def svdInverse(mat,maxEig=1e10,minEig=1e-10): #1e10,1e-10
    """Pseudo-invert mat via SVD, clamping extreme inverse singular values.

    Inverse singular values with magnitude above maxEig (or below minEig)
    are clamped to +/-maxEig (resp. +/-minEig) to regularize near-singular
    matrices; a message is printed when clamping occurs. Raises
    ZeroDivisionError for an exactly singular matrix.

    NOTE(review): Python 2 code (print statements, old raise syntax); also
    `pylab.find` was removed from modern matplotlib -- porting would need
    e.g. numpy.flatnonzero. Confirm target environment.
    """
    u,w,vt = scipy.linalg.svd(mat)
    if any(w==0.):
        raise ZeroDivisionError, "Singular matrix."
    # Invert the singular values; the clamping below guards against blow-up.
    wInv = w ** -1
    largeIndices = pylab.find( abs(wInv) > maxEig )
    if len(largeIndices) > 0: print "svdInverse:",len(largeIndices),"large singular values out of",len(w)
    wInv[largeIndices] = maxEig*scipy.sign(wInv[largeIndices])
    smallIndices = pylab.find( abs(wInv) < minEig )
    if len(smallIndices) > 0: print "svdInverse:",len(smallIndices),"small singular values out of",len(w)
    wInv[smallIndices] = minEig*scipy.sign(wInv[smallIndices])
    # Reassemble the (pseudo)inverse: V * diag(1/w) * U^T.
    return scipy.dot( scipy.dot(vt.T,scipy.diag(wInv)), u.T )
acf0630a668a22739d31b130b389d7a3064fc30d | 3,557 | py | Python | SourceCode/Module10/radiobuttondemo.py | hackettccp/CIS106SampleCode | 0717fa0f6dc0c48bc51f16ab44e7425b186a35c3 | [
"MIT"
] | 1 | 2019-10-23T03:25:43.000Z | 2019-10-23T03:25:43.000Z | SourceCode/Module10/radiobuttondemo.py | hackettccp/CIS106SampleCode | 0717fa0f6dc0c48bc51f16ab44e7425b186a35c3 | [
"MIT"
] | null | null | null | SourceCode/Module10/radiobuttondemo.py | hackettccp/CIS106SampleCode | 0717fa0f6dc0c48bc51f16ab44e7425b186a35c3 | [
"MIT"
] | null | null | null | #Imports the tkinter module
import tkinter
#Imports the tkinter.messagebox module
import tkinter.messagebox
#Main Function
def main() :
    """Build and run the radio-button demo window (blocks in mainloop)."""
    #Creates the window
    test_window = tkinter.Tk()
    #Sets the window's title
    test_window.wm_title("My Window")
    #Creates two frames that belong to test_window
    upper_frame = tkinter.Frame(test_window)
    lower_frame = tkinter.Frame(test_window)
    #Creates a global IntVar variable.
    #This will be shared among the members
    #of a radio button group.
    #It is global in this program so other functions
    #can access it.
    global rbvar
    rbvar = tkinter.IntVar()
    #Sets the IntVar to zero (none selected)
    rbvar.set(0)
    #Creates three Radiobuttons that belong to upper_frame and
    #uses the shared IntVar variable.
    #Each Radiobutton has a unique value. This value will tell us which
    #button is currently selected.
    testrb = tkinter.Radiobutton(upper_frame,
                                 text="Option 1",
                                 variable=rbvar,
                                 value=1)
    testrb2 = tkinter.Radiobutton(upper_frame,
                                  text="Option 2",
                                  variable=rbvar,
                                  value=2)
    testrb3 = tkinter.Radiobutton(upper_frame,
                                  text="Option 3",
                                  variable=rbvar,
                                  value=3)
    #Packs the Radiobuttons onto upper_frame
    testrb.pack()
    testrb2.pack()
    testrb3.pack()
    #Creates a button that belongs to lower_frame and calls the
    #showdialog function when clicked.
    ok_button = tkinter.Button(lower_frame,
                               text="Get Selection",
                               command=showdialog)
    #Creates a button that belongs to lower_frame and calls the
    #reset function when clicked.
    reset_button = tkinter.Button(lower_frame,
                                  text="Reset",
                                  command=reset)
    #Creates a button that belongs to lower_frame and calls test_window's
    #destroy function when clicked.
    quit_button = tkinter.Button(lower_frame,
                                 text="Quit",
                                 command=test_window.destroy)
    #Packs the three buttons onto lower_frame
    ok_button.pack(side="left")
    reset_button.pack(side="left")
    quit_button.pack(side="left")
    #Packs the frames onto the window
    upper_frame.pack()
    lower_frame.pack()
    #Enters the main loop, displaying the window
    #and waiting for events
    tkinter.mainloop()
#Function that displays a dialog box when it is called.
#The function builds a string based on which Radiobutton is selected.
#The dialog box will display the string created.
def showdialog():
    """Pop up a message box naming the currently selected radio option."""
    # rbvar holds the `value` of whichever Radiobutton is selected (0 = none);
    # map it to the matching label via a lookup table instead of an if-chain.
    labels = {1: "Option 1", 2: "Option 2", 3: "Option 3"}
    selection = labels.get(rbvar.get(), "None")
    tkinter.messagebox.showinfo("Selections", "You selected:\n" + selection)
#Function that sets the rbvar IntVar variable back to zero.
#0 doesn't correspond to any of the Radiobuttons' values,
#so none will be selected.
def reset() :
    """Deselect every Radiobutton: 0 matches none of the buttons' values."""
    rbvar.set(0)
#Calls the main function/starts the program
if __name__ == "__main__":
    # Launch the GUI only when executed as a script; importing this module
    # (e.g. for reuse or testing) no longer opens a window as a side effect.
    main()
| 33.87619 | 72 | 0.601631 |
acf0640aba2ce0fbfde33398e5e6a07b4ef54021 | 12,995 | py | Python | msgraph/cli/command_modules/groups/azext_groups/vendored_sdks/groups/operations/_groups_onenote_section_groups_sections_pages_operations.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | msgraph/cli/command_modules/groups/azext_groups/vendored_sdks/groups/operations/_groups_onenote_section_groups_sections_pages_operations.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | 22 | 2022-03-29T22:54:37.000Z | 2022-03-29T22:55:27.000Z | msgraph/cli/command_modules/groups/azext_groups/vendored_sdks/groups/operations/_groups_onenote_section_groups_sections_pages_operations.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class GroupsOnenoteSectionGroupsSectionsPagesOperations(object):
"""GroupsOnenoteSectionGroupsSectionsPagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~groups.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Generated operation-group constructor: simply stores the shared
        # pipeline client, configuration and (de)serializers supplied by the
        # service client that attaches this class as an attribute.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
def copy_to_section(
self,
group_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_page_id, # type: str
body, # type: "models.Paths1Ghsy1BGroupsGroupIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdMicrosoftGraphCopytosectionPostRequestbodyContentApplicationJsonSchema"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenoteOperation"
"""Invoke action copyToSection.
Invoke action copyToSection.
:param group_id: key: id of group.
:type group_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id: key: id of onenotePage.
:type onenote_page_id: str
:param body: Action parameters.
:type body: ~groups.models.Paths1Ghsy1BGroupsGroupIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdMicrosoftGraphCopytosectionPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
:rtype: ~groups.models.MicrosoftGraphOnenoteOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.copy_to_section.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1Ghsy1BGroupsGroupIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdMicrosoftGraphCopytosectionPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
copy_to_section.metadata = {'url': '/groups/{group-id}/onenote/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/microsoft.graph.copyToSection'} # type: ignore
def onenote_patch_content(
self,
group_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_page_id, # type: str
body, # type: "models.Paths13IutgfGroupsGroupIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdMicrosoftGraphOnenotepatchcontentPostRequestbodyContentApplicationJsonSchema"
**kwargs # type: Any
):
# type: (...) -> None
"""Invoke action onenotePatchContent.
Invoke action onenotePatchContent.
:param group_id: key: id of group.
:type group_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id: key: id of onenotePage.
:type onenote_page_id: str
:param body: Action parameters.
:type body: ~groups.models.Paths13IutgfGroupsGroupIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdMicrosoftGraphOnenotepatchcontentPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.onenote_patch_content.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths13IutgfGroupsGroupIdOnenoteSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdMicrosoftGraphOnenotepatchcontentPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
onenote_patch_content.metadata = {'url': '/groups/{group-id}/onenote/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/microsoft.graph.onenotePatchContent'} # type: ignore
def preview(
self,
group_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_page_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenotePagePreview"
"""Invoke function preview.
Invoke function preview.
:param group_id: key: id of group.
:type group_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id: key: id of onenotePage.
:type onenote_page_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenotePagePreview, or the result of cls(response)
:rtype: ~groups.models.MicrosoftGraphOnenotePagePreview
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenotePagePreview"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.preview.metadata['url'] # type: ignore
path_format_arguments = {
'group-id': self._serialize.url("group_id", group_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenotePagePreview', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
preview.metadata = {'url': '/groups/{group-id}/onenote/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/microsoft.graph.preview()'} # type: ignore
| 49.980769 | 230 | 0.692189 |
acf0646396d48e0969e663551da22f367367a269 | 1,801 | py | Python | cinderclient/tests/unit/v3/test_attachments.py | timgates42/python-cinderclient | af3bc66a5fe2a1ab37d537b6cfe1f5dfb5659002 | [
"Apache-1.1"
] | 74 | 2015-01-29T20:10:47.000Z | 2022-03-03T05:09:00.000Z | cinderclient/tests/unit/v3/test_attachments.py | timgates42/python-cinderclient | af3bc66a5fe2a1ab37d537b6cfe1f5dfb5659002 | [
"Apache-1.1"
] | 6 | 2015-08-10T10:23:42.000Z | 2022-02-16T02:28:22.000Z | cinderclient/tests/unit/v3/test_attachments.py | timgates42/python-cinderclient | af3bc66a5fe2a1ab37d537b6cfe1f5dfb5659002 | [
"Apache-1.1"
] | 125 | 2015-02-24T11:04:51.000Z | 2021-12-23T01:28:05.000Z | # Copyright (C) 2016 EMC Corporation.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import api_versions
from cinderclient.tests.unit import utils
from cinderclient.tests.unit.v3 import fakes
class AttachmentsTest(utils.TestCase):
    """Tests for the volume /attachments API bindings."""

    _VOLUME_ID = 'e84fda45-4de4-4ce4-8f39-fc9d3b0aa05e'
    _INSTANCE_ID = '557ad76c-ce54-40a3-9e91-c40d21665cc3'

    @staticmethod
    def _make_client(microversion):
        # Fake client pinned to the given API microversion.
        return fakes.FakeClient(api_versions.APIVersion(microversion))

    def test_create_attachment(self):
        cs = self._make_client('3.27')
        attachment = cs.attachments.create(
            self._VOLUME_ID, {}, self._INSTANCE_ID, 'null')
        cs.assert_called('POST', '/attachments')
        self.assertEqual(fakes.fake_attachment['attachment'], attachment)

    def test_create_attachment_without_instance_uuid(self):
        cs = self._make_client('3.27')
        attachment = cs.attachments.create(
            self._VOLUME_ID, {}, None, 'null')
        cs.assert_called('POST', '/attachments')
        self.assertEqual(
            fakes.fake_attachment_without_instance_id['attachment'],
            attachment)

    def test_complete_attachment(self):
        # Completing attachments requires microversion 3.44.
        cs = self._make_client('3.44')
        result = cs.attachments.complete('a232e9ae')
        self.assertTrue(result.ok)
| 36.755102 | 78 | 0.676291 |
acf0647a81ff2d14790789ae9c0bfc12289607fb | 4,021 | py | Python | test/CVSCOMSTR.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | [
"MIT"
] | 3 | 2017-01-06T09:26:23.000Z | 2017-03-04T04:13:20.000Z | test/CVSCOMSTR.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | [
"MIT"
] | 2 | 2015-10-27T20:17:24.000Z | 2016-08-04T21:49:56.000Z | test/CVSCOMSTR.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | [
"MIT"
] | 4 | 2015-03-31T16:09:15.000Z | 2021-08-04T12:41:47.000Z | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test customizing the output with the the $CVSCOMSTR variable.
"""
import os.path
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('CVS', ['CVS', 'sub'], 'sub')
sub_CVS = os.path.join('sub', 'CVS')
sub_SConscript = os.path.join('sub', 'SConscript')
sub_all = os.path.join('sub', 'all')
sub_ddd_in = os.path.join('sub', 'ddd.in')
sub_ddd_out = os.path.join('sub', 'ddd.out')
sub_eee_in = os.path.join('sub', 'eee.in')
sub_eee_out = os.path.join('sub', 'eee.out')
sub_fff_in = os.path.join('sub', 'fff.in')
sub_fff_out = os.path.join('sub', 'fff.out')
test.write('my-cvs-co.py', """
import shutil
import sys
for f in sys.argv[1:]:
shutil.copy('CVS/'+f, f)
""")
test.write('SConstruct', """
def cat(env, source, target):
target = str(target[0])
source = map(str, source)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
env = Environment(TOOLS = ['default', 'CVS'],
BUILDERS={'Cat':Builder(action=cat)},
CVSCOM='%(_python_)s my-cvs-co.py $TARGET',
CVSCOMSTR='Checking out $TARGET from our fake CVS')
env.Cat('aaa.out', 'aaa.in')
env.Cat('bbb.out', 'bbb.in')
env.Cat('ccc.out', 'ccc.in')
env.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out'])
env.SourceCode('.', env.CVS(None))
SConscript('sub/SConscript', "env")
""" % locals())
test.write(['CVS', 'sub', 'SConscript'], """\
Import("env")
env.Cat('ddd.out', 'ddd.in')
env.Cat('eee.out', 'eee.in')
env.Cat('fff.out', 'fff.in')
env.Cat('all', ['ddd.out', 'eee.out', 'fff.out'])
""")
test.write(['CVS', 'aaa.in'], "CVS/aaa.in\n")
test.write('bbb.in', "checked-out bbb.in\n")
test.write(['CVS', 'ccc.in'], "CVS/ccc.in\n")
test.write(['CVS', 'sub', 'ddd.in'], "CVS/sub/ddd.in\n")
test.write(['sub', 'eee.in'], "checked-out sub/eee.in\n")
test.write(['CVS', 'sub', 'fff.in'], "CVS/sub/fff.in\n")
test.run(arguments = '.',
stdout = test.wrap_stdout(read_str = """\
Checking out %(sub_SConscript)s from our fake CVS
""" % locals(),
build_str = """\
Checking out aaa.in from our fake CVS
cat(["aaa.out"], ["aaa.in"])
cat(["bbb.out"], ["bbb.in"])
Checking out ccc.in from our fake CVS
cat(["ccc.out"], ["ccc.in"])
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
Checking out %(sub_ddd_in)s from our fake CVS
cat(["%(sub_ddd_out)s"], ["%(sub_ddd_in)s"])
cat(["%(sub_eee_out)s"], ["%(sub_eee_in)s"])
Checking out %(sub_fff_in)s from our fake CVS
cat(["%(sub_fff_out)s"], ["%(sub_fff_in)s"])
cat(["%(sub_all)s"], ["%(sub_ddd_out)s", "%(sub_eee_out)s", "%(sub_fff_out)s"])
""" % locals()))
test.must_match('all',
"CVS/aaa.in\nchecked-out bbb.in\nCVS/ccc.in\n")
test.must_match(['sub', 'all'],
"CVS/sub/ddd.in\nchecked-out sub/eee.in\nCVS/sub/fff.in\n")
#
test.pass_test()
| 32.691057 | 79 | 0.650087 |
acf06493c13649e5d4d9adc9127d10560d2a23d0 | 4,463 | py | Python | setup.py | MSLNZ/pr-autocollimator | 1fdfd932097b83877b666fc91a6e99cce5f1bd70 | [
"MIT"
] | null | null | null | setup.py | MSLNZ/pr-autocollimator | 1fdfd932097b83877b666fc91a6e99cce5f1bd70 | [
"MIT"
] | null | null | null | setup.py | MSLNZ/pr-autocollimator | 1fdfd932097b83877b666fc91a6e99cce5f1bd70 | [
"MIT"
] | null | null | null | import os
import re
import sys
from subprocess import check_output
from setuptools import (
setup,
find_packages,
)
def read(filename):
    """Return the entire text content of *filename*."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
def fetch_init(key):
    """Extract *key*'s literal value from __init__.py without importing it."""
    # Match e.g. "__version__ = '1.2.3'" and take the right-hand side.
    pattern = r'{}\s*=\s*(.+)'.format(key)
    match = re.search(pattern, read(init_original))
    value = match.group(1)
    return value.strip('\'\"')
def get_version():
    """Return the package version string for setup().

    Release versions (no ``dev`` marker) and test runs return the value from
    ``__init__.py`` unchanged. Dev builds get a PEP 440 local version segment
    appended: ``+editable`` for develop/editable installs, otherwise
    ``+<short git sha>``. When a sha suffix is added, ``__init__.py`` is
    rewritten in place (the original is saved to ``init_backup`` and restored
    after ``setup()`` runs at the bottom of this file).
    """
    init_version = fetch_init('__version__')
    if 'dev' not in init_version or testing:
        return init_version
    if 'develop' in sys.argv:
        # then installing in editable (develop) mode
        #   python setup.py develop
        #   pip install -e .
        # following PEP-440, the local version identifier starts with '+'
        return init_version + '+editable'

    # append the commit hash to __version__
    setup_dir = os.path.dirname(os.path.realpath(__file__))
    try:
        # write all error messages from git to devnull
        with open(os.devnull, mode='w') as devnull:
            out = check_output(['git', 'rev-parse', 'HEAD'], cwd=setup_dir, stderr=devnull)
        sha1 = out.strip().decode()
    # Fix: was a bare "except:", which also swallows SystemExit/KeyboardInterrupt.
    except Exception:
        # the git executable is not available, manually parse .git directory
        try:
            git_dir = os.path.join(setup_dir, '.git')
            with open(os.path.join(git_dir, 'HEAD'), mode='rt') as fp1:
                line = fp1.readline().strip()
                if line.startswith('ref:'):
                    _, ref_path = line.split()
                    with open(os.path.join(git_dir, ref_path), mode='rt') as fp2:
                        sha1 = fp2.readline().strip()
                else:  # detached HEAD
                    sha1 = line
        except Exception:
            # No usable git metadata at all: fall back to the plain dev version.
            return init_version

    suffix = sha1[:7]
    if not suffix or init_version.endswith(suffix):
        return init_version

    # following PEP-440, the local version identifier starts with '+'
    dev_version = init_version + '+' + suffix
    with open(init_original) as fp:
        init_source = fp.read()
    if os.path.isfile(init_backup):
        os.remove(init_backup)
    os.rename(init_original, init_backup)
    # Rewrite __version__ so the built package reports the sha-suffixed version.
    with open(init_original, mode='wt') as fp:
        fp.write(re.sub(
            r'__version__\s*=.+',
            "__version__ = '{}'".format(dev_version),
            init_source
        ))
    return dev_version
# the packages that pr-autocollimator depends on
install_requires = [
    'opencv-python==4.5.4.60',
    'numpy==1.21.4',
    'requests',
    # Raspberry Pi-only extras (web app, plotting, camera, LEDs, GPIO).
    'flask; "arm" in platform_machine',
    'matplotlib; "arm" in platform_machine',
    'picamera; "arm" in platform_machine',
    'rpi-ws281x; "arm" in platform_machine',
    'RPi.GPIO; "arm" in platform_machine',
    'scipy==1.7.3; "arm" in platform_machine',
]

# the packages that are needed for running the tests
tests_require = ['pytest', 'pytest-cov']

# Truthy when invoked as "python setup.py test" / "tests".
testing = {'test', 'tests'}.intersection(sys.argv)

# Paths used by get_version() for its temporary __version__ rewrite.
init_original = 'autocollimator/__init__.py'
init_backup = init_original + '.backup'

# NOTE: may rewrite __init__.py (backed up to init_backup); restored below.
version = get_version()

setup(
    name='pr-autocollimator',
    version=version,
    author=fetch_init('__author__'),
    author_email='info@measurement.govt.nz',
    url='https://github.com/MSLNZ/pr-autocollimator',
    description='Locate the crosshair of the autocollimator',
    long_description=read('README.rst'),
    platforms='any',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Software Development',
        'Topic :: Scientific/Engineering',
    ],
    tests_require=tests_require,
    install_requires=install_requires,
    extras_require={'tests': tests_require},
    entry_points={
        'console_scripts': [
            'autocollimator = autocollimator.webapp:run',
        ],
    },
    packages=find_packages(include=('autocollimator',)),
    include_package_data=True,
)

# Undo the temporary __version__ rewrite performed by get_version().
if os.path.isfile(init_backup):
    os.remove(init_original)
    os.rename(init_backup, init_original)
| 31.652482 | 94 | 0.622003 |
acf06728d0d0d537480d54f8fe398678269f83db | 4,522 | py | Python | tests/components/deconz/test_binary_sensor.py | alemuro/home-assistant | 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | [
"Apache-2.0"
] | 2 | 2019-10-19T15:07:32.000Z | 2022-01-29T10:33:20.000Z | tests/components/deconz/test_binary_sensor.py | alemuro/home-assistant | 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | [
"Apache-2.0"
] | 4 | 2021-02-08T21:05:14.000Z | 2021-09-08T02:57:03.000Z | tests/components/deconz/test_binary_sensor.py | alemuro/home-assistant | 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | [
"Apache-2.0"
] | 1 | 2019-10-04T13:26:54.000Z | 2019-10-04T13:26:54.000Z | """deCONZ binary sensor platform tests."""
from unittest.mock import Mock, patch
from tests.common import mock_coro
from homeassistant import config_entries
from homeassistant.components import deconz
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
import homeassistant.components.binary_sensor as binary_sensor
# Two fake deCONZ sensors: "1" (ZHAPresence) is a binary type and should
# create an entity; "2" (ZHATemperature) is not binary and should be ignored
# by the binary_sensor platform.
SENSOR = {
    "1": {
        "id": "Sensor 1 id",
        "name": "Sensor 1 name",
        "type": "ZHAPresence",
        "state": {"presence": False},
        "config": {},
        "uniqueid": "00:00:00:00:00:00:00:00-00",
    },
    "2": {
        "id": "Sensor 2 id",
        "name": "Sensor 2 name",
        "type": "ZHATemperature",
        "state": {"temperature": False},
        "config": {},
    },
}

# Minimal config-entry data for a fake deCONZ gateway.
ENTRY_CONFIG = {
    deconz.config_flow.CONF_API_KEY: "ABCDEF",
    deconz.config_flow.CONF_BRIDGEID: "0123456789",
    deconz.config_flow.CONF_HOST: "1.2.3.4",
    deconz.config_flow.CONF_PORT: 80,
}

# NOTE(review): mutated in place by setup_gateway(), so the CLIP-sensor flag
# can leak between tests — confirm this is intentional.
ENTRY_OPTIONS = {
    deconz.const.CONF_ALLOW_CLIP_SENSOR: True,
    deconz.const.CONF_ALLOW_DECONZ_GROUPS: True,
}
async def setup_gateway(hass, data, allow_clip_sensor=True):
    """Load the deCONZ binary sensor platform.

    ``data`` is the fake deCONZ REST state (e.g. ``{"sensors": SENSOR}``);
    returns the DeconzGateway wired into ``hass``.
    """
    from pydeconz import DeconzSession

    loop = Mock()
    session = Mock()

    # NOTE(review): mutates the shared module-level ENTRY_OPTIONS dict.
    ENTRY_OPTIONS[deconz.const.CONF_ALLOW_CLIP_SENSOR] = allow_clip_sensor

    config_entry = config_entries.ConfigEntry(
        1,
        deconz.DOMAIN,
        "Mock Title",
        ENTRY_CONFIG,
        "test",
        config_entries.CONN_CLASS_LOCAL_PUSH,
        system_options={},
        options=ENTRY_OPTIONS,
    )
    gateway = deconz.DeconzGateway(hass, config_entry)
    gateway.api = DeconzSession(loop, session, **config_entry.data)
    gateway.api.config = Mock()
    hass.data[deconz.DOMAIN] = {gateway.bridgeid: gateway}

    # Avoid real HTTP: feed the fake state straight into the pydeconz session.
    with patch("pydeconz.DeconzSession.async_get_state", return_value=mock_coro(data)):
        await gateway.api.async_load_parameters()

    await hass.config_entries.async_forward_entry_setup(config_entry, "binary_sensor")
    # To flush out the service call to update the group
    await hass.async_block_till_done()
    return gateway
async def test_platform_manually_configured(hass):
    """A manually configured platform must not discover or set up a gateway."""
    config = {"binary_sensor": {"platform": deconz.DOMAIN}}
    setup_ok = await async_setup_component(hass, binary_sensor.DOMAIN, config)
    assert setup_ok is True
    # No gateway data may have been registered under the deconz domain.
    assert deconz.DOMAIN not in hass.data
async def test_no_binary_sensors(hass):
    """An empty deCONZ state must produce no sensor entities."""
    gateway = await setup_gateway(hass, {})
    registered = hass.data[deconz.DOMAIN][gateway.bridgeid].deconz_ids
    assert len(registered) == 0
    assert len(hass.states.async_all()) == 0
async def test_binary_sensors(hass):
    """Only binary sensor types become binary_sensor entities."""
    gateway = await setup_gateway(hass, {"sensors": SENSOR})
    # Presence ("1") is binary; temperature ("2") is not.
    assert "binary_sensor.sensor_1_name" in gateway.deconz_ids
    assert "binary_sensor.sensor_2_name" not in gateway.deconz_ids
    assert len(hass.states.async_all()) == 1
    # Push a state update through the pydeconz sensor object.
    presence = hass.data[deconz.DOMAIN][gateway.bridgeid].api.sensors["1"]
    presence.async_update({"state": {"on": False}})
async def test_add_new_sensor(hass):
    """A sensor announced over the dispatcher becomes an entity."""
    gateway = await setup_gateway(hass, {})
    new_sensor = Mock()
    # 'name' is special in the Mock constructor, so assign it via configure_mock.
    new_sensor.configure_mock(
        name="name", type="ZHAPresence", BINARY=True, uniqueid="1")
    new_sensor.register_async_callback = Mock()
    async_dispatcher_send(
        hass, gateway.async_event_new_device("sensor"), [new_sensor])
    await hass.async_block_till_done()
    assert "binary_sensor.name" in gateway.deconz_ids
async def test_do_not_allow_clip_sensor(hass):
    """CLIP sensors are skipped when the gateway option disallows them."""
    gateway = await setup_gateway(hass, {}, allow_clip_sensor=False)
    clip_sensor = Mock()
    # 'name' is special in the Mock constructor, so assign it via configure_mock.
    clip_sensor.configure_mock(name="name", type="CLIPPresence")
    clip_sensor.register_async_callback = Mock()
    async_dispatcher_send(
        hass, gateway.async_event_new_device("sensor"), [clip_sensor])
    await hass.async_block_till_done()
    assert len(gateway.deconz_ids) == 0
async def test_unload_switch(hass):
    """Resetting the gateway removes all of its entities."""
    gateway = await setup_gateway(hass, {"sensors": SENSOR})
    await gateway.async_reset()
    assert len(hass.states.async_all()) == 0
| 30.761905 | 87 | 0.686643 |
acf068e8a63834d5324881cdee3205e6786c78d7 | 3,258 | py | Python | rich/__init__.py | cra/rich | f96e62c011fa8ae13f5819cfda1d9f54ac8b5771 | [
"MIT"
] | 1 | 2020-12-24T08:24:11.000Z | 2020-12-24T08:24:11.000Z | rich/__init__.py | presence1337/rich | 99cd18d9630c4edceb6303afef15f6060a094735 | [
"MIT"
] | 6 | 2021-12-31T12:25:06.000Z | 2022-02-14T12:29:11.000Z | rich/__init__.py | presence1337/rich | 99cd18d9630c4edceb6303afef15f6060a094735 | [
"MIT"
] | null | null | null | """Rich text and beautiful formatting in the terminal."""
from typing import Any, IO, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from .console import Console
# Global console used by alternative print
_console: Optional["Console"] = None
def get_console() -> "Console":
    """Return the lazily created global Console instance.

    Used whenever Rich needs a Console and has not been given one explicitly.

    Returns:
        Console: The shared console instance.
    """
    global _console
    if _console is not None:
        return _console
    # Imported lazily to avoid a circular import at module load time.
    from .console import Console

    _console = Console()
    return _console
def reconfigure(*args, **kwargs) -> None:
    """Reconfigure the global console by replacing it with another.

    Args:
        *args: Positional arguments forwarded to the
            :class:`~rich.console.Console` constructor.
        **kwargs: Keyword arguments forwarded to the
            :class:`~rich.console.Console` constructor.
    """
    from rich.console import Console

    new_console = Console(*args, **kwargs)
    # Fix: go through get_console() so the global console is created first;
    # the bare module global may still be None here, and "None.__dict__ = ..."
    # would raise AttributeError.
    console = get_console()
    # Swap state in place so existing references to the global console pick up
    # the new configuration.
    console.__dict__ = new_console.__dict__
def print(*objects: Any, sep: str = " ", end: str = "\n", file: Optional[IO[str]] = None, flush: bool = False):
    r"""Print object(s) supplied via positional arguments.
    This function has an identical signature to the built-in print.
    For more advanced features, see the :class:`~rich.console.Console` class.
    Args:
        sep (str, optional): Separator between printed objects. Defaults to " ".
        end (str, optional): Character to write at end of output. Defaults to "\\n".
        file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
        flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
    """
    from .console import Console

    # Reuse the shared global console for stdout; a custom file gets its own
    # throwaway Console instance. 'flush' is accepted only for signature
    # compatibility with builtins.print.
    write_console = get_console() if file is None else Console(file=file)
    return write_console.print(*objects, sep=sep, end=end)
def inspect(
    obj: Any,
    *,
    console: "Console" = None,
    title: str = None,
    help: bool = False,
    methods: bool = False,
    docs: bool = True,
    private: bool = False,
    dunder: bool = False,
    sort: bool = True,
    all: bool = False,
):
    """Inspect any Python object and print a report about it.

    Args:
        obj (Any): An object to inspect.
        console (Console, optional): Console to print to, or None for the global console.
        title (str, optional): Title to display over inspect result, or None use type. Defaults to None.
        help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
        methods (bool, optional): Enable inspection of callables. Defaults to False.
        docs (bool, optional): Also render doc strings. Defaults to True.
        private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
        dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
        sort (bool, optional): Sort attributes alphabetically. Defaults to True.
        all (bool, optional): Show all attributes. Defaults to False.
    """
    target_console = console or get_console()
    from rich._inspect import Inspect

    # Build the renderable report and hand it to the console.
    report = Inspect(
        obj,
        title=title,
        help=help,
        methods=methods,
        docs=docs,
        private=private,
        dunder=dunder,
        sort=sort,
        all=all,
    )
    target_console.print(report)
# Tiny demo when run as a script: exercises the module-level print override.
if __name__ == "__main__":  # pragma: no cover
    print("Hello, **World**")
acf0694e4b3473de496eebbba16afdaa2e0847c8 | 20,676 | py | Python | pylibs/benchmarking/frameworks/framework_base.py | famousyub/smartyphp | 2691233a8993b8902c6a3d533a6e3f1fe44fe0f8 | [
"MIT"
] | 359 | 2018-06-16T02:42:54.000Z | 2022-03-21T06:39:26.000Z | pylibs/benchmarking/frameworks/framework_base.py | famousyub/smartyphp | 2691233a8993b8902c6a3d533a6e3f1fe44fe0f8 | [
"MIT"
] | 257 | 2018-07-03T22:08:27.000Z | 2022-03-31T18:45:04.000Z | pylibs/benchmarking/frameworks/framework_base.py | famousyub/smartyphp | 2691233a8993b8902c6a3d533a6e3f1fe44fe0f8 | [
"MIT"
] | 77 | 2018-06-16T04:00:29.000Z | 2022-02-19T03:07:58.000Z | #!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import ast
import json
import os
import random
import re
import shutil
from data_converters.data_converters import getConverters
from platforms.platforms import getHostPlatform
from six import string_types
from utils import software_power
from utils.utilities import (
deepMerge,
deepReplace,
getFAIPEPROOT,
getString,
getModelName,
)
class FrameworkBase(object):
    def __init__(self, args):
        """Store CLI args and initialize lazily populated framework state."""
        # Registered data converters, keyed by name (see getConverters()).
        self.converters = getConverters()
        # NOTE(review): runBenchmark reads self.tempdir, not self.tmpdir —
        # confirm which attribute name is intended.
        self.tmpdir = None
        # Host platform handle, created on first runBenchmark call.
        self.host_platform = None
        self.args = args
    @abc.abstractmethod
    def getName(self):
        """Return the framework's name; concrete subclasses must override."""
        # Sentinel fallback; @abc.abstractmethod already blocks direct instantiation.
        return "Error"
    @abc.abstractmethod
    def runBenchmark(self, info, benchmark, platform):
        """Run one benchmark (a model plus exactly one test) on *platform*.

        Orchestrates the full lifecycle: string-map substitution, copying
        programs/model/input files to the target, model- and test-level
        pre/post-processing, the main measurement commands, power metric
        collection, and cleanup of files on the target platform.

        :param info: run context (programs, shared_libs, string_map, ...).
        :param benchmark: dict with "model" and a single-entry "tests" list.
        :param platform: target platform adapter (device or host).
        :return: tuple of (metrics output dict, output files dict or None).
        """
        model = benchmark["model"]
        tests = benchmark["tests"]
        assert len(tests) == 1, (
            "At this point, only one test should "
            + "exist in one benchmark. However, benchmark "
            + "{} doesn't.".format(benchmark["name"])
        )
        test = tests[0]
        # A benchmark may be repeated; first/last iteration control when files
        # are pushed to / removed from the target platform.
        index = test["INDEX"] if "INDEX" in test else 0
        first_iteration = index == 0
        last_iteration = ("repeat" not in model) or (
            "repeat" in model and index == model["repeat"] - 1
        )
        if self.host_platform is None:
            self.host_platform = getHostPlatform(self.tempdir, self.args)
        program_files = {
            name: info["programs"][name]["location"] for name in info["programs"]
        }
        program_path = (
            os.path.dirname(program_files["program"])
            if "program" in program_files
            else None
        )
        stringmap_from_info = info["string_map"] if "string_map" in info else None
        self._replaceStringMap(benchmark, platform, program_path, stringmap_from_info)
        # better to be before target program files separation.
        # this way, in ios, the platform may not be copied to the target.
        platform.preprocess(programs=program_files, benchmark=benchmark)
        # Only programs referenced by the test commands go to the target.
        tgt_program_files, host_program_files = self._separatePrograms(
            program_files, test.get("commands")
        )
        tgt_program_files = platform.copyFilesToPlatform(
            tgt_program_files, copy_files=first_iteration
        )
        programs = {}
        deepMerge(programs, host_program_files)
        deepMerge(programs, tgt_program_files)
        model_files = {
            name: model["files"][name]["location"] for name in model["files"]
        }
        if "converter" in model:
            converter = model["converter"]
            assert "name" in converter, "converter field must have a name"
            assert converter["name"] in self.converters, "Unknown converter {}".format(
                converter
            )
        else:
            converter = None
        output = {}
        # inject default parameters into test
        if "iter" not in test:
            test["iter"] = -1
        # overall preprocess
        if "preprocess" in model and first_iteration:
            commands = model["preprocess"]["commands"]
            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                None,
                model_files,
                None,
                None,
                None,
                None,
                -1,
                converter,
            )
        input_files = (
            {
                name: test["input_files"][name]["location"]
                for name in test["input_files"]
            }
            if "input_files" in test
            else None
        )
        test_files = (
            {name: test["files"][name]["location"] for name in test["files"]}
            if "files" in test
            else {}
        )
        # Let's handle preprocess comamnd first,
        # since we will copy all files into host
        if "preprocess" in test:
            # simple thing first, let's assume preprocess is self contained
            # check the program to executable
            if (
                "files" in test["preprocess"]
                and "program" in test["preprocess"]["files"]
            ):
                host_program_path = test["preprocess"]["files"]["program"]["location"]
                os.chmod(host_program_path, 0o777)
            # will deprecate in the future
            if "files" in test["preprocess"]:
                preprocess_files = {
                    name: test["preprocess"]["files"][name]["location"]
                    for name in test["preprocess"]["files"]
                }
                deepMerge(test_files, preprocess_files)
            if "commands" in test["preprocess"]:
                commands = test["preprocess"]["commands"]
            elif "command" in test["preprocess"]:
                commands = [test["preprocess"]["command"]]
            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                test,
                model_files,
                input_files,
                None,
                None,
                test_files,
                -1,
                converter,
            )
        tgt_input_files = (
            platform.copyFilesToPlatform(input_files) if input_files else None
        )
        shared_libs = None
        if "shared_libs" in info:
            shared_libs = platform.copyFilesToPlatform(
                info["shared_libs"], copy_files=first_iteration
            )
        tgt_model_files = platform.copyFilesToPlatform(
            model_files, copy_files=first_iteration
        )
        tgt_result_files = None
        if "output_files" in test:
            tgt_result_files = {
                name: test["output_files"][name]["location"]
                for name in test["output_files"]
            }
        total_num = test["iter"]
        # Test-level platform_args override model-level ones.
        if "platform_args" in test:
            platform_args = test["platform_args"]
        elif "platform_args" in model:
            platform_args = model["platform_args"]
        else:
            platform_args = {}
        if "timeout" in model:
            platform_args["timeout"] = model["timeout"]
        if "timeout" in test:
            platform_args["timeout"] = test["timeout"]
        program = programs["program"] if "program" in programs else ""
        if test["metric"] == "power":
            platform_args["power"] = True
            method = test.get("method")
            platform_args["method"] = method
            if method == "software":
                power_util = software_power.PowerUtil(
                    platform, test.get("collection_time", 300)
                )
            else:
                # FIXME "Monsoon" was unimportable
                from utils.monsoon_power import collectPowerData
            # in power metric, the output is ignored
            total_num = 0
            platform.killProgram(program)
        if test.get("env", False):
            platform_args["env"] = test["env"]
        if platform.getType() == "host":
            # Fix the number of threads
            if not platform_args.get("env", False):
                platform_args["env"] = {}
            MKL_NUM_THREADS = test.get("MKL_NUM_THREADS", 1)
            OMP_NUM_THREADS = test.get("OMP_NUM_THREADS", 1)
            if MKL_NUM_THREADS > 0:
                platform_args["env"]["MKL_NUM_THREADS"] = MKL_NUM_THREADS
            if OMP_NUM_THREADS > 0:
                platform_args["env"]["OMP_NUM_THREADS"] = OMP_NUM_THREADS
            # Randomly select one cpu core from logic cpu #4 to #13.
            cpu_core = test.get("cpu-list", random.randint(5, 14))
            if isinstance(test["commands"], list) and cpu_core > 0:
                test["commands"][-1] = " ".join(
                    ["taskset", "--cpu-list", str(cpu_core), test["commands"][-1]]
                )
        # The main measurement run.
        self._runCommands(
            output,
            test["commands"],
            platform,
            programs,
            model,
            test,
            tgt_model_files,
            tgt_input_files,
            tgt_result_files,
            shared_libs,
            test_files,
            total_num,
            converter,
            platform_args=platform_args,
            main_command=True,
        )
        if test["metric"] == "power":
            if test.get("method") == "software":
                output = power_util.collect()
            else:
                collection_time = (
                    test["collection_time"] if "collection_time" in test else 180
                )
                voltage = float(test["voltage"]) if "voltage" in test else 4.0
                output = collectPowerData(
                    platform.platform_hash,
                    collection_time,
                    voltage,
                    test["iter"],
                    self.args.monsoon_map,
                )
                platform.waitForDevice(20)
            # kill the process if exists
            platform.killProgram(program)
        # remove the files before copying out the output files
        # this will save some time in ios platform, since in ios
        # all files are copied back to the host system
        if len(output) > 0:
            if input_files is not None:
                platform.delFilesFromPlatform(tgt_input_files)
            if last_iteration:
                platform.delFilesFromPlatform(tgt_model_files)
                platform.delFilesFromPlatform(tgt_program_files)
                if shared_libs is not None:
                    platform.delFilesFromPlatform(shared_libs)
        output_files = None
        if "output_files" in test:
            target_dir = os.path.join(self.tempdir, "output")
            shutil.rmtree(target_dir, True)
            os.makedirs(target_dir)
            output_files = platform.moveFilesFromPlatform(tgt_result_files, target_dir)
        platform.postprocess()
        if "postprocess" in test:
            # NOTE(review): the condition reads test["preprocess"]["files"] but
            # the body reads test["postprocess"]["files"] -- looks like a
            # copy/paste bug from the preprocess branch above; confirm.
            if (
                "files" in test["postprocess"]
                and "program" in test["preprocess"]["files"]
            ):
                host_program_path = test["postprocess"]["files"]["program"]["location"]
                os.chmod(host_program_path, 0o777)
            # will deprecate in the future
            if "files" in test["postprocess"]:
                postprocess_files = {
                    name: test["postprocess"]["files"][name]["location"]
                    for name in test["postprocess"]["files"]
                }
                deepMerge(test_files, postprocess_files)
            commands = None
            if "commands" in test["postprocess"]:
                commands = test["postprocess"]["commands"]
            elif "command" in test["postprocess"]:
                commands = [test["postprocess"]["command"]]
            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                test,
                model_files,
                input_files,
                output_files,
                None,
                test_files,
                -1,
                converter,
            )
        if "postprocess" in model and last_iteration:
            commands = model["postprocess"]["commands"]
            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                test,
                model_files,
                None,
                None,
                None,
                None,
                -1,
                converter,
            )
        # after everything is done, some of the output files may
        # contain metrics that can be processed. Those files have
        # field converter, and specify which convert to use to
        # convert the metrics
        if output_files:
            for filename in output_files:
                file = output_files[filename]
                converter = test["output_files"][filename].get("converter")
                if not converter:
                    continue
                assert "name" in converter, "converter field must have a name"
                assert (
                    converter["name"] in self.converters
                ), "Unknown converter {}".format(converter["name"])
                converter_class = self.converters[converter["name"]]
                args = converter.get("args")
                with open(file, "r") as f:
                    content = f.read()
                convert = converter_class()
                results, _ = convert.collect(content, args)
                one_output = convert.convert(results)
                deepMerge(output, one_output)
        return output, output_files
@abc.abstractmethod
def composeRunCommand(
self,
commands,
platform,
programs,
model,
test,
model_files,
input_files,
output_files,
shared_libs,
test_files=None,
main_command=False,
):
if commands is None or not isinstance(commands, list):
return None
files = input_files.copy() if input_files is not None else {}
files.update(output_files if output_files is not None else {})
files.update(test_files if test_files is not None else {})
extra_arguments = " " + model["command_args"] if "command_args" in model else ""
string_map = json.loads(self.args.string_map) if self.args.string_map else {}
composed_commands = []
for command in commands:
more_args = extra_arguments if "{program}" in command else ""
command = self._getReplacedCommand(
command, files, model, test, programs, model_files
)
command += more_args
# extra args only applied for main_command
if main_command and len(commands) == 1 and "pep_extra_args" in string_map:
command += " " + string_map["pep_extra_args"]
composed_commands.append(command)
return composed_commands
def _getReplacedCommand(self, command, files, model, test, programs, model_files):
pattern = re.compile(r"\{([\w|\.]+)\}")
repeat = True
while repeat:
repeat = False
results = []
for m in pattern.finditer(command):
results.append(
{"start": m.start(), "end": m.end(), "content": m.group(1)}
)
results.reverse()
for res in results:
replace = self._getMatchedString(test, res["content"], files)
if replace is None:
# TODO: handle shared libraries
replace = self._getMatchedString(model, res["content"], model_files)
if replace is None:
replace = self._getMatchedString(programs, res["content"])
if replace:
command = command[: res["start"]] + replace + command[res["end"] :]
repeat = True
return command
def _getMatchedString(self, root, match, files=None):
if not isinstance(root, dict):
return None
if match in root:
return getString(root[match])
# split on .
fields = match.split(".")
found = True
entry = root
for i in range(len(fields)):
field = fields[i]
if field not in entry:
found = False
break
entry = entry[field]
if not found:
return None
if "location" in entry:
# is a file field
if files and fields[-1] in files:
return getString(files[fields[-1]])
assert isinstance(entry, string_types), (
"Output {}".format(entry) + " is not string type"
)
return getString(entry)
@abc.abstractmethod
def runOnPlatform(self, total_num, cmd, platform, platform_args, converter):
raise NotImplementedError("Child class need to implement runOnPlatform")
def _runCommands(
self,
output,
commands,
platform,
programs,
model,
test,
model_files,
input_files,
output_files,
shared_libs,
test_files,
total_num,
converter,
platform_args=None,
main_command=False,
):
if platform_args is None:
platform_args = {}
if test and test.get("log_output", False):
platform_args["log_output"] = True
if self.args.timeout > 0 and "timeout" not in platform_args:
platform_args["timeout"] = self.args.timeout
cmds = self.composeRunCommand(
commands,
platform,
programs,
model,
test,
model_files,
input_files,
output_files,
shared_libs,
test_files,
main_command,
)
profiling_enabled = False
if "profiler" in test:
profiling_enabled = test.get("profiler", {}).get("enabled", False)
if profiling_enabled:
platform_args["profiler_args"] = test.get("profiler", {})
platform_args["model_name"] = getModelName(model)
for idx, cmd in enumerate(cmds):
# note that we only enable profiling for the last command
# of the main commands.
platform_args["enable_profiling"] = (
profiling_enabled and main_command and idx == len(cmds) - 1
)
one_output = self.runOnPlatform(
total_num, cmd, platform, platform_args, converter
)
deepMerge(output, one_output)
@abc.abstractmethod
def verifyBenchmarkFile(self, benchmark, filename, is_post):
return None
def rewriteBenchmarkTests(self, benchmark, filename):
pass
def _separatePrograms(self, program_files, commands):
if commands is None or not isinstance(commands, list):
return program_files, {}
tgt_program_files = {}
for command in commands:
for name in program_files:
if "{" + name + "}" in command:
tgt_program_files[name] = program_files[name]
host_program_files = {
name: program_files[name]
for name in program_files
if name not in tgt_program_files
}
return tgt_program_files, host_program_files
def _createHostDir(self):
hostdir = os.path.join(self.tempdir, "host")
i = 0
while os.path.exists(hostdir):
hostdir = os.path.join(self.tempdir, "host" + str(i))
i = i + 1
os.makedirs(hostdir, 0o777)
return hostdir
    def _replaceStringMap(self, root, platform, program_path, stringmap_from_info):
        """Substitute {PLACEHOLDER} tokens throughout *root* in place.

        Merges the CLI --string_map with the per-run string map from *info*,
        adds the built-in TGTDIR/HOSTDIR/FAIPEPROOT/BUILDDIR entries, and
        deep-replaces every "{NAME}" occurrence inside the benchmark dict.
        """
        try:
            # backward compatible
            string_map = (
                json.loads(self.args.string_map) if self.args.string_map else {}
            )
            info_string_map = (
                json.loads(stringmap_from_info) if stringmap_from_info else {}
            )
        except BaseException:
            # Older callers pass python-literal strings instead of JSON.
            string_map = (
                ast.literal_eval(self.args.string_map) if self.args.string_map else {}
            )
            info_string_map = stringmap_from_info if stringmap_from_info else {}
        deepMerge(string_map, info_string_map)
        string_map["TGTDIR"] = platform.getOutputDir()
        string_map["HOSTDIR"] = self._createHostDir()
        string_map["FAIPEPROOT"] = getFAIPEPROOT()
        if program_path:
            string_map["BUILDDIR"] = program_path
        for name in string_map:
            value = string_map[name]
            deepReplace(root, "{" + name + "}", value)
| 34.46 | 88 | 0.53492 |
acf06b99c8090e9a78564e6d6769e7fe167d77bf | 531 | py | Python | GenerateText/models/model.py | dnjegovanovic/rnn | 1e8198606c5fbdc0aaf4647efa42603a823bd904 | [
"MIT"
] | null | null | null | GenerateText/models/model.py | dnjegovanovic/rnn | 1e8198606c5fbdc0aaf4647efa42603a823bd904 | [
"MIT"
] | null | null | null | GenerateText/models/model.py | dnjegovanovic/rnn | 1e8198606c5fbdc0aaf4647efa42603a823bd904 | [
"MIT"
] | null | null | null | import tensorflow as tf
def build_model(vocab_size, embedding_dim, rnn_units):
"""[summary]
Args:
vocab_size ([type]): [description]
embedding_dim ([type]): [description]
rnn_units ([type]): [description]
Returns:
[type]: [description]
"""
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim),
tf.keras.layers.LSTM(
rnn_units, return_sequences=True),
tf.keras.layers.Dense(vocab_size)
])
return model
| 24.136364 | 61 | 0.617702 |
acf06c2ad8c2dc3bf5440f5bb40379222be4f2da | 1,561 | py | Python | src/algoritmia/datastructures/maps/linkedlistmap.py | DavidLlorens/algoritmia | 40ca0a89ea6de9b633fa5f697f0a28cae70816a2 | [
"MIT"
] | 6 | 2018-09-15T15:09:10.000Z | 2022-02-27T01:23:11.000Z | src/algoritmia/datastructures/maps/linkedlistmap.py | JeromeIllgner/algoritmia | 406afe7206f2411557859bf03480c16db7dcce0d | [
"MIT"
] | null | null | null | src/algoritmia/datastructures/maps/linkedlistmap.py | JeromeIllgner/algoritmia | 406afe7206f2411557859bf03480c16db7dcce0d | [
"MIT"
] | 5 | 2018-07-10T20:19:55.000Z | 2021-03-31T03:32:22.000Z | from algoritmia.datastructures.maps import IMap
from algoritmia.datastructures.lists import LinkedList
class LinkedListMap(IMap):
    class KeyValue:
        """Internal record pairing a key with its value."""
        __slots__ = ("key", "value")
        def __init__(self, key, value):
            self.key, self.value = key, value
        def __eq__(self, other: "KeyValue<K, T>") -> "bool":
            # Equality (and thus LinkedList.remove) considers only the key.
            return self.key == other.key
    def __init__(self, data: "Iterable<(K, T)> or IMap<K, T>"=[]):
        # NOTE(review): mutable default argument; safe only because the
        # default is never mutated (items are copied into the LinkedList).
        if isinstance(data, IMap): data = tuple(data.items())
        self._linkedlist = LinkedList(data)
def _get_keyvalue_by_key(self, key: "K") -> "KeyValue<K, T>":
for item in self._linkedlist:
if item.key == key:
return item
return None
def __contains__(self, key: "K") -> "bool":
return self._get_keyvalue_by_key(key) != None
def __getitem__(self, key: "K") -> "T":
kv = self._get_keyvalue_by_key(key)
if kv == None: raise KeyError("")
return kv.value
def __setitem__(self, key: "K", value: "T") -> "T":
kv = self._get_keyvalue_by_key(key)
if kv == None:
kv = LinkedListMap.KeyValue(key, value)
self._linkedlist.append(kv)
else:
kv.value = value
return kv.value
    def __delitem__(self, key: "K", value: "T"):
        # NOTE(review): nonstandard __delitem__ signature (takes a value).
        # Only the key participates in KeyValue equality, so *value* is
        # effectively ignored by remove() -- confirm callers expect this.
        self._linkedlist.remove(LinkedListMap.KeyValue(key, value))
def __len__(self) -> "int":
return len(self._linkedlist) | 34.688889 | 68 | 0.55221 |
acf06da72af65889b09a9ad68251b9fe55ba717e | 13,934 | py | Python | pytorch_tabular/models/mixture_density/mdn.py | edchengmoore/pytorch_tabular | 25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | [
"MIT"
] | null | null | null | pytorch_tabular/models/mixture_density/mdn.py | edchengmoore/pytorch_tabular | 25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | [
"MIT"
] | null | null | null | pytorch_tabular/models/mixture_density/mdn.py | edchengmoore/pytorch_tabular | 25f87089fbed95b46f2a1a8a96fba1f581aa8af1 | [
"MIT"
] | null | null | null | # Pytorch Tabular
# Author: Manu Joseph <manujoseph@gmail.com>
# For license information, see LICENSE.TXT
"""Mixture Density Models"""
import logging
import math
from abc import ABCMeta
from typing import Dict, Optional
import torch
import torch.nn as nn
from omegaconf import DictConfig
from torch import Tensor
from torch.autograd import Variable
from torch.distributions import Categorical
from pytorch_tabular.models.autoint import AutoIntBackbone
from pytorch_tabular.models.category_embedding import CategoryEmbeddingBackbone
from pytorch_tabular.models.node import NODEBackbone
from pytorch_tabular.models.node import utils as utils
from ..base_model import BaseModel
try:
import wandb
WANDB_INSTALLED = True
except ImportError:
WANDB_INSTALLED = False
logger = logging.getLogger(__name__)
ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
LOG2PI = math.log(2 * math.pi)
class MixtureDensityHead(nn.Module):
def __init__(self, config: DictConfig, **kwargs):
self.hparams = config
super().__init__()
self._build_network()
    def _build_network(self):
        """Create the three linear projections: mixture logits, scales, means."""
        self.pi = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.pi.weight)
        # Sigma bias is optional; forward() keeps scales positive via ELU+1.
        self.sigma = nn.Linear(
            self.hparams.input_dim,
            self.hparams.num_gaussian,
            bias=self.hparams.sigma_bias_flag,
        )
        self.mu = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.mu.weight)
        if self.hparams.mu_bias_init is not None:
            # Optionally spread component means apart via user-supplied biases.
            for i, bias in enumerate(self.hparams.mu_bias_init):
                nn.init.constant_(self.mu.bias[i], bias)
def forward(self, x):
pi = self.pi(x)
sigma = self.sigma(x)
# Applying modified ELU activation
sigma = nn.ELU()(sigma) + 1 + 1e-15
mu = self.mu(x)
return pi, sigma, mu
    def gaussian_probability(self, sigma, mu, target, log=False):
        """Returns the probability of `target` given MoG parameters `sigma` and `mu`.
        Arguments:
            sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
                size, G is the number of Gaussians, and O is the number of
                dimensions per Gaussian.
            mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
                number of Gaussians, and O is the number of dimensions per Gaussian.
            target (BxI): A batch of target. B is the batch size and I is the number of
                input dimensions.
        Returns:
            probabilities (BxG): The probability of each point in the probability
                of the distribution in the corresponding sigma/mu index.
            NOTE(review): the per-dimension product is commented out below, so
            the returned tensor is element-wise (same shape as sigma), not
            reduced to BxG -- confirm against callers.
        """
        target = target.expand_as(sigma)
        if log:
            # Log-density of a univariate normal, element-wise.
            ret = (
                -torch.log(sigma)
                - 0.5 * LOG2PI
                - 0.5 * torch.pow((target - mu) / sigma, 2)
            )
        else:
            ret = (ONEOVERSQRT2PI / sigma) * torch.exp(
                -0.5 * ((target - mu) / sigma) ** 2
            )
        return ret  # torch.prod(ret, 2)
    def log_prob(self, pi, sigma, mu, y):
        """Log-likelihood of *y*: logsumexp over components of
        (component log-density + Gumbel-softmax mixture log-weight)."""
        log_component_prob = self.gaussian_probability(sigma, mu, y, log=True)
        # 1e-15 avoids log(0) when a mixture weight collapses.
        log_mix_prob = torch.log(
            nn.functional.gumbel_softmax(
                pi, tau=self.hparams.softmax_temperature, dim=-1
            )
            + 1e-15
        )
        return torch.logsumexp(log_component_prob + log_mix_prob, dim=-1)
    def sample(self, pi, sigma, mu):
        """Draw one sample per row from a MoG with normalized weights *pi*."""
        # Pick a component per row, then sample from that Gaussian.
        categorical = Categorical(pi)
        pis = categorical.sample().unsqueeze(1)
        sample = Variable(sigma.data.new(sigma.size(0), 1).normal_())
        # Gathering from the n Gaussian Distribution based on sampled indices
        sample = sample * sigma.gather(1, pis) + mu.gather(1, pis)
        return sample
    def generate_samples(self, pi, sigma, mu, n_samples=None):
        """Draw *n_samples* (default: hparams.n_samples) per row; returns
        a (batch, n_samples) tensor."""
        if n_samples is None:
            n_samples = self.hparams.n_samples
        samples = []
        # Normalize the pi logits once; sample() expects probabilities.
        softmax_pi = nn.functional.gumbel_softmax(
            pi, tau=self.hparams.softmax_temperature, dim=-1
        )
        assert (
            softmax_pi < 0
        ).sum().item() == 0, "pi parameter should not have negative"
        for _ in range(n_samples):
            samples.append(self.sample(softmax_pi, sigma, mu))
        samples = torch.cat(samples, dim=1)
        return samples
def generate_point_predictions(self, pi, sigma, mu, n_samples=None):
# Sample using n_samples and take average
samples = self.generate_samples(pi, sigma, mu, n_samples)
if self.hparams.central_tendency == "mean":
y_hat = torch.mean(samples, dim=-1)
elif self.hparams.central_tendency == "median":
y_hat = torch.median(samples, dim=-1).values
return y_hat.unsqueeze(1)
class BaseMDN(BaseModel, metaclass=ABCMeta):
    def __init__(self, config: DictConfig, **kwargs):
        """Shared base for MDN models; regression-only, single target.

        target_range is ignored because predictions are sampled from the
        mixture rather than squashed through a bounded activation.
        """
        assert config.task == "regression", "MDN is only implemented for Regression"
        assert config.output_dim == 1, "MDN is not implemented for multi-targets"
        if config.target_range is not None:
            logger.warning("MDN does not use target range. Ignoring it.")
        super().__init__(config, **kwargs)
def compute_head(self, x: Tensor):
pi, sigma, mu = self.mdn(x)
return {"pi": pi, "sigma": sigma, "mu": mu, "backbone_features": x}
def predict(self, x: Dict):
ret_value = self.forward(x)
return self.mdn.generate_point_predictions(
ret_value["pi"], ret_value["sigma"], ret_value["mu"]
)
def sample(self, x: Dict, n_samples: Optional[int] = None, ret_model_output=False):
ret_value = self.forward(x)
samples = self.mdn.generate_samples(
ret_value["pi"], ret_value["sigma"], ret_value["mu"], n_samples
)
if ret_model_output:
return samples, ret_value
else:
return samples
    def calculate_loss(self, y, pi, sigma, mu, tag="train"):
        """Negative log-likelihood of *y* under the mixture, plus optional
        L1/L2 weight regularization on the pi/sigma/mu heads; logs and
        returns the scalar loss."""
        # NLL Loss
        log_prob = self.mdn.log_prob(pi, sigma, mu, y)
        loss = torch.mean(-log_prob)
        if self.hparams.mdn_config.weight_regularization is not None:
            sigma_l1_reg = 0
            pi_l1_reg = 0
            mu_l1_reg = 0
            if self.hparams.mdn_config.lambda_sigma > 0:
                # Weight Regularization Sigma
                sigma_params = torch.cat(
                    [x.view(-1) for x in self.mdn.sigma.parameters()]
                )
                sigma_l1_reg = self.hparams.mdn_config.lambda_sigma * torch.norm(
                    sigma_params, self.hparams.mdn_config.weight_regularization
                )
            if self.hparams.mdn_config.lambda_pi > 0:
                pi_params = torch.cat([x.view(-1) for x in self.mdn.pi.parameters()])
                pi_l1_reg = self.hparams.mdn_config.lambda_pi * torch.norm(
                    pi_params, self.hparams.mdn_config.weight_regularization
                )
            if self.hparams.mdn_config.lambda_mu > 0:
                mu_params = torch.cat([x.view(-1) for x in self.mdn.mu.parameters()])
                mu_l1_reg = self.hparams.mdn_config.lambda_mu * torch.norm(
                    mu_params, self.hparams.mdn_config.weight_regularization
                )
            loss = loss + sigma_l1_reg + pi_l1_reg + mu_l1_reg
        self.log(
            f"{tag}_loss",
            loss,
            on_epoch=(tag == "valid") or (tag == "test"),
            on_step=(tag == "train"),
            # on_step=False,
            logger=True,
            prog_bar=True,
        )
        return loss
    def training_step(self, batch, batch_idx):
        """One optimization step: NLL loss; point-prediction metrics are
        skipped when speedup_training is set (sampling is expensive)."""
        y = batch["target"]
        ret_value = self(batch)
        loss = self.calculate_loss(
            y, ret_value["pi"], ret_value["sigma"], ret_value["mu"], tag="train"
        )
        if self.hparams.mdn_config.speedup_training:
            pass
        else:
            y_hat = self.mdn.generate_point_predictions(
                ret_value["pi"], ret_value["sigma"], ret_value["mu"]
            )
            _ = self.calculate_metrics(y, y_hat, tag="train")
        return loss
    def validation_step(self, batch, batch_idx):
        """Validation: log NLL loss and point-prediction metrics; also return
        the raw MDN outputs for epoch-end histogram logging."""
        y = batch["target"]
        ret_value = self(batch)
        _ = self.calculate_loss(
            y, ret_value["pi"], ret_value["sigma"], ret_value["mu"], tag="valid"
        )
        y_hat = self.mdn.generate_point_predictions(
            ret_value["pi"], ret_value["sigma"], ret_value["mu"]
        )
        _ = self.calculate_metrics(y, y_hat, tag="valid")
        return y_hat, y, ret_value
    def test_step(self, batch, batch_idx):
        """Test-set evaluation: log NLL loss and point-prediction metrics."""
        y = batch["target"]
        ret_value = self(batch)
        _ = self.calculate_loss(
            y, ret_value["pi"], ret_value["sigma"], ret_value["mu"], tag="test"
        )
        y_hat = self.mdn.generate_point_predictions(
            ret_value["pi"], ret_value["sigma"], ret_value["mu"]
        )
        _ = self.calculate_metrics(y, y_hat, tag="test")
        return y_hat, y
    def validation_epoch_end(self, outputs) -> None:
        """Log per-component mixture statistics (mean pi/mu/sigma) each epoch
        and, when wandb logging is enabled, debug histograms of the logits
        and of the mixture parameters."""
        do_log_logits = (
            hasattr(self.hparams, "log_logits")
            and self.hparams.log_logits
            and self.hparams.log_target == "wandb"
            and WANDB_INSTALLED
        )
        # outputs[i] is (y_hat, y, ret_value) from validation_step.
        pi = [
            nn.functional.gumbel_softmax(
                output[2]["pi"], tau=self.hparams.mdn_config.softmax_temperature, dim=-1
            )
            for output in outputs
        ]
        pi = torch.cat(pi).detach().cpu()
        for i in range(self.hparams.mdn_config.num_gaussian):
            self.log(
                f"mean_pi_{i}",
                pi[:, i].mean(),
                on_epoch=True,
                on_step=False,
                logger=True,
                prog_bar=False,
            )
        mu = [output[2]["mu"] for output in outputs]
        mu = torch.cat(mu).detach().cpu()
        for i in range(self.hparams.mdn_config.num_gaussian):
            self.log(
                f"mean_mu_{i}",
                mu[:, i].mean(),
                on_epoch=True,
                on_step=False,
                logger=True,
                prog_bar=False,
            )
        sigma = [output[2]["sigma"] for output in outputs]
        sigma = torch.cat(sigma).detach().cpu()
        for i in range(self.hparams.mdn_config.num_gaussian):
            self.log(
                f"mean_sigma_{i}",
                sigma[:, i].mean(),
                on_epoch=True,
                on_step=False,
                logger=True,
                prog_bar=False,
            )
        if do_log_logits:
            logits = [output[0] for output in outputs]
            logits = torch.cat(logits).detach().cpu()
            fig = self.create_plotly_histogram(logits.unsqueeze(1), "logits")
            wandb.log(
                {
                    "valid_logits": fig,
                    "global_step": self.global_step,
                },
                commit=False,
            )
            if self.hparams.mdn_config.log_debug_plot:
                fig = self.create_plotly_histogram(
                    pi, "pi", bin_dict=dict(start=0.0, end=1.0, size=0.1)
                )
                wandb.log(
                    {
                        "valid_pi": fig,
                        "global_step": self.global_step,
                    },
                    commit=False,
                )
                fig = self.create_plotly_histogram(mu, "mu")
                wandb.log(
                    {
                        "valid_mu": fig,
                        "global_step": self.global_step,
                    },
                    commit=False,
                )
                fig = self.create_plotly_histogram(sigma, "sigma")
                wandb.log(
                    {
                        "valid_sigma": fig,
                        "global_step": self.global_step,
                    },
                    commit=False,
                )
class CategoryEmbeddingMDN(BaseMDN):
    """MDN with a category-embedding MLP backbone."""
    def __init__(self, config: DictConfig, **kwargs):
        # Total width of all categorical embeddings.
        self.embedding_cat_dim = sum([y for x, y in config.embedding_dims])
        super().__init__(config, **kwargs)
    def _build_network(self):
        # Backbone
        self.backbone = CategoryEmbeddingBackbone(self.hparams)
        # Adding the last layer
        self.hparams.mdn_config.input_dim = self.backbone.output_dim
        self.mdn = MixtureDensityHead(self.hparams.mdn_config)
class NODEMDN(BaseMDN):
    """MDN on a NODE (Neural Oblivious Decision Ensemble) backbone."""
    def __init__(self, config: DictConfig, **kwargs):
        super().__init__(config, **kwargs)
    def subset(self, x):
        # Collapse the tree dimension by averaging over all trees.
        return x[..., :].mean(dim=-2)
    def _build_network(self):
        self.hparams.node_input_dim = (
            self.hparams.continuous_dim + self.hparams.categorical_dim
        )
        backbone = NODEBackbone(self.hparams)
        # average first n channels of every tree, where n is the number of output targets for regression
        # and number of classes for classification
        output_response = utils.Lambda(self.subset)
        self.backbone = nn.Sequential(backbone, output_response)
        # Adding the last layer
        self.hparams.mdn_config.input_dim = backbone.output_dim
        setattr(self.backbone, "output_dim", backbone.output_dim)
        self.mdn = MixtureDensityHead(self.hparams.mdn_config)
class AutoIntMDN(BaseMDN):
    """MDN on an AutoInt (automatic feature interaction) backbone."""
    def __init__(self, config: DictConfig, **kwargs):
        super().__init__(config, **kwargs)
    def _build_network(self):
        # Backbone
        self.backbone = AutoIntBackbone(self.hparams)
        # Adding the last layer
        self.hparams.mdn_config.input_dim = self.backbone.output_dim
        self.mdn = MixtureDensityHead(self.hparams.mdn_config)
| 36.960212 | 104 | 0.572341 |
acf06db27edf01fa3e031aa843431d7ed93d951b | 13,008 | py | Python | nitro/resource/config/tm/tmglobal_tmtrafficpolicy_binding.py | HanseMerkur/nitro-python | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | [
"Apache-2.0"
] | 2 | 2020-08-24T18:04:22.000Z | 2020-08-24T18:04:47.000Z | nitro/resource/config/tm/tmglobal_tmtrafficpolicy_binding.py | HanseMerkur/nitro-python | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | [
"Apache-2.0"
] | null | null | null | nitro/resource/config/tm/tmglobal_tmtrafficpolicy_binding.py | HanseMerkur/nitro-python | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class tmglobal_tmtrafficpolicy_binding(base_resource) :
"""Binding class showing the tmtrafficpolicy that can be bound to tmglobal."""
    def __init__(self) :
        # Bound policy name and its priority.
        self._policyname = ""
        self._priority = 0
        # Read-only binding attributes reported by the NITRO API.
        self._bindpolicytype = 0
        self._type = ""
        self._globalbindtype = ""
        self._gotopriorityexpression = ""
        # Count of bindings returned by count queries.
        self.___count = 0
@property
def priority(self) :
"""The priority of the policy."""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority of the policy.
:param priority:
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def globalbindtype(self) :
""".<br/>Default value: SYSTEM_GLOBAL<br/>Possible values = SYSTEM_GLOBAL, VPN_GLOBAL, RNAT_GLOBAL."""
try :
return self._globalbindtype
except Exception as e:
raise e
@globalbindtype.setter
def globalbindtype(self, globalbindtype) :
""".<br/>Default value: SYSTEM_GLOBAL<br/>Possible values = SYSTEM_GLOBAL, VPN_GLOBAL, RNAT_GLOBAL
:param globalbindtype:
"""
try :
self._globalbindtype = globalbindtype
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Applicable only to advance tmsession policy. Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Applicable only to advance tmsession policy. Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
:param gotopriorityexpression:
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
"""The name of the policy."""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""The name of the policy.
:param policyname:
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def type(self) :
"""Bindpoint to which the policy is bound.<br/>Possible values = REQ_OVERRIDE, REQ_DEFAULT, RES_OVERRIDE, RES_DEFAULT."""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
"""Bindpoint to which the policy is bound.<br/>Possible values = REQ_OVERRIDE, REQ_DEFAULT, RES_OVERRIDE, RES_DEFAULT
:param type:
"""
try :
self._type = type
except Exception as e:
raise e
@property
def bindpolicytype(self) :
"""Bound policy type."""
try :
return self._bindpolicytype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(tmglobal_tmtrafficpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.tmglobal_tmtrafficpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = tmglobal_tmtrafficpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [tmglobal_tmtrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = tmglobal_tmtrafficpolicy_binding()
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [tmglobal_tmtrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
"""Use this API to fetch a tmglobal_tmtrafficpolicy_binding resources.
:param service:
"""
try :
obj = tmglobal_tmtrafficpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
"""Use this API to fetch filtered set of tmglobal_tmtrafficpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param filter_:
"""
try :
obj = tmglobal_tmtrafficpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
"""Use this API to count tmglobal_tmtrafficpolicy_binding resources configued on NetScaler.
:param service:
"""
try :
obj = tmglobal_tmtrafficpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
"""Use this API to count the filtered set of tmglobal_tmtrafficpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param service:
:param filter_:
"""
try :
obj = tmglobal_tmtrafficpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Globalbindtype:
""" """
SYSTEM_GLOBAL = "SYSTEM_GLOBAL"
VPN_GLOBAL = "VPN_GLOBAL"
RNAT_GLOBAL = "RNAT_GLOBAL"
class Type:
""" """
REQ_OVERRIDE = "REQ_OVERRIDE"
REQ_DEFAULT = "REQ_DEFAULT"
RES_OVERRIDE = "RES_OVERRIDE"
RES_DEFAULT = "RES_DEFAULT"
class tmglobal_tmtrafficpolicy_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.tmglobal_tmtrafficpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.tmglobal_tmtrafficpolicy_binding = [tmglobal_tmtrafficpolicy_binding() for _ in range(length)]
| 38.258824 | 436 | 0.63361 |
acf06f4cca8efb52ac7fa76ea1ba5ffce7170f4c | 418 | py | Python | packages/meta/galaxy/project_galaxy_meta.py | quacksawbones/galaxy-1 | 65f7259b29d3886e526d9be670c60d9da9fbe038 | [
"CC-BY-3.0"
] | 1,085 | 2015-02-18T16:14:38.000Z | 2022-03-30T23:52:07.000Z | packages/meta/galaxy/project_galaxy_meta.py | quacksawbones/galaxy-1 | 65f7259b29d3886e526d9be670c60d9da9fbe038 | [
"CC-BY-3.0"
] | 11,253 | 2015-02-18T17:47:32.000Z | 2022-03-31T21:47:03.000Z | packages/meta/galaxy/project_galaxy_meta.py | quacksawbones/galaxy-1 | 65f7259b29d3886e526d9be670c60d9da9fbe038 | [
"CC-BY-3.0"
] | 1,000 | 2015-02-18T16:18:10.000Z | 2022-03-29T08:22:56.000Z | __version__ = "22.1.0.dev0"
PROJECT_NAME = "galaxy"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_DESCRIPTION = 'Galaxy Server Metapackage'
PROJECT_EMAIL = 'galaxy-committers@lists.galaxyproject.org'
RAW_CONTENT_URL = "https://raw.github.com/{}/{}/master/".format(
PROJECT_USERAME, PROJECT_NAME
)
| 34.833333 | 64 | 0.77512 |
acf070075e3feafe053bcc9109d4137ad1acf44b | 15,606 | py | Python | tensorflow/python/tools/freeze_graph.py | OzairKhanBro/tensorflow | 3f08f82a7c896d357262a47eda8df80b778a2672 | [
"Apache-2.0"
] | 5 | 2018-10-20T03:54:49.000Z | 2021-01-02T07:19:53.000Z | tensorflow/python/tools/freeze_graph.py | Halimaz/tensorflow-1 | 3437fba39d5bca77fd7627aad15ba76fb75f5731 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/tools/freeze_graph.py | Halimaz/tensorflow-1 | 3437fba39d5bca77fd7627aad15ba76fb75f5731 | [
"Apache-2.0"
] | 2 | 2018-11-03T01:20:09.000Z | 2018-11-17T07:50:32.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts checkpoint variables into Const ops in a standalone GraphDef file.
This script is designed to take a GraphDef proto, a SaverDef proto, and a set of
variable values stored in a checkpoint file, and output a GraphDef with all of
the variable ops converted into const ops containing the values of the
variables.
It's useful to do this when we need to load a single file in C++, especially in
environments like mobile or embedded where we may not have access to the
RestoreTensor ops and file loading calls that they rely on.
An example of command-line usage is:
bazel build tensorflow/python/tools:freeze_graph && \
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=some_graph_def.pb \
--input_checkpoint=model.ckpt-8361242 \
--output_graph=/tmp/frozen_graph.pb --output_node_names=softmax
You can also look at freeze_graph_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import re
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
def _has_variables(sess):
"""Determines if the graph has any variables.
Args:
sess: TensorFlow Session.
Returns:
Bool.
"""
for op in sess.graph.get_operations():
if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
def freeze_graph_with_def_protos(input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist="",
variable_names_blacklist="",
input_meta_graph_def=None,
input_saved_model_dir=None,
saved_model_tags=None,
checkpoint_version=saver_pb2.SaverDef.V2):
"""Converts all variables in a graph and checkpoint into constants."""
del restore_op_name, filename_tensor_name # Unused by updated loading code.
# 'input_checkpoint' may be a prefix if we're using Saver V2 format
if (not input_saved_model_dir and
not checkpoint_management.checkpoint_exists(input_checkpoint)):
print("Input checkpoint '" + input_checkpoint + "' doesn't exist!")
return -1
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
if input_meta_graph_def:
for node in input_meta_graph_def.graph_def.node:
node.device = ""
elif input_graph_def:
for node in input_graph_def.node:
node.device = ""
if input_graph_def:
_ = importer.import_graph_def(input_graph_def, name="")
with session.Session() as sess:
if input_saver_def:
saver = saver_lib.Saver(
saver_def=input_saver_def, write_version=checkpoint_version)
saver.restore(sess, input_checkpoint)
elif input_meta_graph_def:
restorer = saver_lib.import_meta_graph(
input_meta_graph_def, clear_devices=True)
restorer.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes.replace(" ", "").split(","))
elif input_saved_model_dir:
if saved_model_tags is None:
saved_model_tags = []
loader.load(sess, saved_model_tags, input_saved_model_dir)
else:
var_list = {}
reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
var_to_shape_map = reader.get_variable_to_shape_map()
# List of all partition variables. Because the condition is heuristic
# based, the list could include false positives.
all_parition_variable_names = [
tensor.name.split(":")[0]
for op in sess.graph.get_operations()
for tensor in op.values()
if re.search(r"/part_\d+/", tensor.name)
]
has_partition_var = False
for key in var_to_shape_map:
try:
tensor = sess.graph.get_tensor_by_name(key + ":0")
if any(key in name for name in all_parition_variable_names):
has_partition_var = True
except KeyError:
# This tensor doesn't exist in the graph (for example it's
# 'global_step' or a similar housekeeping element) so skip it.
continue
var_list[key] = tensor
try:
saver = saver_lib.Saver(
var_list=var_list, write_version=checkpoint_version)
except TypeError as e:
# `var_list` is required to be a map of variable names to Variable
# tensors. Partition variables are Identity tensors that cannot be
# handled by Saver.
if has_partition_var:
print("Models containing partition variables cannot be converted "
"from checkpoint files. Please pass in a SavedModel using "
"the flag --input_saved_model_dir.")
return -1
# Models that have been frozen previously do not contain Variables.
elif _has_variables(sess):
print("No variables were found in this model. It is likely the model "
"was frozen previously. You cannot freeze a graph twice.")
return 0
else:
raise e
saver.restore(sess, input_checkpoint)
if initializer_nodes:
sess.run(initializer_nodes.replace(" ", "").split(","))
variable_names_whitelist = (
variable_names_whitelist.replace(" ", "").split(",")
if variable_names_whitelist else None)
variable_names_blacklist = (
variable_names_blacklist.replace(" ", "").split(",")
if variable_names_blacklist else None)
if input_meta_graph_def:
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_meta_graph_def.graph_def,
output_node_names.replace(" ", "").split(","),
variable_names_whitelist=variable_names_whitelist,
variable_names_blacklist=variable_names_blacklist)
else:
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names.replace(" ", "").split(","),
variable_names_whitelist=variable_names_whitelist,
variable_names_blacklist=variable_names_blacklist)
# Write GraphDef to file if output path has been given.
if output_graph:
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
return output_graph_def
def _parse_input_graph_proto(input_graph, input_binary):
"""Parser input tensorflow graph into GraphDef proto."""
if not gfile.Exists(input_graph):
print("Input graph file '" + input_graph + "' does not exist!")
return -1
input_graph_def = graph_pb2.GraphDef()
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_graph, mode) as f:
if input_binary:
input_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_graph_def)
return input_graph_def
def _parse_input_meta_graph_proto(input_graph, input_binary):
"""Parser input tensorflow graph into MetaGraphDef proto."""
if not gfile.Exists(input_graph):
print("Input meta graph file '" + input_graph + "' does not exist!")
return -1
input_meta_graph_def = MetaGraphDef()
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_graph, mode) as f:
if input_binary:
input_meta_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_meta_graph_def)
print("Loaded meta graph file '" + input_graph)
return input_meta_graph_def
def _parse_input_saver_proto(input_saver, input_binary):
"""Parser input tensorflow Saver into SaverDef proto."""
if not gfile.Exists(input_saver):
print("Input saver file '" + input_saver + "' does not exist!")
return -1
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_saver, mode) as f:
saver_def = saver_pb2.SaverDef()
if input_binary:
saver_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), saver_def)
return saver_def
def freeze_graph(input_graph,
input_saver,
input_binary,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist="",
variable_names_blacklist="",
input_meta_graph=None,
input_saved_model_dir=None,
saved_model_tags=tag_constants.SERVING,
checkpoint_version=saver_pb2.SaverDef.V2):
"""Converts all variables in a graph and checkpoint into constants."""
input_graph_def = None
if input_saved_model_dir:
input_graph_def = saved_model_utils.get_meta_graph_def(
input_saved_model_dir, saved_model_tags).graph_def
elif input_graph:
input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
input_meta_graph_def = None
if input_meta_graph:
input_meta_graph_def = _parse_input_meta_graph_proto(
input_meta_graph, input_binary)
input_saver_def = None
if input_saver:
input_saver_def = _parse_input_saver_proto(input_saver, input_binary)
freeze_graph_with_def_protos(
input_graph_def,
input_saver_def,
input_checkpoint,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist,
variable_names_blacklist,
input_meta_graph_def,
input_saved_model_dir,
saved_model_tags.replace(" ", "").split(","),
checkpoint_version=checkpoint_version)
def main(unused_args, flags):
if flags.checkpoint_version == 1:
checkpoint_version = saver_pb2.SaverDef.V1
elif flags.checkpoint_version == 2:
checkpoint_version = saver_pb2.SaverDef.V2
else:
print("Invalid checkpoint version (must be '1' or '2'): %d" %
flags.checkpoint_version)
return -1
freeze_graph(flags.input_graph, flags.input_saver, flags.input_binary,
flags.input_checkpoint, flags.output_node_names,
flags.restore_op_name, flags.filename_tensor_name,
flags.output_graph, flags.clear_devices, flags.initializer_nodes,
flags.variable_names_whitelist, flags.variable_names_blacklist,
flags.input_meta_graph, flags.input_saved_model_dir,
flags.saved_model_tags, checkpoint_version)
def run_main():
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--input_graph",
type=str,
default="",
help="TensorFlow \'GraphDef\' file to load.")
parser.add_argument(
"--input_saver",
type=str,
default="",
help="TensorFlow saver file to load.")
parser.add_argument(
"--input_checkpoint",
type=str,
default="",
help="TensorFlow variables file to load.")
parser.add_argument(
"--checkpoint_version",
type=int,
default=2,
help="Tensorflow variable file format")
parser.add_argument(
"--output_graph",
type=str,
default="",
help="Output \'GraphDef\' file name.")
parser.add_argument(
"--input_binary",
nargs="?",
const=True,
type="bool",
default=False,
help="Whether the input files are in binary format.")
parser.add_argument(
"--output_node_names",
type=str,
default="",
help="The name of the output nodes, comma separated.")
parser.add_argument(
"--restore_op_name",
type=str,
default="save/restore_all",
help="""\
The name of the master restore operator. Deprecated, unused by updated \
loading code.
""")
parser.add_argument(
"--filename_tensor_name",
type=str,
default="save/Const:0",
help="""\
The name of the tensor holding the save path. Deprecated, unused by \
updated loading code.
""")
parser.add_argument(
"--clear_devices",
nargs="?",
const=True,
type="bool",
default=True,
help="Whether to remove device specifications.")
parser.add_argument(
"--initializer_nodes",
type=str,
default="",
help="Comma separated list of initializer nodes to run before freezing.")
parser.add_argument(
"--variable_names_whitelist",
type=str,
default="",
help="""\
Comma separated list of variables to convert to constants. If specified, \
only those variables will be converted to constants.\
""")
parser.add_argument(
"--variable_names_blacklist",
type=str,
default="",
help="""\
Comma separated list of variables to skip converting to constants.\
""")
parser.add_argument(
"--input_meta_graph",
type=str,
default="",
help="TensorFlow \'MetaGraphDef\' file to load.")
parser.add_argument(
"--input_saved_model_dir",
type=str,
default="",
help="Path to the dir with TensorFlow \'SavedModel\' file and variables.")
parser.add_argument(
"--saved_model_tags",
type=str,
default="serve",
help="""\
Group of tag(s) of the MetaGraphDef to load, in string format,\
separated by \',\'. For tag-set contains multiple tags, all tags \
must be passed in.\
""")
flags, unparsed = parser.parse_known_args()
my_main = lambda unused_args: main(unused_args, flags)
app.run(main=my_main, argv=[sys.argv[0]] + unparsed)
if __name__ == '__main__':
run_main()
| 36.125 | 80 | 0.66923 |
acf0705e7cc4950728485cc6e72aa9a5803bcb3a | 263 | py | Python | runtime/java9_container.py | tobegit3hub/lambda-docker | 352701b338735af9fa8a516900896e884febe99b | [
"MIT"
] | 343 | 2015-12-19T09:48:37.000Z | 2022-03-30T03:20:52.000Z | runtime/java9_container.py | tobegit3hub/lambda-docker | 352701b338735af9fa8a516900896e884febe99b | [
"MIT"
] | 5 | 2015-12-26T07:25:11.000Z | 2017-01-05T02:35:57.000Z | runtime/java9_container.py | tobegit3hub/lambda-docker | 352701b338735af9fa8a516900896e884febe99b | [
"MIT"
] | 47 | 2016-01-03T06:28:00.000Z | 2021-01-12T07:44:38.000Z |
import basic_container
class Java9Container(basic_container.BasicContainer):
def __init__(self):
super(self.__class__, self).__init__()
self.image = "java:9"
self.command = 'sh -c "javac main.java && java main"'
self.file_extension = ".java"
| 20.230769 | 54 | 0.703422 |
acf07110c007d350de70293d26e5f5e1c40676ae | 10,292 | py | Python | lib/galaxy/webapps/galaxy/services/dataset_collections.py | pvanheus/galaxy | 48403b0e45b71c4b0ce7a1e22d65a0a7cdb79574 | [
"CC-BY-3.0"
] | 3 | 2016-09-15T21:04:56.000Z | 2019-04-21T02:48:25.000Z | lib/galaxy/webapps/galaxy/services/dataset_collections.py | pvanheus/galaxy | 48403b0e45b71c4b0ce7a1e22d65a0a7cdb79574 | [
"CC-BY-3.0"
] | 209 | 2015-06-17T16:15:20.000Z | 2022-03-21T15:23:07.000Z | lib/galaxy/webapps/galaxy/services/dataset_collections.py | pvanheus/galaxy | 48403b0e45b71c4b0ce7a1e22d65a0a7cdb79574 | [
"CC-BY-3.0"
] | 7 | 2016-07-10T16:44:30.000Z | 2020-08-30T19:25:51.000Z | from logging import getLogger
from typing import List, Optional, Set
from pydantic import BaseModel, Extra, Field
from galaxy import exceptions
from galaxy.datatypes.registry import Registry
from galaxy.managers.collections import DatasetCollectionManager
from galaxy.managers.collections_util import (
api_payload_to_create_params,
dictify_dataset_collection_instance,
dictify_element_reference,
)
from galaxy.managers.context import ProvidesHistoryContext
from galaxy.managers.hdcas import HDCAManager
from galaxy.managers.histories import HistoryManager
from galaxy.schema.fields import EncodedDatabaseIdField, ModelClassField
from galaxy.schema.schema import (
AnyHDCA,
CreateNewCollectionPayload,
DatasetCollectionInstanceType,
DCESummary,
DCEType,
HDCADetailed,
TagCollection,
)
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.webapps.base.controller import UsesLibraryMixinItems
from galaxy.webapps.galaxy.services.base import ServiceBase
log = getLogger(__name__)
class UpdateCollectionAttributePayload(BaseModel):
"""Contains attributes that can be updated for all elements in a dataset collection."""
dbkey: str = Field(
...,
description="TODO"
)
class Config:
extra = Extra.forbid # will cause validation to fail if extra attributes are included,
class DatasetCollectionAttributesResult(BaseModel):
dbkey: str = Field(
...,
description="TODO"
)
# Are the following fields really used/needed?
extension: str = Field(
...,
description="The dataset file extension.",
example="txt"
)
model_class: str = ModelClassField("HistoryDatasetCollectionAssociation")
dbkeys: Optional[Set[str]]
extensions: Optional[Set[str]]
tags: TagCollection
class SuitableConverter(BaseModel):
tool_id: str = Field(
...,
description="The ID of the tool that can perform the type conversion."
)
name: str = Field(
...,
description="The name of the converter."
)
target_type: str = Field(
...,
description="The type to convert to."
)
original_type: str = Field(
...,
description="The type to convert from."
)
class SuitableConverters(BaseModel):
"""Collection of converters that can be used on a particular dataset collection."""
__root__: List[SuitableConverter]
class DatasetCollectionContentElements(BaseModel):
"""Represents a collection of elements contained in the dataset collection."""
__root__: List[DCESummary]
class DatasetCollectionsService(ServiceBase, UsesLibraryMixinItems):
def __init__(
self,
security: IdEncodingHelper,
history_manager: HistoryManager,
hdca_manager: HDCAManager,
collection_manager: DatasetCollectionManager,
datatypes_registry: Registry,
):
super().__init__(security)
self.history_manager = history_manager
self.hdca_manager = hdca_manager
self.collection_manager = collection_manager
self.datatypes_registry = datatypes_registry
def create(self, trans: ProvidesHistoryContext, payload: CreateNewCollectionPayload) -> HDCADetailed:
"""
Create a new dataset collection instance.
:type payload: dict
:param payload: (optional) dictionary structure containing:
* collection_type: dataset collection type to create.
* instance_type: Instance type - 'history' or 'library'.
* name: the new dataset collections's name
* datasets: object describing datasets for collection
:rtype: dict
:returns: element view of new dataset collection
"""
# TODO: Error handling...
create_params = api_payload_to_create_params(payload.dict(exclude_unset=True))
if payload.instance_type == DatasetCollectionInstanceType.history:
if payload.history_id is None:
raise exceptions.RequestParameterInvalidException("Parameter history_id is required.")
history_id = self.decode_id(payload.history_id)
history = self.history_manager.get_owned(history_id, trans.user, current_history=trans.history)
create_params["parent"] = history
create_params["history"] = history
elif payload.instance_type == DatasetCollectionInstanceType.library:
library_folder = self.get_library_folder(trans, payload.folder_id, check_accessible=True)
self.check_user_can_add_to_library_item(trans, library_folder, check_accessible=False)
create_params["parent"] = library_folder
else:
raise exceptions.RequestParameterInvalidException()
dataset_collection_instance = self.collection_manager.create(trans=trans, **create_params)
rval = dictify_dataset_collection_instance(
dataset_collection_instance, security=trans.security, parent=create_params["parent"]
)
return rval
def copy(self, trans: ProvidesHistoryContext, id: EncodedDatabaseIdField, payload: UpdateCollectionAttributePayload):
"""
Iterate over all datasets of a collection and copy datasets with new attributes to a new collection.
e.g attributes = {'dbkey': 'dm3'}
"""
self.collection_manager.copy(
trans, trans.history, "hdca", id, copy_elements=True, dataset_instance_attributes=payload.dict()
)
def attributes(
self,
trans: ProvidesHistoryContext,
id: EncodedDatabaseIdField,
instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
) -> DatasetCollectionAttributesResult:
"""
Returns dbkey/extension for collection elements
"""
dataset_collection_instance = self.collection_manager.get_dataset_collection_instance(
trans,
id=id,
instance_type=instance_type,
check_ownership=True
)
rval = dataset_collection_instance.to_dict(view="dbkeysandextensions")
return rval
def suitable_converters(
self,
trans: ProvidesHistoryContext,
id: EncodedDatabaseIdField,
instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
) -> SuitableConverters:
"""
Returns suitable converters for all datatypes in collection
"""
rval = self.collection_manager.get_converters_for_collection(trans, id, self.datatypes_registry, instance_type)
return rval
def show(
self,
trans: ProvidesHistoryContext,
id: EncodedDatabaseIdField,
instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
) -> AnyHDCA:
"""
Returns information about a particular dataset collection.
"""
dataset_collection_instance = self.collection_manager.get_dataset_collection_instance(
trans,
id=id,
instance_type=instance_type,
)
if instance_type == DatasetCollectionInstanceType.history:
parent = dataset_collection_instance.history
elif instance_type == DatasetCollectionInstanceType.library:
parent = dataset_collection_instance.folder
else:
raise exceptions.RequestParameterInvalidException()
rval = dictify_dataset_collection_instance(
dataset_collection_instance,
security=trans.security,
parent=parent,
view='element'
)
return rval
def contents(
    self,
    trans: ProvidesHistoryContext,
    hdca_id: EncodedDatabaseIdField,
    parent_id: EncodedDatabaseIdField,
    instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
) -> DatasetCollectionContentElements:
    """
    Shows direct child contents of indicated dataset collection parent id

    :type string: encoded string id
    :param id: HDCA.id
    :type string: encoded string id
    :param parent_id: parent dataset_collection.id for the dataset contents to be viewed
    :type integer: int
    :param limit: pagination limit for returned dataset collection elements
    :type integer: int
    :param offset: pagination offset for returned dataset collection elements
    :rtype: list
    :returns: list of dataset collection elements and contents
    """
    # validate HDCA for current user, will throw error if not permitted
    # TODO: refactor get_dataset_collection_instance
    hdca = self.collection_manager.get_dataset_collection_instance(
        trans, id=hdca_id, check_ownership=True, instance_type=instance_type
    )
    # check to make sure the dsc is part of the validated hdca
    # (parent_id == hdca_id means the root collection itself was requested)
    decoded_parent_id = self.decode_id(parent_id)
    if parent_id != hdca_id and not hdca.contains_collection(decoded_parent_id):
        raise exceptions.ObjectNotFound('Requested dataset collection is not contained within indicated history content')
    # retrieve contents
    contents = self.collection_manager.get_collection_contents(trans, decoded_parent_id, limit=limit, offset=offset)

    # dictify and tack on a collection_url for drilling down into nested collections
    def serialize_element(dsc_element) -> DCESummary:
        # Nested collections additionally receive a contents_url so the
        # client can drill one level deeper with another call to this view.
        result = dictify_element_reference(dsc_element, recursive=False, security=trans.security)
        if result["element_type"] == DCEType.dataset_collection:
            assert trans.url_builder
            result["object"]["contents_url"] = trans.url_builder('contents_dataset_collection',
                                                                 hdca_id=self.encode_id(hdca.id),
                                                                 parent_id=self.encode_id(result["object"]["id"]))
        # Encode every raw database id in place before returning to the client.
        trans.security.encode_all_ids(result, recursive=True)
        return result

    rval = [serialize_element(el) for el in contents]
    return DatasetCollectionContentElements.parse_obj(rval)
| 39.43295 | 125 | 0.687427 |
acf07158b8e9e81bdf69c70aff33e917dd244bbf | 2,252 | py | Python | python/akg/ms/gpu/assign.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | python/akg/ms/gpu/assign.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | python/akg/ms/gpu/assign.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function:Assign"""
import akg
import akg.topi as topi
import akg.tvm
from akg.utils import validation_check as vc_util
from akg.utils import kernel_exec as utils
@akg.schedule(topi.cuda.injective_single_kernel.schedule_injective)
def Assign(ref, val):
    """
    Assign val to ref.

    Args:
        ref: Tensor, which is mutable.
        val: Tensor, which will be assigned to ref.

    Returns:
        fake_output: Tensor, all zeros has the same shape as ref, needed by ME.
        ref_val: Tensor, ref assigned with val.
        attrs: Dictionary, indicates that ref and ref_val share the same buf.
    """
    # Both float flavours and every signed/unsigned integer width are accepted.
    supported_types = [
        vc_util.DtypeForDavinci.ALL_FLOAT,
        vc_util.DtypeForDavinci.INT8, vc_util.DtypeForDavinci.INT16,
        vc_util.DtypeForDavinci.INT32, vc_util.DtypeForDavinci.INT64,
        vc_util.DtypeForDavinci.UINT8, vc_util.DtypeForDavinci.UINT16,
        vc_util.DtypeForDavinci.UINT32, vc_util.DtypeForDavinci.UINT64,
    ]
    vc_util.ops_dtype_check(val.dtype, supported_types)

    ref_shape = [dim.value for dim in ref.shape]
    val_shape = [dim.value for dim in val.shape]
    if ref_shape != val_shape:
        raise RuntimeError("assign operations need input shape equal!")
    vc_util.check_shape(val_shape)

    # Element-wise copy of val, then bind the copy onto ref's buffer in place.
    ref_val = akg.tvm.compute(val_shape, lambda *indice: val(*indice), name="ref_val")
    ref_val, binds_info = utils.TensorUtils.inplace_set(ref, ref_val)
    attrs = {utils.BINDS: binds_info}
    # ME requires a dummy output tensor mirroring ref's shape.
    fake_output = akg.tvm.compute(ref.shape, lambda *indice: ref_val(*indice), name="fake_output")
    return fake_output, ref_val, attrs
| 41.703704 | 100 | 0.69405 |
acf0725e3a91a5481c90518daaa9b2520677d0c0 | 401 | py | Python | leetcode/67/67.add-binary.py | Yu-Ren-NEU/Leetcode | e82bc2734680606f676fe867dbcb9b9e71635bf5 | [
"MIT"
] | 1 | 2020-01-06T06:54:22.000Z | 2020-01-06T06:54:22.000Z | leetcode/67/67.add-binary.py | Yu-Ren-NEU/Leetcode | e82bc2734680606f676fe867dbcb9b9e71635bf5 | [
"MIT"
] | 1 | 2020-01-07T02:22:06.000Z | 2020-01-07T02:22:06.000Z | leetcode/67/67.add-binary.py | Yu-Ren-NEU/Leetcode | e82bc2734680606f676fe867dbcb9b9e71635bf5 | [
"MIT"
] | 2 | 2020-01-06T20:04:04.000Z | 2020-01-10T08:24:01.000Z | #
# @lc app=leetcode id=67 lang=python3
#
# [67] Add Binary
#
# @lc code=start
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        # Parse both operands as base-2 integers, add them, and render the
        # sum back as a binary string.
        return f"{int(a, 2) + int(b, 2):b}"

    def test(self):
        assert self.addBinary("11", "1") == "100"
        assert self.addBinary("1010", "1011") == "10101"
# Run the embedded sanity checks whenever this module is executed/imported.
sol = Solution()
sol.test()
# @lc code=end
| 19.095238 | 57 | 0.558603 |
acf073251b23e76638416b6eb6984acbbf176e84 | 10,011 | py | Python | tests/test_str_utils.py | jwcompdev/UltimateLinuxToolkit | 7f03e6bebf2739bdd32e64f18566d68f13687421 | [
"Apache-2.0"
] | null | null | null | tests/test_str_utils.py | jwcompdev/UltimateLinuxToolkit | 7f03e6bebf2739bdd32e64f18566d68f13687421 | [
"Apache-2.0"
] | null | null | null | tests/test_str_utils.py | jwcompdev/UltimateLinuxToolkit | 7f03e6bebf2739bdd32e64f18566d68f13687421 | [
"Apache-2.0"
] | null | null | null | # PyLinuxToolkit
# Copyright (C) 2022 JWCompDev
#
# LicenseHeader.txt
# Copyright (C) 2022 JWCompDev <jwcompdev@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation; either version 2.0 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with this program. If not, see <https://www.apache.org/licenses/>.
import pytest
from pystdlib.str_utils import *
from pystdlib.utils import IllegalArgumentError
def test_is_boolean():
    """is_boolean recognises common truthy/falsy words and rejects junk."""
    recognized = (
        "true", "false", "t", "f", "1", "0", "succeed", "fail",
        "succeeded", "failed", "yes", "no", "enabled", "disabled",
        "y", "n", "TRUE", " FALSE ", "FALSE ", " FALSE",
    )
    for word in recognized:
        assert is_boolean(word)
    for junk in (" FALSE /", " !@#$%^&*() ", "", None):
        # noinspection PyTypeChecker
        assert not is_boolean(junk)
def test_to_boolean():
    """to_boolean maps recognised words to bools and unknown input to None."""
    for word in ("true", "t", "1", "succeed", "succeeded",
                 "yes", "enabled", "y", "TRUE"):
        assert to_boolean(word)
    for word in ("false", "f", "0", "fail", "failed", "no",
                 "disabled", "n", " FALSE ", "FALSE ", " FALSE"):
        assert not to_boolean(word)
    for junk in (" FALSE /", " !@#$%^&*() ", "", None):
        # noinspection PyTypeChecker
        assert to_boolean(junk) is None
def test_parse_int_test_no_match():
    """Unparseable input with no default raises the appropriate error."""
    for text, exc in (("", ValueError), (None, TypeError),
                      ("abcd", ValueError), ("1.0", ValueError)):
        with pytest.raises(exc):
            # noinspection PyTypeChecker
            parse_int(text)
def test_parse_int_test_no_match_with_default():
    """Unparseable input falls back to the default, coerced to int."""
    bad_inputs = ("", None, "abcd", "1.0")
    for text in bad_inputs:
        # noinspection PyTypeChecker
        assert parse_int(text, 1) == 1
    # Float defaults are truncated to int.
    # noinspection PyTypeChecker
    assert parse_int(None, 1.5) == 1
    assert parse_int("abcd", 1.0) == 1
    for text in bad_inputs:
        # noinspection PyTypeChecker
        assert parse_int(text, 2) == 2
    assert isinstance(parse_int("", 2), int)
    assert isinstance(parse_int("", 2.0), int)
    # An explicit None default re-raises the parse failure.
    for text in bad_inputs:
        with pytest.raises(TypeError if text is None else ValueError):
            # noinspection PyTypeChecker
            parse_int(text, None)
    # Non-numeric defaults are rejected outright.
    for text, default in (("", []), (None, "")):
        with pytest.raises(IllegalArgumentError):
            # noinspection PyTypeChecker
            parse_int(text, default)
def test_parse_int_test_match():
    """Plain integer strings parse to their int value."""
    for text, expected in (("1", 1), ("0", 0), ("50", 50), ("100", 100)):
        assert parse_int(text) == expected
def test_parse_int_test_match_with_default():
    """A parseable string wins over any supplied default."""
    for text, default, expected in (("1", 5, 1), ("0", 1, 0),
                                    ("50", 1, 50), ("100", 1, 100)):
        assert parse_int(text, default) == expected
def test_parse_float_test_no_match():
    """Unparseable input with no default raises the appropriate error."""
    for text, exc in (("", ValueError), (None, TypeError), ("abcd", ValueError)):
        with pytest.raises(exc):
            # noinspection PyTypeChecker
            parse_float(text)
def test_parse_float_test_no_match_with_default():
    """Unparseable input falls back to the default, coerced to float."""
    assert parse_float("", 2.8) == 2.8
    # noinspection PyTypeChecker
    assert parse_float(None, 1.5) == 1.5
    assert parse_float("abcd", 1.0) == 1.0
    for text in ("", None, "abcd"):
        # noinspection PyTypeChecker
        assert parse_float(text, 2) == 2
    assert isinstance(parse_float("", 2), float)
    assert isinstance(parse_float("", 2.0), float)
    # An explicit None default re-raises the parse failure.
    for text, exc in (("", ValueError), (None, TypeError), ("abcd", ValueError)):
        with pytest.raises(exc):
            # noinspection PyTypeChecker
            parse_float(text, None)
    # Non-numeric defaults are rejected outright.
    for text, default in (("", []), (None, "")):
        with pytest.raises(IllegalArgumentError):
            # noinspection PyTypeChecker
            parse_float(text, default)
def test_parse_float_test_match():
    """Plain numeric strings parse to their float value."""
    for text, expected in (("1", 1.0), ("0", 0.0), ("50", 50.0), ("100", 100.0)):
        assert parse_float(text) == expected
def test_parse_float_test_match_with_default():
    """A parseable string wins over any supplied default."""
    for text, default, expected in (("1", 5, 1.0), ("0", 1, 0.0),
                                    ("50", 1, 50.0), ("100", 1, 100.0)):
        assert parse_float(text, default) == expected
def test_is_blank():
    """Only empty/whitespace strings count as blank; None does not."""
    assert is_blank("")
    assert is_blank(" ")
    for value in ("abcd", " 1234 ", "!@#$%^&*()_+", None):
        # noinspection PyTypeChecker
        assert not is_blank(value)
def test_is_not_blank():
    """Inverse of is_blank — note None is treated as not blank."""
    assert not is_not_blank("")
    assert not is_not_blank(" ")
    for value in ("abcd", " 1234 ", "!@#$%^&*()_+", None):
        # noinspection PyTypeChecker
        assert is_not_blank(value)
def test_is_blank_or_none():
    """Blank strings and None are both accepted by is_blank_or_none."""
    for value in ("", " ", None):
        # noinspection PyTypeChecker
        assert is_blank_or_none(value)
    for value in ("abcd", " 1234 ", "!@#$%^&*()_+"):
        assert not is_blank_or_none(value)
def test_is_not_blank_or_none():
    """Inverse of is_blank_or_none."""
    for value in ("", " ", None):
        # noinspection PyTypeChecker
        assert not is_not_blank_or_none(value)
    for value in ("abcd", " 1234 ", "!@#$%^&*()_+"):
        assert is_not_blank_or_none(value)
def test_wrap():
    """wrap surrounds text with the given token; invalid args raise."""
    assert wrap("Hello World!", '*') == "*Hello World!*"
    assert wrap("Hello World!", '') == "Hello World!"
    assert wrap("World", ' Hello ') == " Hello World Hello "
    for text, token in ((None, None), (1, 1), ("", None), (None, "")):
        with pytest.raises(IllegalArgumentError):
            # noinspection PyTypeChecker
            wrap(text, token)
def test_unwrap_success():
    """unwrap strips a token that wraps both ends of the string."""
    for wrapped, token, expected in (
        ("*Hello World!*", '*', "Hello World!"),
        (" Hello World Hello ", " Hello ", "World"),
    ):
        assert unwrap(wrapped, token) == expected
def test_unwrap_failure():
    """Strings not wrapped on both ends come back unchanged; bad args raise."""
    unchanged = (
        ("*Hello World!", '*'),
        ("Hello World!*", '*'),
        ("Hello World!", '*'),
        (" Hello World", " Hello "),
        ("World Hello ", " Hello "),
        ("Hello World", " Hello "),
        ("World", " Hello "),
    )
    for text, token in unchanged:
        assert unwrap(text, token) == text
    for text, token in ((None, None), (1, 1), ("", None), (None, "")):
        with pytest.raises(IllegalArgumentError):
            # noinspection PyTypeChecker
            unwrap(text, token)
def test_uuid():
    """A seeded uuid is deterministic; as_hex drops the dashes."""
    dashed = "1de9ea66-70d3-4a1f-8735-df5ef7697fb9"
    hexed = dashed.replace("-", "")
    assert uuid(seed=1234) == dashed
    assert uuid(False, seed=1234) == dashed
    assert uuid(True, seed=1234) == hexed
    assert uuid(as_hex=True, seed=1234) == hexed
    assert uuid(True, 1234) == hexed
    # Without a fixed seed the value must differ from the seeded one.
    for as_hex, expected in ((False, dashed), (True, hexed)):
        assert uuid(as_hex) != expected
        assert uuid(as_hex, None) != expected
def test_random_string():
    """Seeded output is deterministic; a negative length raises."""
    for length, seed, expected in (
        (5, 1234, "9XCha"),
        (9, 1234, "9XChaf688"),
        (5, 1111, "nmwWP"),
        (5, 123, "drfXA"),
        (5, 0, "2yW4A"),
    ):
        assert random_string(length, seed=seed) == expected
    assert random_string(9, seed=None) != "9XChaf688"
    with pytest.raises(IllegalArgumentError):
        random_string(-1)
def test_reverse():
    """reverse flips a string; None input is rejected."""
    for text, expected in (
        ("hello", "olleh"),
        ("Hello World!", "!dlroW olleH"),
        ("12345", "54321"),
        ("", ""),
        ("1", "1"),
    ):
        assert reverse(text) == expected
    with pytest.raises(InvalidInputError):
        # noinspection PyTypeChecker
        reverse(None)
| 29.794643 | 77 | 0.660973 |
acf074bc60c8732a6bf44f489223eee332bb7b3a | 7,933 | py | Python | kubernetes_asyncio/client/models/v1beta2_deployment_condition.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1beta2_deployment_condition.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1beta2_deployment_condition.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1beta2DeploymentCondition(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each python attribute; drives to_dict().
    openapi_types = {
        'last_transition_time': 'datetime',
        'last_update_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # JSON key used for each python attribute in the wire payload.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'last_update_time': 'lastUpdateTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None):  # noqa: E501
        """V1beta2DeploymentCondition - a model defined in OpenAPI"""  # noqa: E501

        self._last_transition_time = None
        self._last_update_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Optional fields are only assigned when provided; status and type
        # are required, so their setters run unconditionally and raise
        # ValueError if None is passed.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if last_update_time is not None:
            self.last_update_time = last_update_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """Gets the last_transition_time of this V1beta2DeploymentCondition.  # noqa: E501

        Last time the condition transitioned from one status to another.  # noqa: E501

        :return: The last_transition_time of this V1beta2DeploymentCondition.  # noqa: E501
        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Sets the last_transition_time of this V1beta2DeploymentCondition.

        Last time the condition transitioned from one status to another.  # noqa: E501

        :param last_transition_time: The last_transition_time of this V1beta2DeploymentCondition.  # noqa: E501
        :type: datetime
        """

        self._last_transition_time = last_transition_time

    @property
    def last_update_time(self):
        """Gets the last_update_time of this V1beta2DeploymentCondition.  # noqa: E501

        The last time this condition was updated.  # noqa: E501

        :return: The last_update_time of this V1beta2DeploymentCondition.  # noqa: E501
        :rtype: datetime
        """
        return self._last_update_time

    @last_update_time.setter
    def last_update_time(self, last_update_time):
        """Sets the last_update_time of this V1beta2DeploymentCondition.

        The last time this condition was updated.  # noqa: E501

        :param last_update_time: The last_update_time of this V1beta2DeploymentCondition.  # noqa: E501
        :type: datetime
        """

        self._last_update_time = last_update_time

    @property
    def message(self):
        """Gets the message of this V1beta2DeploymentCondition.  # noqa: E501

        A human readable message indicating details about the transition.  # noqa: E501

        :return: The message of this V1beta2DeploymentCondition.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1beta2DeploymentCondition.

        A human readable message indicating details about the transition.  # noqa: E501

        :param message: The message of this V1beta2DeploymentCondition.  # noqa: E501
        :type: str
        """

        self._message = message

    @property
    def reason(self):
        """Gets the reason of this V1beta2DeploymentCondition.  # noqa: E501

        The reason for the condition's last transition.  # noqa: E501

        :return: The reason of this V1beta2DeploymentCondition.  # noqa: E501
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1beta2DeploymentCondition.

        The reason for the condition's last transition.  # noqa: E501

        :param reason: The reason of this V1beta2DeploymentCondition.  # noqa: E501
        :type: str
        """

        self._reason = reason

    @property
    def status(self):
        """Gets the status of this V1beta2DeploymentCondition.  # noqa: E501

        Status of the condition, one of True, False, Unknown.  # noqa: E501

        :return: The status of this V1beta2DeploymentCondition.  # noqa: E501
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1beta2DeploymentCondition.

        Status of the condition, one of True, False, Unknown.  # noqa: E501

        :param status: The status of this V1beta2DeploymentCondition.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501

        self._status = status

    @property
    def type(self):
        """Gets the type of this V1beta2DeploymentCondition.  # noqa: E501

        Type of deployment condition.  # noqa: E501

        :return: The type of this V1beta2DeploymentCondition.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V1beta2DeploymentCondition.

        Type of deployment condition.  # noqa: E501

        :param type: The type of this V1beta2DeploymentCondition.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively serialises nested models, lists and dicts via their
        # own to_dict() where available.
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1beta2DeploymentCondition):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 30.867704 | 138 | 0.614774 |
acf0757ac23e23ef95f8f6e78e82b53bc3fd4a96 | 18,724 | py | Python | Watson/views/lc.py | parksurk/sk-watson-hackerthon | 3677c7f068634809199bfbf06826c153f8b36eb9 | [
"Apache-2.0"
] | 1 | 2018-12-01T08:46:43.000Z | 2018-12-01T08:46:43.000Z | Watson/views/lc.py | parksurk/sk-watson-hackerthon | 3677c7f068634809199bfbf06826c153f8b36eb9 | [
"Apache-2.0"
] | null | null | null | Watson/views/lc.py | parksurk/sk-watson-hackerthon | 3677c7f068634809199bfbf06826c153f8b36eb9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: latin-1 -*-
# Copyright 2015 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import requests
import base64
from django import forms
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from Watson.watsonutils.wdc import WDCService
from Watson.watsonutils.twitservice import TwitterService
from watson_developer_cloud import TextToSpeechV1 as TextToSpeech
from cognitive.utils.vcap import get_vcap_settings
class UploadClassifierForm(forms.Form):
    """Form for uploading NLC training data (a single JSON file)."""
    classInputFile = forms.FileField()
def lcindex(request):
    # These are the views for the Natural Language Classifier portion of the application.
    # It makes use of both the NLC and a Twitter service, and all calls to the services
    # are done from the client via AJAX calls to REST APIs
    wdc = WDCService('LC')
    service_creds = wdc.getCreds()
    # NOTE(review): nlcService is never used in this view; confirm
    # wdc.nlcService() has no required side effect before removing it.
    nlcService = wdc.nlcService()
    return render(request, 'Watson/lcindex.html', service_creds)
@csrf_exempt
def lclist(request):
    """REST endpoint: return all available NLC classifiers as JSON.

    No input is expected; every classifier found is returned under
    results.classifiers, or results.error if the service is missing.
    """
    payload = {"error": "No Response as list request has failed"}
    service = WDCService('LC').nlcService()
    if service is not None:
        payload = {"classifiers": service.listClassifiers()}
    return HttpResponse(json.dumps({"results": payload}), content_type="application/json")
def nlcnew(request):
    # This is a call that is not done through REST. Though it should also change to REST.
    # This is the original new classifier request, but has been superseded by nlcnewx method.
    # Will be removed in a future iteration, but is here for reference.
    #
    # The request is for a new classifier. Currently the file chosen is hardcoded and sits in the
    # static directory. This should be modified to allow a file from the client to be submitted.
    # The return is a redirect back to the classifier list, which forces it to refresh.
    wdc = WDCService('LC')
    data = ""
    module_dir = os.path.dirname(__file__)
    # Training data is read from a fixed sample file shipped with the app.
    file_path = os.path.join(module_dir, '../static/', 'nlcsample.json')
    with open(file_path) as f:
        data = json.loads(f.read())
    nlcService = wdc.nlcService()
    if nlcService is not None:
        # Any error from createClassifier is silently ignored here;
        # nlcnewx is the variant that surfaces errors to the user.
        nlcResults = nlcService.createClassifier(data)
    return redirect('watson:nlclassifier')
def nlcnewx(request):
    # This is a call that is not done through REST.
    # The request is for a new classifier. The file is sent through a form
    # The return is a redirect back to the classifier list, which forces it to refresh.
    wdc = WDCService('LC')
    service_creds = wdc.getCreds()
    if request.POST:
        form = UploadClassifierForm(request.FILES)
        if request.FILES and 'classInputFile' in request.FILES:
            f = request.FILES['classInputFile']
            module_dir = os.path.dirname(__file__)
            # NOTE(review): a fixed spool file name means concurrent uploads
            # overwrite each other — confirm this is acceptable for the demo.
            file_path = os.path.join(module_dir, '../static/', 'xx.json')
            # Spool the upload to disk first so the whole file has arrived
            # before it is parsed as JSON.
            with open(file_path, 'wb+') as destination:
                for chunk in f.chunks():
                    destination.write(chunk)
                destination.close()
            with open(file_path) as fj:
                data = json.loads(fj.read())
            wdc = WDCService('LC')
            service_creds = wdc.getCreds()
            nlcService = wdc.nlcService()
            if nlcService is not None:
                nlcResults = nlcService.createClassifier(data)
                if nlcResults and 'error' in nlcResults:
                    # Surface the service error on the index page instead
                    # of redirecting.
                    service_creds['error'] = nlcResults['description']
                    return render(request, 'Watson/lcindex.html', service_creds)
    return redirect('watson:nlclassifier')
@csrf_exempt
def drop(request):
    """REST endpoint: delete the classifier named by the posted "url" field.

    Only two classifiers are allowed per instance, so dropping frees up
    resources and lets the training be changed.
    """
    payload = {"error": "No Response as drop request has failed"}
    if request.POST and "data" in request.POST:
        body = json.loads(request.POST["data"])
        if body and "url" in body:
            service = WDCService('LC').nlcService()
            if service is not None:
                class_url = body["url"]
                outcome = service.dropClassifier(class_url)
                payload = {"error": outcome["error"]} if "error" in outcome else {"ok": class_url}
    return HttpResponse(json.dumps({"results": payload}), content_type="application/json")
@csrf_exempt
def twitsearch(request):
    """REST endpoint: classify the recent tweets of a Twitter id.

    Expects a JSON "data" field containing "twitterid" and "classifierurl";
    roughly the id's last 20 tweets are run through the classifier.
    """
    payload = {"error": "No Response as request has failed"}
    body = None
    if request.POST and "data" in request.POST:
        body = json.loads(request.POST["data"])
    if body and "twitterid" in body and "classifierurl" in body:
        class_url = body["classifierurl"]
        timeline = TwitterService().getTimeLineFor(body["twitterid"])
        payload = {"data": "Will go here"}
        if "error" in timeline:
            payload = {"error": timeline["error"]}
        if "tweets" in timeline:
            payload = classifyTweets(class_url, timeline)
    return HttpResponse(json.dumps({"results": payload}), content_type="application/json")
def classifyTweets(classURL, twitTimeLine):
    """Run each tweet in the timeline through the NLC classifier.

    Args:
        classURL: URL of the trained classifier to call.
        twitTimeLine: dict holding a "tweets" list of message strings.

    Returns:
        {"classification": [...]} on success, where each entry contains the
        tweet text, its top class and that class's confidence; otherwise
        {"error": ...} when the service is missing or a classification fails.
    """
    tweetDataArray = []
    theData = {}
    nlcService = WDCService('LC').nlcService()
    if nlcService is None:
        theData = {"error": "Natural Language Classifier service not found"}
    else:
        for t in twitTimeLine["tweets"]:
            tweetData = {"message": t}
            nlcResult = nlcService.getNLClassification({"text": t}, classURL)
            if "error" in nlcResult:
                # Abort on the first failure and surface the service error.
                theData = {"error": nlcResult["error"]}
                break
            if "top_class" in nlcResult:
                tweetData["top_class"] = nlcResult["top_class"]
            # Find the confidence reported for the top class. Using .get()
            # means a response with "classes" but no "top_class" can no
            # longer raise KeyError (the previous code indexed directly).
            for c in nlcResult.get("classes", []):
                if tweetData.get("top_class") == c["class_name"]:
                    tweetData["confidence"] = c["confidence"]
            tweetDataArray.append(tweetData)
    if "error" not in theData:
        theData = {"classification": tweetDataArray}
    return theData
class UploadAudioForm(forms.Form):
    """Form for the recorded audio BLOB posted by the browser.

    NOTE(review): the views actually read request.FILES['data'], not 'file',
    and never validate this form — confirm the field name before relying on it.
    """
    file = forms.FileField()
@csrf_exempt
def staudio(request):
    # This request receives an Audio BLOB file which is passed to the
    # Speech to text service. The response is then forwarded to the
    # classifier service.
    # Pipeline: audio upload -> Speech to Text -> dialog API -> Text to Speech.
    results = {}
    tts_input = ''
    theData = {"error": "Error detected in REST API"}
    module_dir = os.path.dirname(__file__)
    if request.POST and request.FILES:
        # NOTE(review): form is constructed but never validated or used.
        form = UploadAudioForm(request.POST, request.FILES)
        # Don't bother checking the form, as it is always invalid
        #if form.is_valid():
        #    print("Valid Form")
        #else:
        #    print("Invalid Form")
        filename = ""
        classURL = ""
        textscript = ""
        if "fname" in request.POST:
            filename = request.POST["fname"]
        if "classifierurl" in request.POST:
            classURL = request.POST["classifierurl"]
        if "textscript" in request.POST:
            textscript = request.POST["textscript"]
        # Saving the file and reading it again, as this ensures that all the data has
        # been received. This gives a better result from the service.
        f = request.FILES['data']
        if f:
            file_path = os.path.join(module_dir, '../static/', filename)
            with open(file_path, 'wb+') as destination:
                for chunk in f.chunks():
                    destination.write(chunk)
                # Redundant: the with-block closes the file anyway.
                destination.close()
            # Remember to switch
            yy_file_path = os.path.join(module_dir, '../static/', filename)
            #yy_file_path = os.path.join(module_dir, '../static/', 'yy.wav')
            with open(yy_file_path, 'rb') as fj:
                audiodata = fj.read()
                if audiodata:
                    wdc = WDCService('ST')
                    service_creds = wdc.getCreds()
                    stService = wdc.stService()
                    if stService is None:
                        theData = {"error": "No Speech to Text service found"}
                    else:
                        theData = stService.processAudio(audiodata)
                        # Only the first result's first alternative is used.
                        if "results" in theData:
                            if list is type(theData["results"]):
                                res = theData["results"][0]
                                if "alternatives" in res:
                                    alt = res["alternatives"][0]
                                    if "transcript" in alt:
                                        question = alt["transcript"]
                                        # theData = classifyTranscript(classURL, alt["transcript"])
                                        data = {}
                                        # A non-empty textscript overrides the
                                        # recognised transcript as dialog input.
                                        if textscript != "":
                                            data = {"txt": textscript,
                                                    "conversation_id": request.POST["conversation_id"],
                                                    "client_id": request.POST["client_id"],
                                                    "dialog_id": request.POST["dialog_id"],
                                                    }
                                        else:
                                            data = {"txt": alt["transcript"],
                                                    "conversation_id": request.POST["conversation_id"],
                                                    "client_id": request.POST["client_id"],
                                                    "dialog_id": request.POST["dialog_id"],
                                                    }
                                        res = sendDialogAPI(request.POST["category"], {'data': json.dumps(data)})
                                        theData = res['results']
                                        theData['category'] = request.POST["category"]
                                        theData['question'] = question
                                        tts_input = theData['conversationData']['response']
                # Redundant: the with-block closes the file anyway.
                fj.close()
    results["results"] = theData
    # NOTE(review): on any failure tts_input is still '' — confirm
    # textToSpeech handles an empty string.
    textToSpeech(tts_input)
    return HttpResponse(json.dumps(results), content_type="application/json")
@csrf_exempt
def staudio_with_nlc(request):
    # This request receives an Audio BLOB file which is passed to the
    # Speech to text service. The response is then forwarded to the
    # classifier service.
    # Like staudio(), but the dialog category is derived from an NLC
    # classification of the transcript instead of being posted by the client.
    results = {}
    tts_input = ''
    theData = {"error": "Error detected in REST API"}
    module_dir = os.path.dirname(__file__)
    if request.POST and request.FILES:
        # NOTE(review): form is constructed but never validated or used.
        form = UploadAudioForm(request.POST, request.FILES)
        # Don't bother checking the form, as it is always invalid
        #if form.is_valid():
        #    print("Valid Form")
        #else:
        #    print("Invalid Form")
        filename = ""
        classURL = ""
        textscript = ""
        if "fname" in request.POST:
            filename = request.POST["fname"]
        if "textscript" in request.POST:
            textscript = request.POST["textscript"]
        if "classifierurl" in request.POST:
            classURL = request.POST["classifierurl"]
        # Saving the file and reading it again, as this ensures that all the data has
        # been received. This gives a better result from the service.
        f = request.FILES['data']
        if f:
            file_path = os.path.join(module_dir, '../static/', filename)
            with open(file_path, 'wb+') as destination:
                for chunk in f.chunks():
                    destination.write(chunk)
                # Redundant: the with-block closes the file anyway.
                destination.close()
            # Remember to switch
            yy_file_path = os.path.join(module_dir, '../static/', filename)
            #yy_file_path = os.path.join(module_dir, '../static/', 'yy.wav')
            with open(yy_file_path, 'rb') as fj:
                audiodata = fj.read()
                if audiodata:
                    wdc = WDCService('ST')
                    service_creds = wdc.getCreds()
                    stService = wdc.stService()
                    if stService is None:
                        theData = {"error": "No Speech to Text service found"}
                    else:
                        theData = stService.processAudio(audiodata)
                        # Only the first result's first alternative is used.
                        if "results" in theData:
                            if list is type(theData["results"]):
                                res = theData["results"][0]
                                if "alternatives" in res:
                                    alt = res["alternatives"][0]
                                    if "transcript" in alt:
                                        question = alt["transcript"]
                                        theData = classifyTranscript(classURL, alt["transcript"])
                                        data = {}
                                        # A non-empty textscript overrides the
                                        # recognised transcript as dialog input.
                                        if textscript != "":
                                            data = {"txt": textscript,
                                                    "conversation_id":"",
                                                    "client_id":""
                                                    }
                                        else:
                                            data = {"txt": alt["transcript"],
                                                    "conversation_id":"",
                                                    "client_id":""
                                                    }
                                        # NOTE(review): message text contains a
                                        # typo ('Classificatio') — runtime string,
                                        # left unchanged here.
                                        if not 'classification' in theData:
                                            raise Exception('Classificatio failed: {}'.format(theData))
                                        category = theData["classification"]["top_class"]
                                        res = sendDialogAPI(category, {'data': json.dumps(data)})
                                        theData = res['results']
                                        theData['category'] = category
                                        theData['question'] = question
                                        tts_input = theData['conversationData']['response']
                # Redundant: the with-block closes the file anyway.
                fj.close()
    results["results"] = theData
    # NOTE(review): on any failure tts_input is still '' — confirm
    # textToSpeech handles an empty string.
    textToSpeech(tts_input)
    return HttpResponse(json.dumps(results), content_type="application/json")
@csrf_exempt
def rnr(request):
    """Run a ranked Retrieve and Rank query on behalf of the client.

    Expects POST fields `rnr_query`, `rnr_user_id` and `rnr_passwd` and
    returns the raw RnR JSON response. (The previous comment claiming this
    handled an audio BLOB was a copy-paste mistake; the dead locals
    `tts_input` / `theData` have been removed.)
    """
    results = {}
    if request.POST:
        params = {"q": request.POST["rnr_query"],
                  "rnr_user_id": request.POST["rnr_user_id"],
                  "rnr_passwd": request.POST["rnr_passwd"],
                  "wt": "json",
                  "ranker_id": "F3551Dx1-rank-428",
                  "fl": "score,id,title,body,domain",
                  }
        results = sendRnrAPI(request.POST["rnr_user_id"], request.POST["rnr_passwd"], params)
    return HttpResponse(json.dumps(results), content_type="application/json")
@csrf_exempt
def rnrSearch(request):
    """Run an unranked Retrieve and Rank (plain Solr select) query.

    Expects POST fields `rnr_query`, `rnr_user_id` and `rnr_passwd` and
    returns the raw search JSON response. (The previous comment claiming
    this handled an audio BLOB was a copy-paste mistake; the dead locals
    `tts_input` / `theData` have been removed.)
    """
    results = {}
    if request.POST:
        params = {"q": request.POST["rnr_query"],
                  "rnr_user_id": request.POST["rnr_user_id"],
                  "rnr_passwd": request.POST["rnr_passwd"],
                  "wt": "json",
                  "fl": "score,id,title,body,domain",
                  }
        results = sendRnrSearchAPI(request.POST["rnr_user_id"], request.POST["rnr_passwd"], params)
    return HttpResponse(json.dumps(results), content_type="application/json")
def textToSpeech(text):
    """Synthesize `text` with the Watson Text to Speech service and save
    the result as static/tts.wav (overwritten on every call)."""
    wdc = WDCService('TS')
    tts = TextToSpeech(username=wdc.service.user, password=wdc.service.password)
    out_path = os.path.join(os.path.dirname(__file__), '../static/', 'tts.wav')
    with open(out_path, 'wb+') as audio_file:
        audio_file.write(tts.synthesize(text))
def sendDialogAPI(classfier, message):
    """Forward `message` to the dialog service matching `classfier`.

    Fixes: the original left `url` unbound for any classifier other than
    "Accomodations"/"restaurants", crashing later with an unrelated
    NameError; now an explicit ValueError is raised. The bare `except:`
    around json parsing was narrowed to ValueError.
    """
    print("******** sendDialogAPI classfier :" + classfier)
    # known classifier -> dialog endpoint mapping
    dialog_urls = {
        "Accomodations": "http://jseo-proj-watson.mybluemix.net/wl/converse",
        "restaurants": "http://sc-proj-watson007.mybluemix.net/wl/converse",
    }
    if classfier not in dialog_urls:
        raise ValueError('No dialog service known for classifier "%s"' % classfier)
    url = dialog_urls[classfier]
    print('requesting to {} for "{}"'.format(url, message))
    response = requests.post(url,
                             data=message,
                             )
    try:
        # json.loads raises ValueError on a malformed (non-JSON) body
        return json.loads(response.text)
    except ValueError:
        raise Exception("Error processing the request, HTTP: %d" % response.status_code)
def sendRnrAPI(rnr_user_id, rnr_passwd, params):
    """Run a Retrieve and Rank `fcselect` (ranked) query; return parsed JSON.

    NOTE: `rnr_user_id` / `rnr_passwd` are currently ignored - the service
    credentials are hard-coded inside the URL below.
    SECURITY: credentials embedded in source should be moved to
    configuration or environment variables.
    """
    url = "https://f46398e8-51c3-43e5-9494-bce1a9a2f2d0:RNHJsMuwZ3ah@gateway.watsonplatform.net/retrieve-and-rank/api/v1/solr_clusters/sc0e7faf94_accd_4f76_bfd0_8b4760ea9c75/solr/korearestaurants-collection/fcselect"
    response = requests.get(url,
                            params=params,
                            )
    try:
        # json.loads raises ValueError on a malformed body; the previous
        # bare `except:` also swallowed unrelated errors.
        return json.loads(response.text)
    except ValueError:
        raise Exception("Error processing the request, HTTP: %d" % response.status_code)
def sendRnrSearchAPI(rnr_user_id, rnr_passwd, params):
    """Run a plain Solr `select` query (no ranker); return parsed JSON.

    NOTE: `rnr_user_id` / `rnr_passwd` are currently ignored - the service
    credentials are hard-coded inside the URL below.
    SECURITY: credentials embedded in source should be moved to
    configuration or environment variables.
    """
    url = "https://f46398e8-51c3-43e5-9494-bce1a9a2f2d0:RNHJsMuwZ3ah@gateway.watsonplatform.net/retrieve-and-rank/api/v1/solr_clusters/sc0e7faf94_accd_4f76_bfd0_8b4760ea9c75/solr/korearestaurants-collection/select"
    response = requests.get(url,
                            params=params,
                            )
    try:
        # json.loads raises ValueError on a malformed body; the previous
        # bare `except:` also swallowed unrelated errors.
        return json.loads(response.text)
    except ValueError:
        raise Exception("Error processing the request, HTTP: %d" % response.status_code)
def classifyTranscript(classURL, transcript):
    """Run NL classification against a transcript.

    The classifier url must be passed in, as it contains the classifier id.
    Fix: the confidence loop used classifiedData["top_class"], which raised
    KeyError when the service returned "classes" without "top_class"; it
    now uses .get() so that case is handled gracefully.
    """
    classifiedData = {}
    classifiedData["message"] = transcript
    theData = {}
    wdc = WDCService('LC')
    nlcService = wdc.nlcService()
    if nlcService is not None:
        nlcResult = nlcService.getNLClassification({"text": transcript, }, classURL)
        if "error" in nlcResult:
            theData = {"error": nlcResult["error"]}
        else:
            if "top_class" in nlcResult:
                classifiedData["top_class"] = nlcResult["top_class"]
            if "classes" in nlcResult:
                classes = nlcResult["classes"]
                for c in classes:
                    # .get() avoids a KeyError when "top_class" was absent
                    if classifiedData.get("top_class") == c["class_name"]:
                        classifiedData["confidence"] = c["confidence"]
    else:
        theData = {"error": "Natural Language Classifier service not found"}
    if "error" not in theData:
        theData = {"classification": classifiedData}
    return theData
| 37.75 | 216 | 0.631275 |
acf076b4502252f4c3c07bf8216f01db5e84ad96 | 33,780 | py | Python | rebalance/rebalance.py | guibressan/plugins | 84413f716646adce6c0354b24d747cc9ac4b63b6 | [
"BSD-3-Clause"
] | 1 | 2021-01-23T08:21:52.000Z | 2021-01-23T08:21:52.000Z | rebalance/rebalance.py | guibressan/plugins | 84413f716646adce6c0354b24d747cc9ac4b63b6 | [
"BSD-3-Clause"
] | null | null | null | rebalance/rebalance.py | guibressan/plugins | 84413f716646adce6c0354b24d747cc9ac4b63b6 | [
"BSD-3-Clause"
] | 2 | 2020-05-09T18:17:28.000Z | 2020-05-09T20:45:21.000Z | #!/usr/bin/env python3
from pyln.client import Plugin, Millisatoshi, RpcError
from threading import Thread, Lock
from datetime import timedelta
import time
import uuid
# Module-level plugin singleton used by all RPC method handlers below.
plugin = Plugin()
# Global stop flag checked by long-running loops; set by `rebalancestop`.
plugin.rebalance_stop = False
def setup_routing_fees(plugin, route, msatoshi):
    """Fill in msatoshi/amount_msat/delay for every hop of `route` in place.

    Walks the route backwards from the destination, accumulating each
    channel's fee and cltv delay so that earlier hops forward enough to
    cover all downstream fees.
    """
    delay = plugin.cltv_final
    for r in reversed(route):
        r['msatoshi'] = msatoshi.millisatoshis
        r['amount_msat'] = msatoshi
        r['delay'] = delay
        channels = plugin.rpc.listchannels(r['channel'])
        # pick the channel half announced towards this hop's node
        ch = next(c for c in channels.get('channels') if c['destination'] == r['id'])
        fee = Millisatoshi(ch['base_fee_millisatoshi'])
        # BOLT #7 requires fee >= fee_base_msat + ( amount_to_forward * fee_proportional_millionths / 1000000 )
        fee += (msatoshi * ch['fee_per_millionth'] + 10**6 - 1) // 10**6  # integer math trick to round up
        msatoshi += fee
        delay += ch['delay']
def get_channel(plugin, payload, peer_id, scid, check_state: bool = False):
    """Return our channel `scid` with `peer_id`; when `check_state` is set,
    additionally require normal channel state and a connected peer."""
    peer = plugin.rpc.listpeers(peer_id).get('peers')[0]
    channel = next(c for c in peer['channels'] if c.get('short_channel_id') == scid)
    if check_state and channel['state'] != "CHANNELD_NORMAL":
        raise RpcError('rebalance', payload, {'message': 'Channel %s not in state CHANNELD_NORMAL, but: %s' % (scid, channel['state'])})
    if check_state and not peer['connected']:
        raise RpcError('rebalance', payload, {'message': 'Channel %s peer is not connected.' % scid})
    return channel
def amounts_from_scid(plugin, scid):
    """Return the pair (our_msat, total_msat) for our channel `scid`."""
    funds = plugin.rpc.listfunds().get('channels')
    match = next(c for c in funds if c.get('short_channel_id') == scid)
    return Millisatoshi(match['our_amount_msat']), Millisatoshi(match['amount_msat'])
def peer_from_scid(plugin, short_channel_id, my_node_id, payload):
    """Return the remote node id on the other end of `short_channel_id`."""
    channels = plugin.rpc.listchannels(short_channel_id).get('channels')
    destination = next((ch['destination'] for ch in channels
                        if ch['source'] == my_node_id), None)
    if destination is not None:
        return destination
    raise RpcError("rebalance", payload, {'message': 'Cannot find peer for channel: ' + short_channel_id})
def find_worst_channel(route):
    """Return the most expensive intermediate hop of `route` (scanning from
    index 2 up to, but excluding, the last hop), or None when the route is
    too short to have such a hop. Ties keep the earliest hop."""
    if len(route) < 4:
        return None

    def fee_at(i):
        # fee charged by hop i = amount forwarded to it minus amount it forwards
        return route[i - 1]['msatoshi'] - route[i]['msatoshi']

    worst_idx = max(range(2, len(route) - 1), key=fee_at)
    return route[worst_idx]
def cleanup(plugin, label, payload, rpc_result, error=None):
    """Delete the rebalance invoice and translate `error` into a result.

    Returns `rpc_result` on success; unwraps RpcErrors raised by this
    plugin into a normal RPC result; re-raises other RpcErrors.
    """
    try:
        plugin.rpc.delinvoice(label, 'unpaid')
    except RpcError as e:
        # race condition: waitsendpay timed out, but invoice get paid
        if 'status is paid' in e.error.get('message', ""):
            return rpc_result
    if error is not None and isinstance(error, RpcError):
        # unwrap rebalance errors as 'normal' RPC result
        if error.method == "rebalance":
            return {"status": "exception",
                    "message": error.error.get('message', "error not given")}
        raise error
    # NOTE(review): a non-RpcError `error` is silently dropped here and
    # `rpc_result` (possibly None) is returned - confirm this is intended.
    return rpc_result
# This function calculates the optimal rebalance amount
# based on the selected channels capacity and state.
# It will return a value that brings at least one of the channels to balance.
# It will raise an error, when this isnt possible.
#
# EXAMPLE
#             |------------------- out_total -------------|
# OUT   -v => |-------- out_ours -------||-- out_theirs --| => +v
#
# IN    +v <= |-- in_ours --||---------- in_theirs ---------| <= -v
#             |--------- in_total --------------------------|
#
# CHEAP SOLUTION: take v_min from 50/50 values
# O*           vo = out_ours - (out_total/2)
# I*           vi = (in_total/2) - in_ours
# return min(vo, vi)
#
# ... and cover edge cases with exceeding in/out capacity or negative values.
def calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload):
    """Pick a rebalance amount that brings at least one of the two channels
    to a 50/50 balance; raise an RpcError when no amount can help."""
    out_ours, out_total = int(out_ours), int(out_total)
    in_ours, in_total = int(in_ours), int(in_total)

    in_theirs = in_total - in_ours
    vo = int(out_ours - (out_total / 2))   # amount that balances the outgoing side
    vi = int((in_total / 2) - in_ours)     # amount that balances the incoming side

    # cases where one option can be eliminated because it exceeds other capacity
    if vo > in_theirs and vi > 0 and vi < out_ours:
        return Millisatoshi(vi)
    if vi > out_ours and vo > 0 and vo < in_theirs:
        return Millisatoshi(vo)

    # cases where one channel is still capable to bring other to balance
    if vo < 0 and vi > 0 and vi < out_ours:
        return Millisatoshi(vi)
    if vi < 0 and vo > 0 and vo < in_theirs:
        return Millisatoshi(vo)

    # when both options are possible take the one with least effort
    if vo > 0 and vo < in_theirs and vi > 0 and vi < out_ours:
        return Millisatoshi(min(vi, vo))

    raise RpcError("rebalance", payload, {'message': 'rebalancing these channels will make things worse'})
class NoRouteException(Exception):
    """Raised when route search is exhausted and no further getroute
    parameters are worth trying."""
    pass
def getroute_basic(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """Plain route search: makes no special assumptions and tries all
    routes it gets. Uses less CPU and does not filter any routes."""
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi,
                                   maxhops=plugin.maxhops,
                                   riskfactor=10, cltv=9)
    except RpcError as err:
        # code 205: no route found -> tell the caller to give up entirely
        if err.method == "getroute" and err.error.get('code') == 205:
            raise NoRouteException
        raise err
def getroute_iterative(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """ This searches for 'shorter and bigger pipes' first in order
        to increase likelyhood of success on short timeout.
        Can be useful for manual `rebalance`.

    Mutates plugin.msatfactoridx / plugin.maxhopidx between calls to widen
    the search; raises NoRouteException once the maxhops limit is reached.
    """
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi * plugin.msatfactoridx,
                                   maxhops=plugin.maxhopidx,
                                   riskfactor=10, cltv=9)
    except RpcError as e:
        # could not find route -> change params and restart loop
        if e.method == "getroute" and e.error.get('code') == 205:
            # reduce _msatfactor to look for smaller channels now
            plugin.msatfactoridx -= 1
            if plugin.msatfactoridx < 1:
                # when we reached neutral msat factor:
                # increase _maxhops and restart with msatfactor
                plugin.maxhopidx += 1
                plugin.msatfactoridx = plugin.msatfactor
                # abort if we reached maxhop limit
                if plugin.maxhops > 0 and plugin.maxhopidx > plugin.maxhops:
                    raise NoRouteException
        # re-raise so the caller's retry loop sees code 205 and continues
        raise e
def getroute_switch(method_name):
    """Map a getroute method name to its implementation; any unknown name
    falls back to the iterative strategy."""
    if method_name == "basic":
        return getroute_basic
    return getroute_iterative
@plugin.method("rebalance")
def rebalance(plugin, outgoing_scid, incoming_scid, msatoshi: Millisatoshi = None,
              retry_for: int = 60, maxfeepercent: float = 0.5,
              exemptfee: Millisatoshi = Millisatoshi(5000),
              getroute_method=None):
    """Rebalancing channel liquidity with circular payments.
    This tool helps to move some msatoshis between your channels.
    """
    if msatoshi:
        msatoshi = Millisatoshi(msatoshi)
    retry_for = int(retry_for)
    maxfeepercent = float(maxfeepercent)
    if getroute_method is None:
        getroute = plugin.getroute
    else:
        getroute = getroute_switch(getroute_method)
    exemptfee = Millisatoshi(exemptfee)
    payload = {
        "outgoing_scid": outgoing_scid,
        "incoming_scid": incoming_scid,
        "msatoshi": msatoshi,
        "retry_for": retry_for,
        "maxfeepercent": maxfeepercent,
        "exemptfee": exemptfee
    }
    my_node_id = plugin.rpc.getinfo().get('id')
    outgoing_node_id = peer_from_scid(plugin, outgoing_scid, my_node_id, payload)
    incoming_node_id = peer_from_scid(plugin, incoming_scid, my_node_id, payload)
    # raise early when either channel is unusable
    get_channel(plugin, payload, outgoing_node_id, outgoing_scid, True)
    get_channel(plugin, payload, incoming_node_id, incoming_scid, True)
    out_ours, out_total = amounts_from_scid(plugin, outgoing_scid)
    in_ours, in_total = amounts_from_scid(plugin, incoming_scid)

    # If amount was not given, calculate a suitable 50/50 rebalance amount
    if msatoshi is None:
        msatoshi = calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload)
        plugin.log("Estimating optimal amount %s" % msatoshi)

    # Check that the requested amount fits the selected channels
    if msatoshi > out_ours or msatoshi > in_total - in_ours:
        raise RpcError("rebalance", payload, {'message': 'Channel capacities too low'})

    plugin.log(f"starting rebalance out_scid:{outgoing_scid} in_scid:{incoming_scid} amount:{msatoshi}", 'debug')

    route_out = {'id': outgoing_node_id, 'channel': outgoing_scid, 'direction': int(not my_node_id < outgoing_node_id)}
    route_in = {'id': my_node_id, 'channel': incoming_scid, 'direction': int(not incoming_node_id < my_node_id)}
    start_ts = int(time.time())
    label = "Rebalance-" + str(uuid.uuid4())
    description = "%s to %s" % (outgoing_scid, incoming_scid)
    # invoice to ourselves: paying it moves funds out of one channel and
    # back in through the other
    invoice = plugin.rpc.invoice(msatoshi, label, description, retry_for + 60)
    payment_hash = invoice['payment_hash']
    rpc_result = None
    excludes = [my_node_id]  # exclude all own channels to prevent shortcuts
    nodes = {}  # here we store erring node counts
    plugin.maxhopidx = 1  # start with short routes and increase
    plugin.msatfactoridx = plugin.msatfactor  # start with high capacity factor
    # and decrease to reduce WIRE_TEMPORARY failures because of imbalances

    # 'disable' maxhops filter if set to <= 0
    # I know this is ugly, but we don't ruin the rest of the code this way
    if plugin.maxhops <= 0:
        plugin.maxhopidx = 20

    # trace stats
    count = 0
    count_sendpay = 0
    time_getroute = 0
    time_sendpay = 0

    try:
        while int(time.time()) - start_ts < retry_for and not plugin.rebalance_stop:
            count += 1
            try:
                time_start = time.time()
                r = getroute(plugin,
                             targetid=incoming_node_id,
                             fromid=outgoing_node_id,
                             excludes=excludes,
                             msatoshi=msatoshi)
                time_getroute += time.time() - time_start
            except NoRouteException:
                # no more chance for a successful getroute
                rpc_result = {'status': 'error', 'message': 'No suitable routes found'}
                return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                # getroute can be successful next time with different parameters
                if e.method == "getroute" and e.error.get('code') == 205:
                    continue
                else:
                    raise e

            route_mid = r['route']
            route = [route_out] + route_mid + [route_in]
            setup_routing_fees(plugin, route, msatoshi)
            fees = route[0]['amount_msat'] - msatoshi

            # check fee and exclude worst channel the next time
            # NOTE: the int(msat) casts are just a workaround for outdated pylightning versions
            if fees > exemptfee and int(fees) > int(msatoshi) * maxfeepercent / 100:
                worst_channel = find_worst_channel(route)
                if worst_channel is None:
                    raise RpcError("rebalance", payload, {'message': 'Insufficient fee'})
                excludes.append(worst_channel['channel'] + '/' + str(worst_channel['direction']))
                continue

            rpc_result = {"sent": msatoshi + fees, "received": msatoshi, "fee": fees, "hops": len(route),
                          "outgoing_scid": outgoing_scid, "incoming_scid": incoming_scid, "status": "complete",
                          "message": f"{msatoshi + fees} sent over {len(route)} hops to rebalance {msatoshi}"}
            plugin.log("Sending %s over %d hops to rebalance %s" % (msatoshi + fees, len(route), msatoshi), 'debug')
            for r in route:
                plugin.log(" - %s %14s %s" % (r['id'], r['channel'], r['amount_msat']), 'debug')
            time_start = time.time()
            count_sendpay += 1
            try:
                plugin.rpc.sendpay(route, payment_hash)
                running_for = int(time.time()) - start_ts
                result = plugin.rpc.waitsendpay(payment_hash, max(retry_for - running_for, 0))
                time_sendpay += time.time() - time_start
                if result.get('status') == "complete":
                    rpc_result["stats"] = f"running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}"
                    return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                time_sendpay += time.time() - time_start
                plugin.log(f"maxhops:{plugin.maxhopidx} msatfactor:{plugin.msatfactoridx} running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}", 'debug')
                # plugin.log(f"RpcError: {str(e)}", 'debug')
                # check if we ran into the `rpc.waitsendpay` timeout
                if e.method == "waitsendpay" and e.error.get('code') == 200:
                    raise RpcError("rebalance", payload, {'message': 'Timeout reached'})
                # check if we have problems with our own channels
                erring_node = e.error.get('data', {}).get('erring_node')
                erring_channel = e.error.get('data', {}).get('erring_channel')
                erring_direction = e.error.get('data', {}).get('erring_direction')
                if erring_channel == incoming_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with incoming channel'})
                if erring_channel == outgoing_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with outgoing channel'})
                # exclude other erroring channels
                if erring_channel is not None and erring_direction is not None:
                    excludes.append(erring_channel + '/' + str(erring_direction))
                # count and exclude nodes that produce a lot of errors
                if erring_node and plugin.erringnodes > 0:
                    if nodes.get(erring_node) is None:
                        nodes[erring_node] = 0
                    nodes[erring_node] += 1
                    if nodes[erring_node] >= plugin.erringnodes:
                        excludes.append(erring_node)
    except Exception as e:
        return cleanup(plugin, label, payload, rpc_result, e)

    rpc_result = {'status': 'error', 'message': 'Timeout reached'}
    return cleanup(plugin, label, payload, rpc_result)
def a_minus_b(a: Millisatoshi, b: Millisatoshi):
    """Saturating subtraction: a - b, clamped at zero because Millisatoshi
    cannot be negative."""
    if a > b:
        return a - b
    return Millisatoshi(0)
def must_send(liquidity):
    """Amount we are obliged to send: our side's excess over the minimum."""
    # liquidity is too high, must send some sats
    return a_minus_b(liquidity["min"], liquidity["their"])
def should_send(liquidity):
    """Amount it would be good to send to reach the ideal split."""
    # liquidity is a bit high, would be good to send some sats
    return a_minus_b(liquidity["ideal"]["their"], liquidity["their"])
def could_send(liquidity):
    """Amount we can still send without dropping below the minimum."""
    # liquidity maybe a bit low, but can send some more sats, if needed
    return a_minus_b(liquidity["our"], liquidity["min"])
def must_receive(liquidity):
    """Amount we are obliged to receive: shortfall below the minimum."""
    # liquidity is too low, must receive some sats
    return a_minus_b(liquidity["min"], liquidity["our"])
def should_receive(liquidity):
    """Amount it would be good to receive to reach the ideal split."""
    # liquidity is a bit low, would be good to receive some sats
    return a_minus_b(liquidity["ideal"]["our"], liquidity["our"])
def could_receive(liquidity):
    """Amount we can still receive without their side dropping below the minimum."""
    # liquidity maybe a bit high, but can receive some more sats, if needed
    return a_minus_b(liquidity["their"], liquidity["min"])
def get_open_channels(plugin: Plugin):
    """Return every public channel that is in normal operating state."""
    return [ch
            for peer in plugin.rpc.listpeers()["peers"]
            for ch in peer["channels"]
            if ch["state"] == "CHANNELD_NORMAL" and not ch["private"]]
def check_liquidity_threshold(channels: list, threshold: Millisatoshi):
    """Return True when overall rebalances can still succeed with this
    per-channel liquidity `threshold`."""
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    required = sum((min(threshold, ch["total_msat"] / 2) for ch in channels),
                   Millisatoshi(0))
    their = total - our
    return required < our and required < their
def get_enough_liquidity_threshold(channels: list):
    """Binary-search the largest feasible per-channel liquidity threshold
    (to 1sat precision) and return half of it as a safety margin."""
    low = Millisatoshi(0)
    biggest_channel = max(channels, key=lambda ch: ch["total_msat"])
    high = biggest_channel["total_msat"] / 2
    while True:
        mid = (low + high) / 2
        if high - low < Millisatoshi("1sat"):
            break
        if check_liquidity_threshold(channels, mid):
            low = mid
        else:
            high = mid
    return mid / 2
def get_ideal_ratio(channels: list, enough_liquidity: Millisatoshi):
    # ideal liquidity ratio for big channels:
    # small channels should have a 50/50 liquidity ratio to be usable
    # and big channels can store the remaining liquidity above the threshold
    assert len(channels) > 0
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    chs = list(channels)  # get a copy!
    while len(chs) > 0:
        ratio = int(our) / int(total)
        smallest_channel = min(chs, key=lambda ch: ch["total_msat"])
        # stop once even the smallest remaining channel can hold the
        # threshold on both sides at the current ratio
        if smallest_channel["total_msat"] * min(ratio, 1 - ratio) > enough_liquidity:
            break
        # reserve a 50/50-ish share for the small channel and drop it from
        # the running totals before recomputing the ratio
        min_liquidity = min(smallest_channel["total_msat"] / 2, enough_liquidity)
        diff = smallest_channel["total_msat"] * ratio
        diff = max(diff, min_liquidity)
        diff = min(diff, smallest_channel["total_msat"] - min_liquidity)
        our -= diff
        total -= smallest_channel["total_msat"]
        chs.remove(smallest_channel)
    assert 0 <= ratio and ratio <= 1
    return ratio
def feeadjust_would_be_nice(plugin: Plugin):
    """Trigger the feeadjuster plugin once, when it is installed."""
    matching = [c for c in plugin.rpc.help().get("help")
                if c["command"].split()[0] == "feeadjust"]
    if len(matching) == 1:
        msg = plugin.rpc.feeadjust()
        plugin.log(f"Feeadjust succeeded: {msg}")
    else:
        plugin.log("The feeadjuster plugin would be useful here")
def get_max_amount(i: int, plugin: Plugin):
    """Retry amount for attempt `i`: shrinks geometrically (powers of 4)
    from the global liquidity threshold, floored at plugin.min_amount."""
    shrinking = plugin.enough_liquidity / (4 ** (i + 1))
    return max(plugin.min_amount, shrinking)
def get_max_fee(plugin: Plugin, msat: Millisatoshi):
    """Fee budget for rebalancing `msat`: our own base + ppm fee for that
    amount, scaled by the configured feeratio."""
    # TODO: sanity check
    own_fee = plugin.fee_base + msat * plugin.fee_ppm / 10**6
    return own_fee * plugin.feeratio
def get_chan(plugin: Plugin, scid: str):
    """Return our channel matching `scid`, or None. A peer may list several
    channel entries, e.g. when one was just closed and reopened."""
    all_channels = (chan
                    for peer in plugin.rpc.listpeers()["peers"]
                    for chan in peer["channels"])
    return next((c for c in all_channels if c.get("short_channel_id") == scid), None)
def liquidity_info(channel, enough_liquidity: Millisatoshi, ideal_ratio: float):
    """Describe a channel's liquidity: our/their balances plus the min/max
    bounds and the ideal split derived from the global ratio."""
    total = channel["total_msat"]
    ours = channel["to_us_msat"]
    lower = min(enough_liquidity, total / 2)
    upper = max(a_minus_b(total, enough_liquidity), total / 2)

    def clamp(value):
        # keep ideal targets inside [lower, upper]
        return min(max(value, lower), upper)

    return {
        "our": ours,
        "their": total - ours,
        "min": lower,
        "max": upper,
        "ideal": {"our": clamp(total * ideal_ratio),
                  "their": clamp(total * (1 - ideal_ratio))},
    }
def wait_for(success, timeout: int = 60):
    """Poll `success` with exponential backoff (0.25s up to a 5s cap) until
    it returns truthy (-> True) or `timeout` seconds elapse (-> False).

    Taken and modified from pyln-testing/pyln/testing/utils.py.
    """
    deadline = time.time() + timeout
    delay = 0.25
    while not success():
        remaining = deadline - time.time()
        if remaining <= 0:
            return False
        time.sleep(min(delay, remaining))
        delay = min(delay * 2, 5)
    return True
def wait_for_htlcs(plugin, failed_channels: list, scids: list = None):
    # HTLC settlement helper
    # taken and modified from pyln-testing/pyln/testing/utils.py
    """Wait until HTLCs of the given channels (or all, when scids is None)
    are settled. Returns False and records the scid in `failed_channels`
    on timeout or when the channel already failed before."""
    result = True
    peers = plugin.rpc.listpeers()['peers']
    for p, peer in enumerate(peers):
        if 'channels' in peer:
            for c, channel in enumerate(peer['channels']):
                if scids is not None and channel.get('short_channel_id') not in scids:
                    continue
                if channel.get('short_channel_id') in failed_channels:
                    result = False
                    continue
                if 'htlcs' in channel:
                    # lambda re-reads live state; p and c are bound now, so
                    # the closure targets exactly this peer/channel slot
                    if not wait_for(lambda: len(plugin.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0):
                        failed_channels.append(channel.get('short_channel_id'))
                        plugin.log(f"Timeout while waiting for htlc settlement in channel {channel.get('short_channel_id')}")
                        result = False
    return result
def maybe_rebalance_pairs(plugin: Plugin, ch1, ch2, failed_channels: list):
    """Repeatedly rebalance ch1 -> ch2 while it is worthwhile, shrinking the
    amount after failures. Returns {"success": bool, "fee_spent": msat}."""
    scid1 = ch1["short_channel_id"]
    scid2 = ch2["short_channel_id"]
    result = {"success": False, "fee_spent": Millisatoshi(0)}
    if scid1 + ":" + scid2 in failed_channels:
        return result
    # check if HTLCs are settled
    if not wait_for_htlcs(plugin, failed_channels, [scid1, scid2]):
        return result
    i = 0
    while not plugin.rebalance_stop:
        liquidity1 = liquidity_info(ch1, plugin.enough_liquidity, plugin.ideal_ratio)
        liquidity2 = liquidity_info(ch2, plugin.enough_liquidity, plugin.ideal_ratio)
        # pick the strongest justification: must/should/could on both ends
        amount1 = min(must_send(liquidity1), could_receive(liquidity2))
        amount2 = min(should_send(liquidity1), should_receive(liquidity2))
        amount3 = min(could_send(liquidity1), must_receive(liquidity2))
        amount = max(amount1, amount2, amount3)
        if amount < plugin.min_amount:
            return result
        amount = min(amount, get_max_amount(i, plugin))
        maxfee = get_max_fee(plugin, amount)
        plugin.log(f"Try to rebalance: {scid1} -> {scid2}; amount={amount}; maxfee={maxfee}")
        start_ts = time.time()
        try:
            res = rebalance(plugin, outgoing_scid=scid1, incoming_scid=scid2,
                            msatoshi=amount, retry_for=1200, maxfeepercent=0,
                            exemptfee=maxfee)
            if not res.get('status') == 'complete':
                raise Exception  # fall into exception handler below
        except Exception:
            failed_channels.append(scid1 + ":" + scid2)
            # rebalance failed, let's try with a smaller amount
            while (get_max_amount(i, plugin) >= amount and
                   get_max_amount(i, plugin) != get_max_amount(i + 1, plugin)):
                i += 1
            if amount > get_max_amount(i, plugin):
                continue
            return result
        result["success"] = True
        result["fee_spent"] += res["fee"]
        htlc_start_ts = time.time()
        # wait for settlement
        htlc_success = wait_for_htlcs(plugin, failed_channels, [scid1, scid2])
        current_ts = time.time()
        res["elapsed_time"] = str(timedelta(seconds=current_ts - start_ts))[:-3]
        res["htlc_time"] = str(timedelta(seconds=current_ts - htlc_start_ts))[:-3]
        plugin.log(f"Rebalance succeeded: {res}")
        if not htlc_success:
            return result
        # refresh channel state before the next round
        ch1 = get_chan(plugin, scid1)
        assert ch1 is not None
        ch2 = get_chan(plugin, scid2)
        assert ch2 is not None
    return result
def maybe_rebalance_once(plugin: Plugin, failed_channels: list):
    """Try every ordered channel pair until one rebalance succeeds (or we
    are told to stop); returns that outcome, else a failed result."""
    channels = get_open_channels(plugin)
    for outgoing in channels:
        for incoming in channels:
            if outgoing == incoming:
                continue
            outcome = maybe_rebalance_pairs(plugin, outgoing, incoming, failed_channels)
            if outcome["success"] or plugin.rebalance_stop:
                return outcome
    return {"success": False, "fee_spent": Millisatoshi(0)}
def feeadjuster_toggle(plugin: Plugin, new_value: bool):
    """Set feeadjuster's forward_event subscription to `new_value` and
    return its previous state (True when the plugin is not installed)."""
    matching = [c for c in plugin.rpc.help().get("help")
                if c["command"].split()[0] == "feeadjuster-toggle"]
    if len(matching) != 1:
        return True
    msg = plugin.rpc.feeadjuster_toggle(new_value)
    return msg["forward_event_subscription"]["previous"]
def rebalanceall_thread(plugin: Plugin):
    """Background worker for `rebalanceall`: keeps rebalancing channel
    pairs until nothing improves or `rebalancestop` is requested.
    The mutex guarantees a single running instance."""
    if not plugin.mutex.acquire(blocking=False):
        return
    try:
        start_ts = time.time()
        # pause the feeadjuster while we move liquidity around
        feeadjuster_state = feeadjuster_toggle(plugin, False)
        channels = get_open_channels(plugin)
        plugin.enough_liquidity = get_enough_liquidity_threshold(channels)
        plugin.ideal_ratio = get_ideal_ratio(channels, plugin.enough_liquidity)
        plugin.log(f"Automatic rebalance is running with enough liquidity threshold: {plugin.enough_liquidity}, "
                   f"ideal liquidity ratio: {plugin.ideal_ratio * 100:.2f}%, "
                   f"min rebalancable amount: {plugin.min_amount}, "
                   f"feeratio: {plugin.feeratio}")
        failed_channels = []
        success = 0
        fee_spent = Millisatoshi(0)
        while not plugin.rebalance_stop:
            result = maybe_rebalance_once(plugin, failed_channels)
            if not result["success"]:
                break
            success += 1
            fee_spent += result["fee_spent"]
        feeadjust_would_be_nice(plugin)
        feeadjuster_toggle(plugin, feeadjuster_state)
        elapsed_time = timedelta(seconds=time.time() - start_ts)
        plugin.rebalanceall_msg = f"Automatic rebalance finished: {success} successful rebalance, {fee_spent} fee spent, it took {str(elapsed_time)[:-3]}"
        plugin.log(plugin.rebalanceall_msg)
    finally:
        plugin.mutex.release()
@plugin.method("rebalanceall")
def rebalanceall(plugin: Plugin, min_amount: Millisatoshi = Millisatoshi("50000sat"), feeratio: float = 0.5):
    """Rebalance all unbalanced channels if possible for a very low fee.
    Default minimum rebalancable amount is 50000sat. Default feeratio = 0.5, half of our node's default fee.
    To be economical, it tries to fix the liquidity cheaper than it can be ruined by transaction forwards.
    It may run for a long time (hours) in the background, but can be stopped with the rebalancestop method.
    """
    # some early checks before we start the async thread
    if plugin.mutex.locked():
        return {"message": "Rebalance is already running, this may take a while. To stop it use the cli method 'rebalancestop'."}
    channels = get_open_channels(plugin)
    if len(channels) <= 1:
        return {"message": "Error: Not enough open channels to rebalance anything"}
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    min_amount = Millisatoshi(min_amount)
    if total - our < min_amount or our < min_amount:
        return {"message": "Error: Not enough liquidity to rebalance anything"}

    # param parsing ensure correct type
    plugin.feeratio = float(feeratio)
    plugin.min_amount = min_amount

    # run the job in the background so the RPC call returns immediately
    t = Thread(target=rebalanceall_thread, args=(plugin, ))
    t.start()
    return {"message": f"Rebalance started with min rebalancable amount: {plugin.min_amount}, feeratio: {plugin.feeratio}"}
@plugin.method("rebalancestop")
def rebalancestop(plugin: Plugin):
    """It stops the ongoing rebalanceall.
    """
    if not plugin.mutex.locked():
        # nothing running: report the last run's summary when we have one
        if plugin.rebalanceall_msg is None:
            msg = "No rebalance is running, nothing to stop."
        else:
            msg = (f"No rebalance is running, nothing to stop. "
                   f"Last 'rebalanceall' gave: {plugin.rebalanceall_msg}")
        return {"message": msg}
    # signal the worker, then block until it releases the mutex
    plugin.rebalance_stop = True
    plugin.mutex.acquire(blocking=True)
    plugin.rebalance_stop = False
    plugin.mutex.release()
    return {"message": plugin.rebalanceall_msg}
def health_score(liquidity):
    """Score a channel's liquidity health: 0 when any ideal/min value is
    zero, otherwise a 50-100 distance-from-ideal score damped towards 0
    when either side sits below the minimum liquidity."""
    ideal_our = int(liquidity["ideal"]["our"])
    ideal_their = int(liquidity["ideal"]["their"])
    minimum = int(liquidity["min"])
    if 0 in (ideal_our, ideal_their, minimum):
        return 0
    our = int(liquidity["our"])
    their = int(liquidity["their"])
    # distance from ideal liquidity (between 50 and 100)
    score = min(our / ideal_our, their / ideal_their) * 50 + 50
    # distance from minimal liquidity as a coefficient (between 0 and 1)
    coefficient = min(our / minimum, their / minimum, 1)
    return score * coefficient
@plugin.method("rebalancereport")
def rebalancereport(plugin: Plugin):
    """Show information about rebalance
    """
    res = {}
    res["rebalanceall_is_running"] = plugin.mutex.locked()
    res["getroute_method"] = plugin.getroute.__name__
    res["maxhops_threshold"] = plugin.maxhops
    res["msatfactor"] = plugin.msatfactor
    res["erringnodes_threshold"] = plugin.erringnodes
    channels = get_open_channels(plugin)
    health_percent = 0.0
    if len(channels) > 1:
        enough_liquidity = get_enough_liquidity_threshold(channels)
        ideal_ratio = get_ideal_ratio(channels, enough_liquidity)
        res["enough_liquidity_threshold"] = enough_liquidity
        res["ideal_liquidity_ratio"] = f"{ideal_ratio * 100:.2f}%"
        # capacity-weighted average of the per-channel health scores
        for ch in channels:
            liquidity = liquidity_info(ch, enough_liquidity, ideal_ratio)
            health_percent += health_score(liquidity) * int(ch["total_msat"])
        health_percent /= int(sum(ch["total_msat"] for ch in channels))
    else:
        res["enough_liquidity_threshold"] = Millisatoshi(0)
        res["ideal_liquidity_ratio"] = "0%"
    res["liquidity_health"] = f"{health_percent:.2f}%"
    invoices = plugin.rpc.listinvoices()['invoices']
    rebalances = [i for i in invoices if i.get('status') == 'paid' and i.get('label').startswith("Rebalance")]
    total_fee = Millisatoshi(0)
    total_amount = Millisatoshi(0)
    res["total_successful_rebalances"] = len(rebalances)
    for r in rebalances:
        try:
            pay = plugin.rpc.listpays(r["bolt11"])["pays"][0]
            total_amount += pay["amount_msat"]
            total_fee += pay["amount_sent_msat"] - pay["amount_msat"]
        except Exception:
            # invoice without a matching pay record: don't count it
            res["total_successful_rebalances"] -= 1
    res["total_rebalanced_amount"] = total_amount
    res["total_rebalance_fee"] = total_fee
    if total_amount > Millisatoshi(0):
        res["average_rebalance_fee_ppm"] = round(total_fee / total_amount * 10**6, 2)
    else:
        res["average_rebalance_fee_ppm"] = 0
    return res
@plugin.init()
def init(options, configuration, plugin):
    """Read node config and plugin options into plugin attributes used by
    the rebalance commands."""
    config = plugin.rpc.listconfigs()
    plugin.cltv_final = config.get("cltv-final")
    plugin.fee_base = Millisatoshi(config.get("fee-base"))
    plugin.fee_ppm = config.get("fee-per-satoshi")
    plugin.mutex = Lock()
    plugin.maxhops = int(options.get("rebalance-maxhops"))
    plugin.msatfactor = float(options.get("rebalance-msatfactor"))
    plugin.erringnodes = int(options.get("rebalance-erringnodes"))
    plugin.getroute = getroute_switch(options.get("rebalance-getroute"))
    plugin.rebalanceall_msg = None
    plugin.log(f"Plugin rebalance initialized with {plugin.fee_base} base / {plugin.fee_ppm} ppm fee  "
               f"cltv_final:{plugin.cltv_final}  "
               f"maxhops:{plugin.maxhops}  "
               f"msatfactor:{plugin.msatfactor}  "
               f"erringnodes:{plugin.erringnodes}  "
               f"getroute: {plugin.getroute.__name__}")
# Register plugin options (must happen before plugin.run() so lightningd
# learns about them during the getmanifest handshake).
plugin.add_option(
    "rebalance-getroute",
    "iterative",
    "Getroute method for route search can be 'basic' or 'iterative'."
    "'basic': Tries all routes sequentially. "
    "'iterative': Tries shorter and bigger routes first.",
    "string"
)
plugin.add_option(
    "rebalance-maxhops",
    "5",
    "Maximum number of hops for `getroute` call. Set to 0 to disable. "
    "Note: Two hops are added for own nodes input and output channel. "
    "Note: Routes with a 8 or more hops have less than 3% success rate.",
    "string"
)
plugin.add_option(
    "rebalance-msatfactor",
    "4",
    "Will instruct `getroute` call to use higher requested capacity first. "
    "Note: This will decrease to 1 when no routes can be found.",
    "string"
)
plugin.add_option(
    "rebalance-erringnodes",
    "5",
    "Exclude nodes from routing that raised N or more errors. "
    "Note: Use 0 to disable.",
    "string"
)
# Enter the plugin main loop (blocks, handling RPC requests from lightningd).
plugin.run()
| 42.977099 | 351 | 0.632741 |
acf07800cf9a097b1409e60366263bb0d1d50f23 | 551 | py | Python | armulator/armv6/opcodes/arm_instruction_set/arm_data_processing_and_miscellaneous_instructions/arm_data_processing_register/rrx_a1.py | matan1008/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 16 | 2018-01-22T14:36:49.000Z | 2021-12-17T15:39:52.000Z | armulator/armv6/opcodes/arm_instruction_set/arm_data_processing_and_miscellaneous_instructions/arm_data_processing_register/rrx_a1.py | AhmedMounir/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 3 | 2019-02-19T17:51:47.000Z | 2022-03-31T20:45:21.000Z | armulator/armv6/opcodes/arm_instruction_set/arm_data_processing_and_miscellaneous_instructions/arm_data_processing_register/rrx_a1.py | AhmedMounir/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 4 | 2020-06-18T23:51:03.000Z | 2022-02-09T17:43:13.000Z | from armulator.armv6.opcodes.abstract_opcodes.rrx import Rrx
from armulator.armv6.opcodes.opcode import Opcode
class RrxA1(Rrx, Opcode):
    """RRX (rotate right with extend), ARM encoding A1.

    Combines the generic Rrx behavior with Opcode bookkeeping for the
    A1 (ARM) instruction encoding.
    """

    def __init__(self, instruction, setflags, m, d):
        Opcode.__init__(self, instruction)
        Rrx.__init__(self, setflags, m, d)

    def is_pc_changing_opcode(self):
        # A destination of R15 (the PC) means executing this opcode
        # changes the program counter.
        return self.d == 15

    @staticmethod
    def from_bitarray(instr, processor):
        """Decode an A1-encoded RRX instruction from a 32-bit bitarray."""
        rm = instr[-4:]     # source register Rm (instruction bits 3..0)
        rd = instr[16:20]   # destination register Rd (instruction bits 15..12)
        s = instr[11]       # S flag (instruction bit 20): update condition flags
        # Pass keyword arguments directly instead of via an unpacked dict.
        return RrxA1(instr, setflags=s, m=rm.uint, d=rd.uint)
| 29 | 74 | 0.642468 |
acf0787961951c2af1a55cf3ce17f752510f47aa | 14,290 | py | Python | two1/wallet/hd_account.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2016-09-10T18:14:33.000Z | 2016-09-10T18:14:33.000Z | two1/wallet/hd_account.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | two1/wallet/hd_account.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-11-04T05:01:17.000Z | 2020-11-04T05:01:17.000Z | import time
from two1.bitcoin.crypto import HDKey, HDPrivateKey, HDPublicKey
from two1.wallet.wallet_txn import WalletTransaction
class HDAccount(object):
    """ An implementation of a single HD account to be used in an HD
    wallet.

    This class handles key generation/management for both internal
    (change) and external (payout) purposes. If provided with only
    a public key, it is only useful for public key
    generation/management. If a private key is provided instead,
    private keys can be generated for signing (spending) purposes.

    Transaction signing capability is NOT provided by this class.
    This is a conscious design decision as the wallet is better
    suited to signing & spending as there may be situations
    requiring spending coins from multiple accounts in a single
    transaction.

    This relies on a data provider that derives from
    TransactionDataProvider, which provides transaction data and
    balance information for provided addresses.

    Args:
        hd_key (HDKey): Either a HDPrivateKey (enables private key
            generation) or HDPublicKey which is the root of this account.
        name (str): Name of this account
        index (int): Child index of this account relative to the parent.
        data_provider (BaseProvider): A compatible data provider.
        testnet (bool): Whether or not this account will be used on testnet.
    """
    # BIP-44 chain indices: 0 = external/receive chain, 1 = internal/change.
    PAYOUT_CHAIN = 0
    CHANGE_CHAIN = 1
    # Number of consecutive unused addresses before discovery stops (BIP-44).
    GAP_LIMIT = 20
    # Batch size used when scanning address chains during discovery.
    DISCOVERY_INCREMENT = GAP_LIMIT
    MAX_UPDATE_THRESHOLD = 30  # seconds
    def __init__(self, hd_key, name, index, data_provider, cache_manager,
                 testnet=False, last_state=None, skip_discovery=False):
        # Take in either public or private key for this account as we
        # can derive everything from it.
        if not isinstance(hd_key, HDKey):
            raise TypeError("hd_key must be a HDKey object")
        self.key = hd_key
        self.name = name
        self.index = index
        self.data_provider = data_provider
        self.testnet = testnet
        # last_indices[chain] = highest used child index on that chain;
        # -1 means no address has been used yet.
        self.last_indices = [-1, -1]
        self._cache_manager = cache_manager
        self._last_update = 0
        self._last_full_update = 0
        # Restore last known indices from persisted wallet state, if given.
        if last_state is not None and isinstance(last_state, dict):
            if "last_payout_index" in last_state:
                self.last_indices[self.PAYOUT_CHAIN] = last_state["last_payout_index"]
            if "last_change_index" in last_state:
                self.last_indices[self.CHANGE_CHAIN] = last_state["last_change_index"]
        # Check to see that the address cache has up to last_indices
        for change in [self.PAYOUT_CHAIN, self.CHANGE_CHAIN]:
            k = self._cache_manager.get_chain_indices(self.index, change)
            for i in range(self.last_indices[change] + 1):
                if i not in k or k[i] != i:
                    # Cache is missing entries; force full rediscovery.
                    self.last_indices[change] = -1
                    break
        # Derive the per-chain keys. With a private root we keep both the
        # private and public chain keys; with a public root, public only.
        self._chain_priv_keys = [None, None]
        self._chain_pub_keys = [None, None]
        for change in [0, 1]:
            if isinstance(self.key, HDPrivateKey):
                self._chain_priv_keys[change] = HDPrivateKey.from_parent(self.key, change)
                self._chain_pub_keys[change] = self._chain_priv_keys[change].public_key
            else:
                self._chain_pub_keys[change] = HDPublicKey.from_parent(self.key, change)
        if not skip_discovery:
            self._sync_txns(check_all=True)
            self._update_balance()
    def _sync_txns(self, max_index=0, check_all=False):
        # Discover used addresses and cache their transactions, scanning each
        # chain in DISCOVERY_INCREMENT batches until GAP_LIMIT consecutive
        # unused addresses are found (BIP-44 account discovery).
        now = time.time()
        # Force a full rescan if the last full update is more than 20 min old.
        if now - self._last_full_update > 20 * 60:
            check_all = True
        for change in [0, 1]:
            found_last = False
            current_last = self.last_indices[change]
            addr_range = 0
            while not found_last:
                # Try a 2 * GAP_LIMIT at a go
                end = addr_range + self.DISCOVERY_INCREMENT
                addresses = {i: self.get_address(change, i)
                             for i in range(addr_range, end)}
                if self.data_provider.can_limit_by_height:
                    # Only fetch blocks newer than the cache unless rescanning.
                    min_block = None if check_all else self._cache_manager.last_block
                    txns = self.data_provider.get_transactions(
                        list(addresses.values()),
                        limit=10000,
                        min_block=min_block)
                else:
                    txns = self.data_provider.get_transactions(
                        list(addresses.values()),
                        limit=10000)
                inserted_txns = set()
                for i in sorted(addresses.keys()):
                    addr = addresses[i]
                    self._cache_manager.insert_address(self.index, change, i, addr)
                    addr_has_txns = self._cache_manager.address_has_txns(addr)
                    # Stop once we are GAP_LIMIT indices past the last used
                    # address with no activity in cache or in this response.
                    if not addr_has_txns or addr not in txns or \
                       not bool(txns[addr]):
                        if i - current_last >= self.GAP_LIMIT:
                            found_last = True
                            break
                    # NOTE(review): assumes the data provider returns an entry
                    # for every requested address; otherwise this lookup could
                    # raise KeyError -- confirm against the provider contract.
                    if txns[addr]:
                        current_last = i
                        for t in txns[addr]:
                            txid = str(t['transaction'].hash)
                            if txid not in inserted_txns:
                                # Wrap the provider txn + metadata for caching.
                                wt = WalletTransaction.from_transaction(
                                    t['transaction'])
                                wt.block = t['metadata']['block']
                                wt.block_hash = t['metadata']['block_hash']
                                wt.confirmations = t['metadata']['confirmations']
                                if 'network_time' in t['metadata']:
                                    wt.network_time = t['metadata']['network_time']
                                self._cache_manager.insert_txn(wt)
                                inserted_txns.add(txid)
                    if addr_has_txns:
                        current_last = i
                addr_range += self.DISCOVERY_INCREMENT
            self.last_indices[change] = current_last
        self._last_update = time.time()
        if check_all:
            self._last_full_update = self._last_update
def _update_balance(self):
balance = {'confirmed': 0, 'total': 0}
self._address_balances = {}
for unconfirmed in [True, False]:
addr_balances = self._cache_manager.get_balances(
addresses=self.all_used_addresses,
include_unconfirmed=unconfirmed)
key = 'total' if unconfirmed else 'confirmed'
for k, v in addr_balances.items():
if k not in self._address_balances:
self._address_balances[k] = {'confirmed': 0, 'total': 0}
self._address_balances[k][key] = v
balance[key] += v
self._balance_cache = balance
    def has_txns(self):
        """ Returns whether or not there are any discovered transactions
        associated with any address in the account.

        Returns:
            bool: True if there are discovered transactions, False otherwise.
        """
        # Delegates entirely to the transaction cache for this account index.
        return self._cache_manager.has_txns(self.index)
def find_addresses(self, addresses):
""" Searches both the change and payout chains up to self.GAP_LIMIT
addresses beyond the last known index for the chain.
Args:
addresses (list(str)): List of Base58Check encoded addresses
Returns:
dict:
Dictionary keyed by address where the value is a tuple
containing the chain (0 or 1) and child index in the chain.
Only found addresses are included in the dict.
"""
found = {}
for change in [0, 1]:
for i in range(self.last_indices[change] + self.GAP_LIMIT + 1):
addr = self.get_address(change, i)
if addr in addresses:
found[addr] = (self.index, change, i)
return found
    def get_public_key(self, change, n=-1):
        """ Returns a public key in the chain

        Args:
            change (bool): If True, returns an address for change purposes,
                otherwise returns an address for payment.
            n (int): index of address in chain. If n == -1, a new key
                is created with index = self.last_[change|payout]_index + 1

        Returns:
            HDPublicKey: A public key in this account's chain.
        """
        # We only use public key derivation per BIP44
        c = int(change)
        k = self._chain_pub_keys[c]
        if n < 0:
            # Allocating a new key: advance the chain's last-used index and
            # record the derived address in the cache.
            self.last_indices[c] += 1
            i = self.last_indices[c]
            pub_key = HDPublicKey.from_parent(k, i)
            addr = pub_key.address(True, self.testnet)
            self._cache_manager.insert_address(self.index, change, i, addr)
        else:
            pub_key = HDPublicKey.from_parent(k, n)
        return pub_key
def get_private_key(self, change, n):
""" Returns a private key in the chain for use in signing messages
or transactions.
Args:
change (bool): If True, returns an address for change purposes,
otherwise returns an address for payment.
n (int): index of address in chain.
Returns:
HDPrivateKey: A private key in this account's chain.
"""
# We only use public key derivation per BIP44
k = self._chain_priv_keys[change]
if k is None:
raise ValueError("No private key provided for account.")
return HDPrivateKey.from_parent(k, n)
    def get_address(self, change, n=-1):
        """ Returns a public address

        Args:
            change (bool): If True, returns an address for change purposes,
                otherwise returns an address for payment.
            n (int): index of address in chain. If n == -1, a new key
                is created with index = self.last_[change|payout]_index + 1

        Returns:
            str: A bitcoin address
        """
        # If this is an address we've already generated, don't regenerate.
        c = int(change)
        cached = self._cache_manager.get_address(self.index, c, n)
        if cached is not None:
            return cached
        # Always do compressed keys
        return self.get_public_key(change, n).address(True, self.testnet)
    def _new_key_or_address(self, change, key=False):
        # Shared implementation for get_next_address()/get_next_public_key():
        # hand out a new key/address only if the current one has been used.
        c = int(change)
        last_index = self.last_indices[c]
        # Check to see if the current address has any txns
        # associated with it before giving out a new one.
        ret = None
        need_new = False
        if last_index >= 0:
            current_addr = self._cache_manager.get_address(self.index, c, last_index)
            need_new = self._cache_manager.address_has_txns(current_addr)
        else:
            # No address allocated yet on this chain.
            need_new = True
        if need_new:
            ret = self.get_public_key(change) if key else self.get_address(change, last_index + 1)
        else:
            ret = self.get_public_key(change, last_index) if key else current_addr
        return ret
    def get_next_address(self, change):
        """ Returns the next public address in the specified chain.

        A new address is only returned if there are transactions found
        for the current address.

        Args:
            change (bool): If True, returns an address for change purposes,
                otherwise returns an address for payment.

        Returns:
            str: A bitcoin address
        """
        return self._new_key_or_address(change)
    def get_next_public_key(self, change):
        """ Returns the next public key in the specified chain.

        A new key is only returned if there are transactions found
        for the current key.

        Args:
            change (bool): If True, returns a PublicKey for change purposes,
                otherwise returns a PublicKey for payment.

        Returns:
            PublicKey: A public key
        """
        return self._new_key_or_address(change, True)
    def get_utxos(self, include_unconfirmed=False):
        """ Gets all unspent transactions associated with all addresses
        up to and including the last known indices for both change
        and payout chains.

        Args:
            include_unconfirmed (bool): If True, include UTXOs from
                unconfirmed transactions as well.
        """
        return self._cache_manager.get_utxos(addresses=self.all_used_addresses,
                                             include_unconfirmed=include_unconfirmed)
def to_dict(self):
""" Returns a JSON-serializable dict to save account data
Returns:
dict: Dict that can be serialized into a JSON string
"""
if isinstance(self.key, HDPublicKey):
pub_key = self.key
else:
pub_key = self.key.public_key
return {"public_key": pub_key.to_b58check(self.testnet),
"last_payout_index": self.last_indices[self.PAYOUT_CHAIN],
"last_change_index": self.last_indices[self.CHANGE_CHAIN]}
    def balances_by_address(self):
        """ Returns a dict with balances for each used
        address in the account

        Returns:
            dict: key/values are addresses and current balance
        """
        # Returns the cache built by _update_balance(); not refreshed here.
        return self._address_balances
    @property
    def balance(self):
        """ Returns balances, both confirmed and total, for this
        account.

        Returns:
            dict:
                'confirmed' and 'total' keys with balance values in
                satoshis for each. The total balance includes
                unconfirmed transactions.
        """
        # Refresh the cached totals before returning them.
        self._update_balance()
        return self._balance_cache
@property
def all_used_addresses(self):
""" List of all used addresses
Returns:
list(str): list of all used addresses (Base58Check encoded)
"""
all_addresses = []
for change in [self.PAYOUT_CHAIN, self.CHANGE_CHAIN]:
last = self.last_indices[change]
all_addresses += [self.get_address(change, i)
for i in range(last + 1)]
return all_addresses
| 37.904509 | 98 | 0.585934 |
acf07886ea8d5760e0792afc4d6ffc26a8e7aaed | 10,726 | py | Python | intersight/model/kubernetes_trusted_registries_policy_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/kubernetes_trusted_registries_policy_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/kubernetes_trusted_registries_policy_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    # Imported lazily (and published via globals()) to break circular imports
    # between generated Intersight model modules.
    from intersight.model.kubernetes_cluster_profile_relationship import KubernetesClusterProfileRelationship
    from intersight.model.organization_organization_relationship import OrganizationOrganizationRelationship
    globals()['KubernetesClusterProfileRelationship'] = KubernetesClusterProfileRelationship
    globals()['OrganizationOrganizationRelationship'] = OrganizationOrganizationRelationship
class KubernetesTrustedRegistriesPolicyAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum constraints: both discriminator fields only accept this one value.
    allowed_values = {
        ('class_id',): {
            'KUBERNETES.TRUSTEDREGISTRIESPOLICY': "kubernetes.TrustedRegistriesPolicy",
        },
        ('object_type',): {
            'KUBERNETES.TRUSTEDREGISTRIESPOLICY': "kubernetes.TrustedRegistriesPolicy",
        },
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'class_id': (str,),  # noqa: E501
            'object_type': (str,),  # noqa: E501
            'root_ca_registries': ([str], none_type,),  # noqa: E501
            'unsigned_registries': ([str], none_type,),  # noqa: E501
            'cluster_profiles': ([KubernetesClusterProfileRelationship], none_type,),  # noqa: E501
            'organization': (OrganizationOrganizationRelationship,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'class_id': 'ClassId',  # noqa: E501
        'object_type': 'ObjectType',  # noqa: E501
        'root_ca_registries': 'RootCaRegistries',  # noqa: E501
        'unsigned_registries': 'UnsignedRegistries',  # noqa: E501
        'cluster_profiles': 'ClusterProfiles',  # noqa: E501
        'organization': 'Organization',  # noqa: E501
    }

    _composed_schemas = {}

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """KubernetesTrustedRegistriesPolicyAllOf - a model defined in OpenAPI

        Args:

        Keyword Args:
            class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "kubernetes.TrustedRegistriesPolicy", must be one of ["kubernetes.TrustedRegistriesPolicy", ]  # noqa: E501
            object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "kubernetes.TrustedRegistriesPolicy", must be one of ["kubernetes.TrustedRegistriesPolicy", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            root_ca_registries ([str], none_type): [optional]  # noqa: E501
            unsigned_registries ([str], none_type): [optional]  # noqa: E501
            cluster_profiles ([KubernetesClusterProfileRelationship], none_type): An array of relationships to kubernetesClusterProfile resources.. [optional]  # noqa: E501
            organization (OrganizationOrganizationRelationship): [optional]  # noqa: E501
        """

        class_id = kwargs.get('class_id', "kubernetes.TrustedRegistriesPolicy")
        object_type = kwargs.get('object_type', "kubernetes.TrustedRegistriesPolicy")

        # Pop framework-internal kwargs before treating the rest as model data.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.class_id = class_id
        self.object_type = object_type
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 53.09901 | 1,678 | 0.653739 |
acf07896f67d78a89d672d7b20d7f574ca09eb9c | 9,536 | py | Python | atom/nucleus/python/nucleus_api/models/chat_info.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/nucleus_api/models/chat_info.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/nucleus_api/models/chat_info.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ChatInfo(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    swagger_types = {
        'assigned_to': 'str',
        'chat_log': 'str',
        'comments': 'str',
        'create_date': 'datetime',
        'id': 'str',
        'is_notification': 'bool',
        'is_open': 'bool',
        'metadata': 'dict(str, str)',
        'secondary_id': 'str',
        'update_date': 'datetime'
    }

    attribute_map = {
        'assigned_to': 'assigned_to',
        'chat_log': 'chat_log',
        'comments': 'comments',
        'create_date': 'create_date',
        'id': 'id',
        'is_notification': 'is_notification',
        'is_open': 'is_open',
        'metadata': 'metadata',
        'secondary_id': 'secondary_id',
        'update_date': 'update_date'
    }

    def __init__(self, assigned_to=None, chat_log=None, comments=None, create_date=None, id=None, is_notification=None, is_open=None, metadata=None, secondary_id=None, update_date=None):  # noqa: E501
        """ChatInfo - a model defined in Swagger"""  # noqa: E501
        self._assigned_to = None
        self._chat_log = None
        self._comments = None
        self._create_date = None
        self._id = None
        self._is_notification = None
        self._is_open = None
        self._metadata = None
        self._secondary_id = None
        self._update_date = None
        self.discriminator = None
        if assigned_to is not None:
            self.assigned_to = assigned_to
        # chat_log is the only required field; its setter rejects None.
        self.chat_log = chat_log
        if comments is not None:
            self.comments = comments
        if create_date is not None:
            self.create_date = create_date
        if id is not None:
            self.id = id
        if is_notification is not None:
            self.is_notification = is_notification
        if is_open is not None:
            self.is_open = is_open
        if metadata is not None:
            self.metadata = metadata
        if secondary_id is not None:
            self.secondary_id = secondary_id
        if update_date is not None:
            self.update_date = update_date

    @property
    def assigned_to(self):
        """Gets the assigned_to of this ChatInfo.  # noqa: E501

        assignedTo  # noqa: E501

        :return: The assigned_to of this ChatInfo.  # noqa: E501
        :rtype: str
        """
        return self._assigned_to

    @assigned_to.setter
    def assigned_to(self, assigned_to):
        """Sets the assigned_to of this ChatInfo.

        assignedTo  # noqa: E501

        :param assigned_to: The assigned_to of this ChatInfo.  # noqa: E501
        :type: str
        """

        self._assigned_to = assigned_to

    @property
    def chat_log(self):
        """Gets the chat_log of this ChatInfo.  # noqa: E501

        chatInfo  # noqa: E501

        :return: The chat_log of this ChatInfo.  # noqa: E501
        :rtype: str
        """
        return self._chat_log

    @chat_log.setter
    def chat_log(self, chat_log):
        """Sets the chat_log of this ChatInfo.

        chatInfo  # noqa: E501

        :param chat_log: The chat_log of this ChatInfo.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if chat_log is None:
            raise ValueError("Invalid value for `chat_log`, must not be `None`")  # noqa: E501

        self._chat_log = chat_log

    @property
    def comments(self):
        """Gets the comments of this ChatInfo.  # noqa: E501

        comments  # noqa: E501

        :return: The comments of this ChatInfo.  # noqa: E501
        :rtype: str
        """
        return self._comments

    @comments.setter
    def comments(self, comments):
        """Sets the comments of this ChatInfo.

        comments  # noqa: E501

        :param comments: The comments of this ChatInfo.  # noqa: E501
        :type: str
        """

        self._comments = comments

    @property
    def create_date(self):
        """Gets the create_date of this ChatInfo.  # noqa: E501

        :return: The create_date of this ChatInfo.  # noqa: E501
        :rtype: datetime
        """
        return self._create_date

    @create_date.setter
    def create_date(self, create_date):
        """Sets the create_date of this ChatInfo.

        :param create_date: The create_date of this ChatInfo.  # noqa: E501
        :type: datetime
        """

        self._create_date = create_date

    @property
    def id(self):
        """Gets the id of this ChatInfo.  # noqa: E501

        :return: The id of this ChatInfo.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this ChatInfo.

        :param id: The id of this ChatInfo.  # noqa: E501
        :type: str
        """

        self._id = id

    @property
    def is_notification(self):
        """Gets the is_notification of this ChatInfo.  # noqa: E501

        isNotification  # noqa: E501

        :return: The is_notification of this ChatInfo.  # noqa: E501
        :rtype: bool
        """
        return self._is_notification

    @is_notification.setter
    def is_notification(self, is_notification):
        """Sets the is_notification of this ChatInfo.

        isNotification  # noqa: E501

        :param is_notification: The is_notification of this ChatInfo.  # noqa: E501
        :type: bool
        """

        self._is_notification = is_notification

    @property
    def is_open(self):
        """Gets the is_open of this ChatInfo.  # noqa: E501

        isOpen  # noqa: E501

        :return: The is_open of this ChatInfo.  # noqa: E501
        :rtype: bool
        """
        return self._is_open

    @is_open.setter
    def is_open(self, is_open):
        """Sets the is_open of this ChatInfo.

        isOpen  # noqa: E501

        :param is_open: The is_open of this ChatInfo.  # noqa: E501
        :type: bool
        """

        self._is_open = is_open

    @property
    def metadata(self):
        """Gets the metadata of this ChatInfo.  # noqa: E501

        :return: The metadata of this ChatInfo.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this ChatInfo.

        :param metadata: The metadata of this ChatInfo.  # noqa: E501
        :type: dict(str, str)
        """

        self._metadata = metadata

    @property
    def secondary_id(self):
        """Gets the secondary_id of this ChatInfo.  # noqa: E501

        :return: The secondary_id of this ChatInfo.  # noqa: E501
        :rtype: str
        """
        return self._secondary_id

    @secondary_id.setter
    def secondary_id(self, secondary_id):
        """Sets the secondary_id of this ChatInfo.

        :param secondary_id: The secondary_id of this ChatInfo.  # noqa: E501
        :type: str
        """

        self._secondary_id = secondary_id

    @property
    def update_date(self):
        """Gets the update_date of this ChatInfo.  # noqa: E501

        :return: The update_date of this ChatInfo.  # noqa: E501
        :rtype: datetime
        """
        return self._update_date

    @update_date.setter
    def update_date(self, update_date):
        """Sets the update_date of this ChatInfo.

        :param update_date: The update_date of this ChatInfo.  # noqa: E501
        :type: datetime
        """

        self._update_date = update_date

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ChatInfo, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ChatInfo):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.415512 | 200 | 0.572253 |
acf078a4bcfa9b6b01e0d7f3448a4bbb96ae3344 | 1,868 | py | Python | tacker/vnfm/policy_actions/autoscaling/autoscaling.py | mail2nsrajesh/tacker | dce6690659836c2885f1cf8227c19be234f8fe25 | [
"Apache-2.0"
] | 1 | 2020-10-13T05:20:38.000Z | 2020-10-13T05:20:38.000Z | tacker/vnfm/policy_actions/autoscaling/autoscaling.py | mail2nsrajesh/tacker | dce6690659836c2885f1cf8227c19be234f8fe25 | [
"Apache-2.0"
] | null | null | null | tacker/vnfm/policy_actions/autoscaling/autoscaling.py | mail2nsrajesh/tacker | dce6690659836c2885f1cf8227c19be234f8fe25 | [
"Apache-2.0"
] | 1 | 2019-01-21T10:57:10.000Z | 2019-01-21T10:57:10.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from oslo_utils import timeutils
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
from tacker.vnfm.policy_actions import abstract_action
LOG = logging.getLogger(__name__)
def _log_monitor_events(context, vnf_dict, evt_details):
    """Record a monitoring event for *vnf_dict* in the common-services event log."""
    event_db = common_services_db_plugin.CommonServicesPluginDb()
    event_db.create_event(
        context,
        res_id=vnf_dict['id'],
        res_type=constants.RES_TYPE_VNF,
        res_state=vnf_dict['status'],
        evt_type=constants.RES_EVT_MONITOR,
        tstamp=timeutils.utcnow(),
        details=evt_details,
    )
class VNFActionAutoscaling(abstract_action.AbstractPolicyAction):
    """Policy action that triggers a scale operation on a VNF."""

    def get_type(self):
        """Identifier under which this action is registered."""
        return 'autoscaling'

    def get_name(self):
        """Short action name."""
        return 'autoscaling'

    def get_description(self):
        """Human-readable description of the action."""
        return 'Tacker VNF auto-scaling policy'

    def execute_action(self, plugin, context, vnf_dict, args):
        """Log the trigger event, then ask the plugin to scale the VNF."""
        _log_monitor_events(context, vnf_dict, "ActionAutoscalingHeat invoked")
        plugin.create_vnf_scale(context, vnf_dict['id'], args)
| 36.627451 | 78 | 0.676124 |
acf079125b88f43c4f2361c20a57c3461c3ea211 | 1,977 | py | Python | chroma_core/models/action_queue.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | null | null | null | chroma_core/models/action_queue.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | null | null | null | chroma_core/models/action_queue.py | beevans/integrated-manager-for-lustre | 6b7e49b8a58058e6139ad815a4388f21a581dfa0 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from django.db import models
from django.db.models import CASCADE
from django.contrib.postgres.fields import ArrayField, JSONField
from chroma_core.lib.job import DependOn, DependAll, Step, job_log
from chroma_core.models import ManagedFilesystem
class LustreFidField(models.Field):
    """Custom model field stored in the database column type ``lustre_fid``."""
    description = "A Lustre FID"
    # NOTE(review): these look like the seq/oid/ver components of a Lustre FID,
    # but as plain class attributes on a Field subclass Django does not treat
    # them as columns -- confirm they are intentional.
    seq = models.BigIntegerField()
    oid = models.IntegerField()
    ver = models.IntegerField()
    def db_type(self, connection):
        # Column type emitted into DDL for this field.
        return "lustre_fid"
    class Meta:
        app_label = "chroma_core"
class Mailboxes(models.Model):
    """List of deliveries for action queues.

    Tracks the lifecycle (start/finish/state) and progress counters of one
    delivery run against a managed filesystem.
    """
    class Meta:
        app_label = "chroma_core"
    filesystem = models.ForeignKey("ManagedFilesystem", on_delete=CASCADE)
    name = models.CharField(max_length=128)
    start = models.DateTimeField()
    finish = models.DateTimeField()
    state = models.CharField(max_length=16)
    fids_total = models.BigIntegerField(default=0)
    fids_completed = models.BigIntegerField(default=0)
    fids_failed = models.BigIntegerField(default=0)
    data_transfered = models.BigIntegerField(default=0)
    keep_failed = models.BooleanField(default=True, null=False)
    # Actually links to ActionType
    actions = ArrayField(models.PositiveIntegerField())
    # Fix: use the ``dict`` callable as the default. ``default={}`` would make
    # every model instance share one mutable dict (Django system check E010).
    args = JSONField(default=dict)
class FidActionQueue(models.Model):
    """Per-FID work item queued against a Mailboxes delivery."""
    # Use of abstract base classes to avoid django bug #12002
    class Meta:
        app_label = "chroma_core"
    fid = LustreFidField()
    mailbox = models.ForeignKey("Mailboxes", on_delete=CASCADE)
    # Fix: use the ``dict`` callable as the default so instances do not share a
    # single mutable dict (``default={}`` triggers Django system check E010).
    entries = JSONField(default=dict)
    failed = models.PositiveSmallIntegerField()
class ActionType(models.Model):
    """Lookup table of unique action names (presumably referenced by id from
    Mailboxes.actions, per the comment there -- confirm)."""
    class Meta:
        app_label = "chroma_core"
        ordering = ["id"]
        # Single-group shorthand: ("name",) declares one uniqueness group.
        unique_together = ("name",)
    name = models.CharField(max_length=64)
| 27.082192 | 74 | 0.713708 |
acf079238c5c9ad138099c4e51ebc94fb1e09a56 | 51 | py | Python | 00001_____Challenges/02_HackerRank/03_In Python/05_Loops/loops.py | Magdyedwar1996/Data-Structures-Algorithms | 11b1823374089dfc86611b65719348d012834872 | [
"MIT"
] | null | null | null | 00001_____Challenges/02_HackerRank/03_In Python/05_Loops/loops.py | Magdyedwar1996/Data-Structures-Algorithms | 11b1823374089dfc86611b65719348d012834872 | [
"MIT"
] | null | null | null | 00001_____Challenges/02_HackerRank/03_In Python/05_Loops/loops.py | Magdyedwar1996/Data-Structures-Algorithms | 11b1823374089dfc86611b65719348d012834872 | [
"MIT"
] | null | null | null | a = int(input())
for i in range(a):
print(i**2) | 17 | 18 | 0.54902 |
acf0793cd3192418bf995ee68f91aa8af2ad29f0 | 576 | py | Python | huobi/model/account/account_asset_valuation.py | xujunhuii/huobi_Python | 958df8b22ce774329c7e15a1ecf2f52eea5f6af8 | [
"Apache-2.0"
] | null | null | null | huobi/model/account/account_asset_valuation.py | xujunhuii/huobi_Python | 958df8b22ce774329c7e15a1ecf2f52eea5f6af8 | [
"Apache-2.0"
] | null | null | null | huobi/model/account/account_asset_valuation.py | xujunhuii/huobi_Python | 958df8b22ce774329c7e15a1ecf2f52eea5f6af8 | [
"Apache-2.0"
] | null | null | null | class AccountAssetValuationResult:
"""
The account information for spot account, margin account etc.
:member
balance: balance valuation bases on given valuation currency.
timestamp: unix timestamp from server.
"""
def __init__(self):
self.balance = ""
self.timestamp = 0
def print_object(self, format_data=""):
from huobi.utils.print_mix_object import PrintBasic
PrintBasic.print_basic(self.balance, format_data + "balance")
PrintBasic.print_basic(self.timestamp, format_data + "timestamp")
| 28.8 | 73 | 0.682292 |
acf07bd6a0a09c6718ce21b9890de7e458c2f027 | 10,602 | py | Python | Lib/site-packages/astroid/brain/brain_http.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/astroid/brain/brain_http.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/astroid/brain/brain_http.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | # Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
"""Astroid brain hints for some of the `http` module."""
import textwrap
from astroid.brain.helpers import register_module_extender
from astroid.builder import AstroidBuilder
from astroid.manager import AstroidManager
def _http_transform():
    """Build an astroid module that models ``http.HTTPStatus`` and its members."""
    code = textwrap.dedent(
        """
        from collections import namedtuple
        _HTTPStatus = namedtuple('_HTTPStatus', 'value phrase description')
        class HTTPStatus:
            @property
            def phrase(self):
                return ""
            @property
            def value(self):
                return 0
            @property
            def description(self):
                return ""
            # informational
            CONTINUE = _HTTPStatus(100, 'Continue', 'Request received, please continue')
            SWITCHING_PROTOCOLS = _HTTPStatus(101, 'Switching Protocols',
                                              'Switching to new protocol; obey Upgrade header')
            PROCESSING = _HTTPStatus(102, 'Processing', '')
            OK = _HTTPStatus(200, 'OK', 'Request fulfilled, document follows')
            CREATED = _HTTPStatus(201, 'Created', 'Document created, URL follows')
            ACCEPTED = _HTTPStatus(202, 'Accepted',
                                   'Request accepted, processing continues off-line')
            NON_AUTHORITATIVE_INFORMATION = _HTTPStatus(203,
                                                        'Non-Authoritative Information', 'Request fulfilled from cache')
            NO_CONTENT = _HTTPStatus(204, 'No Content', 'Request fulfilled, nothing follows')
            RESET_CONTENT =_HTTPStatus(205, 'Reset Content', 'Clear input form for further input')
            PARTIAL_CONTENT = _HTTPStatus(206, 'Partial Content', 'Partial content follows')
            MULTI_STATUS = _HTTPStatus(207, 'Multi-Status', '')
            ALREADY_REPORTED = _HTTPStatus(208, 'Already Reported', '')
            IM_USED = _HTTPStatus(226, 'IM Used', '')
            MULTIPLE_CHOICES = _HTTPStatus(300, 'Multiple Choices',
                                           'Object has several resources -- see URI list')
            MOVED_PERMANENTLY = _HTTPStatus(301, 'Moved Permanently',
                                            'Object moved permanently -- see URI list')
            FOUND = _HTTPStatus(302, 'Found', 'Object moved temporarily -- see URI list')
            SEE_OTHER = _HTTPStatus(303, 'See Other', 'Object moved -- see Method and URL list')
            NOT_MODIFIED = _HTTPStatus(304, 'Not Modified',
                                       'Document has not changed since given time')
            USE_PROXY = _HTTPStatus(305, 'Use Proxy',
                                    'You must use proxy specified in Location to access this resource')
            TEMPORARY_REDIRECT = _HTTPStatus(307, 'Temporary Redirect',
                                             'Object moved temporarily -- see URI list')
            PERMANENT_REDIRECT = _HTTPStatus(308, 'Permanent Redirect',
                                             'Object moved permanently -- see URI list')
            BAD_REQUEST = _HTTPStatus(400, 'Bad Request',
                                      'Bad request syntax or unsupported method')
            UNAUTHORIZED = _HTTPStatus(401, 'Unauthorized',
                                       'No permission -- see authorization schemes')
            PAYMENT_REQUIRED = _HTTPStatus(402, 'Payment Required',
                                           'No payment -- see charging schemes')
            FORBIDDEN = _HTTPStatus(403, 'Forbidden',
                                    'Request forbidden -- authorization will not help')
            NOT_FOUND = _HTTPStatus(404, 'Not Found',
                                    'Nothing matches the given URI')
            METHOD_NOT_ALLOWED = _HTTPStatus(405, 'Method Not Allowed',
                                             'Specified method is invalid for this resource')
            NOT_ACCEPTABLE = _HTTPStatus(406, 'Not Acceptable',
                                         'URI not available in preferred format')
            PROXY_AUTHENTICATION_REQUIRED = _HTTPStatus(407,
                                                        'Proxy Authentication Required',
                                                        'You must authenticate with this proxy before proceeding')
            REQUEST_TIMEOUT = _HTTPStatus(408, 'Request Timeout',
                                          'Request timed out; try again later')
            CONFLICT = _HTTPStatus(409, 'Conflict', 'Request conflict')
            GONE = _HTTPStatus(410, 'Gone',
                               'URI no longer exists and has been permanently removed')
            LENGTH_REQUIRED = _HTTPStatus(411, 'Length Required',
                                          'Client must specify Content-Length')
            PRECONDITION_FAILED = _HTTPStatus(412, 'Precondition Failed',
                                              'Precondition in headers is false')
            REQUEST_ENTITY_TOO_LARGE = _HTTPStatus(413, 'Request Entity Too Large',
                                                   'Entity is too large')
            REQUEST_URI_TOO_LONG = _HTTPStatus(414, 'Request-URI Too Long',
                                               'URI is too long')
            UNSUPPORTED_MEDIA_TYPE = _HTTPStatus(415, 'Unsupported Media Type',
                                                 'Entity body in unsupported format')
            REQUESTED_RANGE_NOT_SATISFIABLE = _HTTPStatus(416,
                                                          'Requested Range Not Satisfiable',
                                                          'Cannot satisfy request range')
            EXPECTATION_FAILED = _HTTPStatus(417, 'Expectation Failed',
                                             'Expect condition could not be satisfied')
            MISDIRECTED_REQUEST = _HTTPStatus(421, 'Misdirected Request',
                                              'Server is not able to produce a response')
            UNPROCESSABLE_ENTITY = _HTTPStatus(422, 'Unprocessable Entity')
            LOCKED = _HTTPStatus(423, 'Locked')
            FAILED_DEPENDENCY = _HTTPStatus(424, 'Failed Dependency')
            UPGRADE_REQUIRED = _HTTPStatus(426, 'Upgrade Required')
            PRECONDITION_REQUIRED = _HTTPStatus(428, 'Precondition Required',
                                                'The origin server requires the request to be conditional')
            TOO_MANY_REQUESTS = _HTTPStatus(429, 'Too Many Requests',
                                            'The user has sent too many requests in '
                                            'a given amount of time ("rate limiting")')
            REQUEST_HEADER_FIELDS_TOO_LARGE = _HTTPStatus(431,
                                                          'Request Header Fields Too Large',
                                                          'The server is unwilling to process the request because its header '
                                                          'fields are too large')
            UNAVAILABLE_FOR_LEGAL_REASONS = _HTTPStatus(451,
                                                        'Unavailable For Legal Reasons',
                                                        'The server is denying access to the '
                                                        'resource as a consequence of a legal demand')
            INTERNAL_SERVER_ERROR = _HTTPStatus(500, 'Internal Server Error',
                                                'Server got itself in trouble')
            NOT_IMPLEMENTED = _HTTPStatus(501, 'Not Implemented',
                                          'Server does not support this operation')
            BAD_GATEWAY = _HTTPStatus(502, 'Bad Gateway',
                                      'Invalid responses from another server/proxy')
            SERVICE_UNAVAILABLE = _HTTPStatus(503, 'Service Unavailable',
                                              'The server cannot process the request due to a high load')
            GATEWAY_TIMEOUT = _HTTPStatus(504, 'Gateway Timeout',
                                          'The gateway server did not receive a timely response')
            HTTP_VERSION_NOT_SUPPORTED = _HTTPStatus(505, 'HTTP Version Not Supported',
                                                     'Cannot fulfill request')
            VARIANT_ALSO_NEGOTIATES = _HTTPStatus(506, 'Variant Also Negotiates')
            INSUFFICIENT_STORAGE = _HTTPStatus(507, 'Insufficient Storage')
            LOOP_DETECTED = _HTTPStatus(508, 'Loop Detected')
            NOT_EXTENDED = _HTTPStatus(510, 'Not Extended')
            NETWORK_AUTHENTICATION_REQUIRED = _HTTPStatus(511,
                                                          'Network Authentication Required',
                                                          'The client needs to authenticate to gain network access')
    """
    )
    # The string is parsed (never executed), so astroid can infer HTTPStatus members.
    return AstroidBuilder(AstroidManager()).string_build(code)
def _http_client_transform():
    """Build an astroid module aliasing ``http.client``'s status-code constants."""
    return AstroidBuilder(AstroidManager()).string_build(
        textwrap.dedent(
            """
    from http import HTTPStatus
    CONTINUE = HTTPStatus.CONTINUE
    SWITCHING_PROTOCOLS = HTTPStatus.SWITCHING_PROTOCOLS
    PROCESSING = HTTPStatus.PROCESSING
    OK = HTTPStatus.OK
    CREATED = HTTPStatus.CREATED
    ACCEPTED = HTTPStatus.ACCEPTED
    NON_AUTHORITATIVE_INFORMATION = HTTPStatus.NON_AUTHORITATIVE_INFORMATION
    NO_CONTENT = HTTPStatus.NO_CONTENT
    RESET_CONTENT = HTTPStatus.RESET_CONTENT
    PARTIAL_CONTENT = HTTPStatus.PARTIAL_CONTENT
    MULTI_STATUS = HTTPStatus.MULTI_STATUS
    ALREADY_REPORTED = HTTPStatus.ALREADY_REPORTED
    IM_USED = HTTPStatus.IM_USED
    MULTIPLE_CHOICES = HTTPStatus.MULTIPLE_CHOICES
    MOVED_PERMANENTLY = HTTPStatus.MOVED_PERMANENTLY
    FOUND = HTTPStatus.FOUND
    SEE_OTHER = HTTPStatus.SEE_OTHER
    NOT_MODIFIED = HTTPStatus.NOT_MODIFIED
    USE_PROXY = HTTPStatus.USE_PROXY
    TEMPORARY_REDIRECT = HTTPStatus.TEMPORARY_REDIRECT
    PERMANENT_REDIRECT = HTTPStatus.PERMANENT_REDIRECT
    BAD_REQUEST = HTTPStatus.BAD_REQUEST
    UNAUTHORIZED = HTTPStatus.UNAUTHORIZED
    PAYMENT_REQUIRED = HTTPStatus.PAYMENT_REQUIRED
    FORBIDDEN = HTTPStatus.FORBIDDEN
    NOT_FOUND = HTTPStatus.NOT_FOUND
    METHOD_NOT_ALLOWED = HTTPStatus.METHOD_NOT_ALLOWED
    NOT_ACCEPTABLE = HTTPStatus.NOT_ACCEPTABLE
    PROXY_AUTHENTICATION_REQUIRED = HTTPStatus.PROXY_AUTHENTICATION_REQUIRED
    REQUEST_TIMEOUT = HTTPStatus.REQUEST_TIMEOUT
    CONFLICT = HTTPStatus.CONFLICT
    GONE = HTTPStatus.GONE
    LENGTH_REQUIRED = HTTPStatus.LENGTH_REQUIRED
    PRECONDITION_FAILED = HTTPStatus.PRECONDITION_FAILED
    REQUEST_ENTITY_TOO_LARGE = HTTPStatus.REQUEST_ENTITY_TOO_LARGE
    REQUEST_URI_TOO_LONG = HTTPStatus.REQUEST_URI_TOO_LONG
    UNSUPPORTED_MEDIA_TYPE = HTTPStatus.UNSUPPORTED_MEDIA_TYPE
    REQUESTED_RANGE_NOT_SATISFIABLE = HTTPStatus.REQUESTED_RANGE_NOT_SATISFIABLE
    EXPECTATION_FAILED = HTTPStatus.EXPECTATION_FAILED
    UNPROCESSABLE_ENTITY = HTTPStatus.UNPROCESSABLE_ENTITY
    LOCKED = HTTPStatus.LOCKED
    FAILED_DEPENDENCY = HTTPStatus.FAILED_DEPENDENCY
    UPGRADE_REQUIRED = HTTPStatus.UPGRADE_REQUIRED
    PRECONDITION_REQUIRED = HTTPStatus.PRECONDITION_REQUIRED
    TOO_MANY_REQUESTS = HTTPStatus.TOO_MANY_REQUESTS
    REQUEST_HEADER_FIELDS_TOO_LARGE = HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE
    INTERNAL_SERVER_ERROR = HTTPStatus.INTERNAL_SERVER_ERROR
    NOT_IMPLEMENTED = HTTPStatus.NOT_IMPLEMENTED
    BAD_GATEWAY = HTTPStatus.BAD_GATEWAY
    SERVICE_UNAVAILABLE = HTTPStatus.SERVICE_UNAVAILABLE
    GATEWAY_TIMEOUT = HTTPStatus.GATEWAY_TIMEOUT
    HTTP_VERSION_NOT_SUPPORTED = HTTPStatus.HTTP_VERSION_NOT_SUPPORTED
    VARIANT_ALSO_NEGOTIATES = HTTPStatus.VARIANT_ALSO_NEGOTIATES
    INSUFFICIENT_STORAGE = HTTPStatus.INSUFFICIENT_STORAGE
    LOOP_DETECTED = HTTPStatus.LOOP_DETECTED
    NOT_EXTENDED = HTTPStatus.NOT_EXTENDED
    NETWORK_AUTHENTICATION_REQUIRED = HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED
    """
        )
    )
# Hook the extenders into astroid so inference of `http` / `http.client`
# exposes the HTTPStatus constants modelled above.
register_module_extender(AstroidManager(), "http", _http_transform)
register_module_extender(AstroidManager(), "http.client", _http_client_transform)
| 50.009434 | 94 | 0.705433 |
acf07cbaaba8d5ebd9424cf9b9fe0f0a570d2223 | 20,603 | py | Python | test/functional/feature_pruning.py | hiphopcoin24/hiphopcoin24 | 09b780546ba9e28b452a8641863aafa90def40d1 | [
"MIT"
] | null | null | null | test/functional/feature_pruning.py | hiphopcoin24/hiphopcoin24 | 09b780546ba9e28b452a8641863aafa90def40d1 | [
"MIT"
] | null | null | null | test/functional/feature_pruning.py | hiphopcoin24/hiphopcoin24 | 09b780546ba9e28b452a8641863aafa90def40d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Hiphopcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
import os
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock, ToHex
from test_framework.script import CScript, OP_RETURN, OP_NOP
from test_framework.test_framework import HiphopcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
def mine_large_blocks(node, n):
    """Mine *n* consensus-valid blocks on *node*, each padded close to 1MB.

    The coinbase scriptPubKey is OP_RETURN followed by 950k OP_NOPs: that would
    be non-standard in a normal transaction but is consensus valid in a
    coinbase.  A function attribute (``nTime``) keeps block time monotonically
    increasing across calls so every generated block hash is unique.
    """
    # Initialize the persistent timestamp only on the first call.
    # Fix: the old guard tested the misspelled key "nTimes", so nTime was
    # reset to 0 on every invocation instead of persisting between calls.
    if not hasattr(mine_large_blocks, "nTime"):
        mine_large_blocks.nTime = 0
    # Large scriptPubKey shared by every coinbase built below.
    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
    # Seed height, time and prevhash from the current chain tip.
    best_block = node.getblock(node.getbestblockhash())
    height = int(best_block["height"]) + 1
    mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
    previousblockhash = int(best_block["hash"], 16)
    for _ in range(n):
        # Build the coinbase transaction (with large scriptPubKey)
        coinbase_tx = create_coinbase(height)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
        coinbase_tx.vout[0].scriptPubKey = big_script
        coinbase_tx.rehash()
        # Build the block around the coinbase and solve its proof of work.
        block = CBlock()
        block.nVersion = best_block["version"]
        block.hashPrevBlock = previousblockhash
        block.nTime = mine_large_blocks.nTime
        block.nBits = int('207fffff', 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Submit to the node
        node.submitblock(ToHex(block))
        # Chain the next block onto the one just mined.
        previousblockhash = block.sha256
        height += 1
        mine_large_blocks.nTime += 1
def calc_usage(blockdir):
    """Return the total size, in MiB, of the regular files directly in *blockdir*.

    Fix: the original built the getsize path with string concatenation
    (``blockdir + f``) while the isfile check used ``os.path.join``, so it
    broke when *blockdir* lacked a trailing separator.  Both now use join.
    """
    total_bytes = sum(
        os.path.getsize(os.path.join(blockdir, name))
        for name in os.listdir(blockdir)
        if os.path.isfile(os.path.join(blockdir, name))
    )
    return total_bytes / (1024. * 1024.)
class PruneTest(HiphopcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
self.supports_cli = False
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
# Create nodes 5 to test wallet in prune mode, but do not connect
self.extra_args = [
self.full_node_default_args,
self.full_node_default_args,
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000"],
["-maxreceivebuffer=20000"],
["-prune=550"],
]
self.rpc_timeout = 120
    def skip_test_if_missing_module(self):
        # The test exercises wallet rescans under pruning, so require wallet support.
        self.skip_if_no_wallet()
    def setup_network(self):
        """Start all nodes, record node 2's block dir, and connect nodes 0-4."""
        self.setup_nodes()
        # Directory holding node 2's block files; pruning is measured here.
        self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, 'blocks', '')
        # Node 5 is deliberately left unconnected until the wallet test.
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 2)
        self.connect_nodes(0, 2)
        self.connect_nodes(0, 3)
        self.connect_nodes(0, 4)
        self.sync_blocks(self.nodes[0:5])
    def setup_nodes(self):
        """Add and start the nodes, importing deterministic coinbase keys."""
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()
    def create_big_chain(self):
        """Mine 350 small blocks plus 645 large ones (>550MiB of block data)."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        self.sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        mine_large_blocks(self.nodes[0], 645)
        self.sync_blocks(self.nodes[0:5])
def test_height_min(self):
assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
mine_large_blocks(self.nodes[0], 25)
# Wait for blk00000.dat to be pruned
self.wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
assert_greater_than(550, usage)
    def create_chain_with_staleblocks(self):
        """Create 12 stale forks hanging off the main chain via repeated reorgs."""
        # Create stale blocks in manageable sized chunks
        self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
        for _ in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            self.disconnect_nodes(0, 1)
            self.disconnect_nodes(0, 2)
            # Mine 24 blocks in node 1
            mine_large_blocks(self.nodes[1], 24)
            # Reorg back with 25 block chain from node 0
            mine_large_blocks(self.nodes[0], 25)
            # Create connections in the order so both nodes can see the reorg at the same time
            self.connect_nodes(0, 1)
            self.connect_nodes(0, 2)
            self.sync_blocks(self.nodes[0:3])
        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
    def reorg_test(self):
        """Force a 288-block-deep reorg on node 2, then refill history."""
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)
        self.forkheight = height - 287
        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
        self.nodes[1].invalidateblock(self.forkhash)
        # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        assert self.nodes[1].getblockcount() == self.forkheight - 1
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
        # Disconnect node1 and generate the new chain
        self.disconnect_nodes(0, 1)
        self.disconnect_nodes(1, 2)
        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)
        self.log.info("Reconnect nodes")
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 2)
        self.sync_blocks(self.nodes[0:3], timeout=120)
        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
        self.log.info("Mine 220 more large blocks so we have requisite history")
        mine_large_blocks(self.nodes[0], 220)
        self.sync_blocks(self.nodes[0:3], timeout=120)
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        assert_greater_than(550, usage)
    def reorg_back(self):
        """Reorg node 2 back to the original chain, redownloading pruned blocks."""
        # Verify that a block on the old main chain fork has been pruned away
        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
        with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
            self.nodes[2].verifychain(checklevel=4, nblocks=0)
        self.log.info("Will need to redownload block %d" % self.forkheight)
        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it is expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2
        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
            self.nodes[0].invalidateblock(curchainhash)
            assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
            assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1
        self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        # Wait for Node 2 to reorg to proper height
        self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
        assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
    def manual_test(self, node_number, use_timestamp):
        """Exercise the manual pruneblockchain RPC on the given node.

        When *use_timestamp* is true, prune targets are expressed as block
        timestamps (+TIMESTAMP_WINDOW) instead of heights.
        """
        # at this point, node has 995 blocks and has not yet run in prune mode
        self.start_node(node_number)
        node = self.nodes[node_number]
        assert_equal(node.getblockcount(), 995)
        assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
        # now re-start in manual pruning mode
        self.restart_node(node_number, extra_args=["-prune=1"])
        node = self.nodes[node_number]
        assert_equal(node.getblockcount(), 995)
        # Map a block index to the prune target (height or timestamp form).
        def height(index):
            if use_timestamp:
                return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
            else:
                return index
        # Prune up to block *index* and check the reported prune height.
        def prune(index):
            ret = node.pruneblockchain(height=height(index))
            assert_equal(ret, node.getblockchaininfo()['pruneheight'])
        # True while blkNNNNN.dat still exists on disk for this node.
        def has_block(index):
            return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index)))
        # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
        assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
        # Save block transaction count before pruning, assert value
        block1_details = node.getblock(node.getblockhash(1))
        assert_equal(block1_details["nTx"], len(block1_details["tx"]))
        # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
        node.generate(6)
        assert_equal(node.getblockchaininfo()["blocks"], 1001)
        # Pruned block should still know the number of transactions
        assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
        # negative heights should raise an exception
        assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
        # height=100 too low to prune first block file so this is a no-op
        prune(100)
        assert has_block(0), "blk00000.dat is missing when should still be there"
        # Does nothing
        node.pruneblockchain(height(0))
        assert has_block(0), "blk00000.dat is missing when should still be there"
        # height=500 should prune first file
        prune(500)
        assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
        assert has_block(1), "blk00001.dat is missing when should still be there"
        # height=650 should prune second file
        prune(650)
        assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
        # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
        prune(1000)
        assert has_block(2), "blk00002.dat is still there, should be pruned by now"
        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
        node.generate(288)
        prune(1000)
        assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
        assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
        # stop node, start back up with auto-prune at 550 MiB, make sure still runs
        self.restart_node(node_number, extra_args=["-prune=550"])
        self.log.info("Success")
    def wallet_test(self):
        """Verify wallets survive restarts and initial sync on pruned nodes."""
        # check that the pruning node's wallet is still in good shape
        self.log.info("Stop and start pruning node to trigger wallet rescan")
        self.restart_node(2, extra_args=["-prune=550"])
        self.log.info("Success")
        # check that wallet loads successfully when restarting a pruned node after IBD.
        # this was reported to fail in #7494.
        self.log.info("Syncing node 5 to test wallet")
        self.connect_nodes(0, 5)
        nds = [self.nodes[0], self.nodes[5]]
        self.sync_blocks(nds, wait=5, timeout=300)
        self.restart_node(5, extra_args=["-prune=550"]) # restart to trigger rescan
        self.log.info("Success")
def run_test(self):
    """Drive the full pruning scenario.

    The steps are strictly order-dependent: build a chain below the prune
    threshold, extend it with stale forks, exercise deep reorgs across
    pruned data, then test manual pruning and wallet survival. The ASCII
    diagrams below track the chain state of each node (N0..N2) as the
    test progresses.
    """
    self.log.info("Warning! This test requires 4GB of disk space")

    self.log.info("Mining a big blockchain of 995 blocks")
    self.create_big_chain()
    # Chain diagram key:
    # * blocks on main chain
    # +,&,$,@ blocks on other forks
    # X invalidated block
    # N1 Node 1
    #
    # Start by mining a simple chain that all nodes have
    # N0=N1=N2 **...*(995)

    # stop manual-pruning node with 995 blocks
    self.stop_node(3)
    self.stop_node(4)

    self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
    self.test_height_min()
    # Extend this chain past the PruneAfterHeight
    # N0=N1=N2 **...*(1020)

    self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
    self.create_chain_with_staleblocks()
    # Disconnect N0
    # And mine a 24 block chain on N1 and a separate 25 block chain on N0
    # N1=N2 **...*+...+(1044)
    # N0 **...**...**(1045)
    #
    # reconnect nodes causing reorg on N1 and N2
    # N1=N2 **...*(1020) *...**(1045)
    # \
    # +...+(1044)
    #
    # repeat this process until you have 12 stale forks hanging off the
    # main chain on N1 and N2
    # N0 *************************...***************************(1320)
    #
    # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
    # \ \ \
    # +...+(1044) &.. $...$(1319)

    # Save some current chain state for later use
    self.mainchainheight = self.nodes[2].getblockcount()  # 1320
    self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

    self.log.info("Check that we can survive a 288 block reorg still")
    self.reorg_test()  # (1033, )
    # Now create a 288 block reorg by mining a longer chain on N1
    # First disconnect N1
    # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
    # N1 **...*(1020) **...**(1032)X..
    # \
    # ++...+(1031)X..
    #
    # Now mine 300 more blocks on N1
    # N1 **...*(1020) **...**(1032) @@...@(1332)
    # \ \
    # \ X...
    # \ \
    # ++...+(1031)X.. ..
    #
    # Reconnect nodes and mine 220 more blocks on N1
    # N1 **...*(1020) **...**(1032) @@...@@@(1552)
    # \ \
    # \ X...
    # \ \
    # ++...+(1031)X.. ..
    #
    # N2 **...*(1020) **...**(1032) @@...@@@(1552)
    # \ \
    # \ *...**(1320)
    # \ \
    # ++...++(1044) ..
    #
    # N0 ********************(1032) @@...@@@(1552)
    # \
    # *...**(1320)

    self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
    self.reorg_back()
    # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
    # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
    # original main chain (*), but will require redownload of some blocks
    # In order to have a peer we think we can download from, must also perform this invalidation
    # on N0 and mine a new longest chain to trigger.
    # Final result:
    # N0 ********************(1032) **...****(1553)
    # \
    # X@...@@@(1552)
    #
    # N2 **...*(1020) **...**(1032) **...****(1553)
    # \ \
    # \ X@...@@@(1552)
    # \
    # +..
    #
    # N1 doesn't change because 1033 on main chain (*) is invalid

    self.log.info("Test manual pruning with block indices")
    self.manual_test(3, use_timestamp=False)

    self.log.info("Test manual pruning with timestamps")
    self.manual_test(4, use_timestamp=True)

    self.log.info("Test wallet re-scan")
    self.wallet_test()

    self.log.info("Done")
if __name__ == '__main__':
    # Script entry point: instantiate the test and hand control to the
    # functional-test framework's main() runner.
    PruneTest().main()
| 45.182018 | 140 | 0.612435 |
acf07d081be26422da160d25c329ab65e2e9370e | 1,107 | py | Python | apps/ndvi_anomaly/apps.py | pinkerltm/datacube-ui | 325d404a994d49c23922e7de10c7ab244b78500b | [
"Apache-2.0"
] | 1 | 2019-07-22T05:24:40.000Z | 2019-07-22T05:24:40.000Z | apps/ndvi_anomaly/apps.py | SivaramakrishnanKN/NE-GeoCloud | affcae49e0ccd7d29360a2771a9517147ed56590 | [
"Apache-2.0"
] | 1 | 2019-06-06T18:31:29.000Z | 2019-06-06T18:31:29.000Z | apps/ndvi_anomaly/apps.py | SivaramakrishnanKN/NE-GeoCloud | affcae49e0ccd7d29360a2771a9517147ed56590 | [
"Apache-2.0"
] | 5 | 2019-06-05T07:26:13.000Z | 2019-06-08T06:53:11.000Z | # Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.apps import AppConfig
class NdviAnomalyToolConfig(AppConfig):
    """Django application configuration for the NDVI anomaly tool."""

    # Dotted-path label Django uses to identify this application.
    name = 'ndvi_anomaly'
| 41 | 78 | 0.771454 |
acf07d7091164b076d9267829a2e6e623a053949 | 59,177 | py | Python | venv/Lib/site-packages/pandas/tests/io/json/test_pandas.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | 1 | 2021-02-06T21:00:00.000Z | 2021-02-06T21:00:00.000Z | venv/Lib/site-packages/pandas/tests/io/json/test_pandas.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/io/json/test_pandas.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
# Shared fixture data, built once at import time; tests take .copy() snapshots.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()

_frame = DataFrame(_seriesd)
# Same data with an explicit, reversed column order.
_frame2 = DataFrame(_seriesd, columns=["D", "C", "B", "A"])
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})

_tsframe = DataFrame(_tsd)

# Frame indexed by a CategoricalIndex with duplicate labels; the "sort"
# column provides a stable ordering that survives JSON round-trips.
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")

_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
    """Assert a JSON round-trip matches, adjusting for orient quirks.

    "records" and "values" orients drop the index, and "values" also drops
    the column labels, so ``expected`` is normalized accordingly before the
    frame comparison.
    """
    if orient in ("records", "values"):
        expected = expected.reset_index(drop=True)
    if orient == "values":
        expected.columns = range(len(expected.columns))
    tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(scope="function", autouse=True)
def setup(self, datapath):
    """Attach fresh copies of the shared fixture frames/series to the test
    instance before every test, and drop them afterwards so no state leaks
    between tests."""
    self.dirpath = datapath("io", "json", "data")

    self.ts = tm.makeTimeSeries()
    self.ts.name = "ts"

    self.series = tm.makeStringSeries()
    self.series.name = "series"

    self.objSeries = tm.makeObjectSeries()
    self.objSeries.name = "objects"

    # explicit dtype avoids the empty-Series dtype-inference warning
    self.empty_series = Series([], index=[], dtype=np.float64)
    self.empty_frame = DataFrame()

    # per-test copies of the module-level fixture frames
    self.frame = _frame.copy()
    self.frame2 = _frame2.copy()
    self.intframe = _intframe.copy()
    self.tsframe = _tsframe.copy()
    self.mixed_frame = _mixed_frame.copy()
    self.categorical = _cat_frame.copy()

    yield  # run the test

    # teardown: release all per-test attributes
    del self.dirpath

    del self.ts

    del self.series

    del self.objSeries

    del self.empty_series
    del self.empty_frame

    del self.frame
    del self.frame2
    del self.intframe
    del self.tsframe
    del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
    # Labels containing quotes, slashes and backslashes must survive the
    # JSON escaping applied on write and be unescaped on read.
    frame = DataFrame(
        [["a", "b"], ["c", "d"]],
        index=['index " 1', "index / 2"],
        columns=["a \\ b", "y / z"],
    )

    round_tripped = read_json(frame.to_json(orient=orient), orient=orient)
    assert_json_roundtrip_equal(round_tripped, frame.copy(), orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
    # A duplicated index round-trips for orients that either serialize the
    # index positionally ("split") or drop it ("records"/"values").
    frame = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])

    round_tripped = read_json(frame.to_json(orient=orient), orient=orient)
    assert_json_roundtrip_equal(round_tripped, frame.copy(), orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
    # Orients that key rows by index label cannot represent duplicate
    # labels, so serialization must refuse with a clear error.
    frame = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
    expected_msg = f"DataFrame index must be unique for orient='{orient}'"
    with pytest.raises(ValueError, match=expected_msg):
        frame.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
    "data",
    [
        [["a", "b"], ["c", "d"]],
        [[1.5, 2.5], [3.5, 4.5]],
        [[1, 2.5], [3, 4.5]],
        [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
    ],
)
def test_frame_non_unique_columns(self, orient, data):
    # Duplicate column labels round-trip only for orients that do not key
    # values by column name: "split" keeps labels positionally, "values"
    # drops them entirely.
    df = DataFrame(data, index=[1, 2], columns=["x", "x"])

    result = read_json(
        df.to_json(orient=orient), orient=orient, convert_dates=["x"]
    )
    if orient == "values":
        expected = pd.DataFrame(data)
        if expected.iloc[:, 0].dtype == "datetime64[ns]":
            # orient == "values" by default will write Timestamp objects out
            # in milliseconds; these are internally stored in nanosecond,
            # so divide to get where we need
            # TODO: a to_epoch method would also solve; see GH 14772
            expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
    elif orient == "split":
        expected = df

    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
    # Orients that key values by column name cannot represent duplicate
    # column labels, so serialization must refuse.
    frame = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])

    expected_msg = f"DataFrame columns must be unique for orient='{orient}'"
    with pytest.raises(ValueError, match=expected_msg):
        frame.to_json(orient=orient)
def test_frame_default_orient(self):
    # Omitting ``orient`` must be equivalent to orient="columns".
    implicit = self.frame.to_json()
    explicit = self.frame.to_json(orient="columns")
    assert implicit == explicit
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype):
data = self.frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.frame.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
    """Round-trip a frame whose index/column labels are strings.

    Fix: use the builtin ``int`` instead of ``np.int`` — the alias was
    deprecated in NumPy 1.20 and removed in 1.24, and was always identical
    to the builtin, so the parametrization is unchanged in effect.
    """
    df = DataFrame(
        np.zeros((200, 4)),
        columns=[str(i) for i in range(4)],
        index=[str(i) for i in range(200)],
        dtype=dtype,
    )

    # TODO: do we even need to support U3 dtypes?
    if numpy and dtype == "U3" and orient != "split":
        pytest.xfail("Can't decode directly to array")

    data = df.to_json(orient=orient)
    result = pd.read_json(
        data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
    )

    expected = df.copy()
    if not dtype:
        expected = expected.astype(np.int64)

    # index columns, and records orients cannot fully preserve the string
    # dtype for axes as the index and column labels are used as keys in
    # JSON objects. JSON keys are by definition strings, so there's no way
    # to disambiguate whether those keys actually were strings or numeric
    # beforehand and numeric wins out.
    # TODO: Split should be able to support this
    if convert_axes and (orient in ("split", "index", "columns")):
        expected.columns = expected.columns.astype(np.int64)
        expected.index = expected.index.astype(np.int64)
    elif orient == "records" and convert_axes:
        expected.columns = expected.columns.astype(np.int64)

    assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy):
data = self.empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, -np.inf])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
    """Infinities serialize to JSON null and deserialize back as NaN.

    Fix: use ``-np.inf`` instead of ``np.NINF`` — the ``NINF`` constant was
    deprecated and removed in NumPy 2.0; the value is identical.
    """
    # infinities get mapped to nulls which get mapped to NaNs during
    # deserialisation
    df = DataFrame([[1, 2], [4, 5, 6]])
    df.loc[0, 2] = inf
    result = read_json(df.to_json(), dtype=dtype)
    assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
v12_json = os.path.join(self.dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(self.dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
    """Non-printable byte content must raise cleanly, not segfault.

    GH14256: a column that failed to encode caused segfaults when it was
    not the last one. Fix here: the multi-column ``pytest.raises`` now
    asserts the error message too (``match=msg``) instead of accepting any
    OverflowError.
    """

    # Minimal object whose str() is printable but whose repr path carries
    # raw bytes that cannot be UTF-8 encoded.
    class BinaryThing:
        def __init__(self, hexed):
            self.hexed = hexed
            self.binary = bytes.fromhex(hexed)

        def __str__(self) -> str:
            return self.hexed

    hexed = "574b4454ba8c5eb4f98a8f45"
    binthing = BinaryThing(hexed)

    # verify the proper conversion of printable content
    df_printable = DataFrame({"A": [binthing.hexed]})
    assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'

    # check if non-printable content throws appropriate Exception
    df_nonprintable = DataFrame({"A": [binthing]})
    msg = "Unsupported UTF-8 sequence length when encoding string"
    with pytest.raises(OverflowError, match=msg):
        df_nonprintable.to_json()

    # the same with multiple columns threw segfaults; assert the same
    # message rather than accepting any OverflowError
    df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
    with pytest.raises(OverflowError, match=msg):
        df_mixed.to_json()

    # default_handler should resolve exceptions for non-string types
    result = df_nonprintable.to_json(default_handler=str)
    expected = f'{{"A":{{"0":"{hexed}"}}}}'
    assert result == expected
    assert (
        df_mixed.to_json(default_handler=str)
        == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
    )
def test_label_overflow(self):
    # GH14256: buffer length not checked when writing label
    wide_label = "bar" * 100000
    result = pd.DataFrame({wide_label: [1], "foo": [1337]}).to_json()
    expected = f'{{"{wide_label}":{{"0":1}},"foo":{{"0":1337}}}}'
    assert result == expected
def test_series_non_unique_index(self):
    # orient="index" keys values by label, so duplicate labels must be
    # rejected; "split" and "records" can still round-trip the data.
    ser = Series(["a", "b"], index=[1, 1])

    msg = "Series index must be unique for orient='index'"
    with pytest.raises(ValueError, match=msg):
        ser.to_json(orient="index")

    tm.assert_series_equal(
        ser, read_json(ser.to_json(orient="split"), orient="split", typ="series")
    )
    unserialized = read_json(
        ser.to_json(orient="records"), orient="records", typ="series"
    )
    tm.assert_numpy_array_equal(ser.values, unserialized.values)
def test_series_default_orient(self):
assert self.series.to_json() == self.series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy):
data = self.series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = self.series.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype):
data = self.objSeries.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = self.objSeries.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy):
data = self.empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = self.empty_series.copy()
# TODO: see what causes inconsistency
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy):
data = self.ts.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = self.ts.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
    """Round-trip a numeric Series with string index labels.

    Fix: parametrize over the builtin ``int`` instead of ``np.int`` — the
    alias was deprecated in NumPy 1.20 and removed in 1.24, and was always
    identical to the builtin.
    """
    s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
    data = s.to_json(orient=orient)
    result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)

    expected = s.copy()
    # these orients drop the index on write, so drop it before comparing
    if orient in ("values", "records"):
        expected = expected.reset_index(drop=True)

    tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self):
with tm.ensure_clean("test.json") as path:
for df in [
self.frame,
self.frame2,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, self.ts, check_names=False)
assert result.name is None
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit):
ts = Series(Timestamp(date), index=self.ts.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self):
ts = Series(Timestamp("20130101 20:43:42.123"), index=self.ts.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
tm.assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
tm.assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.astype("Sparse")
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.astype("Sparse")
expected = s.to_json()
assert expected == ss.to_json()
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-10 05:00:00Z"),
Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
Timestamp("2013-01-10 00:00:00-0500"),
],
)
def test_tz_is_utc(self, ts):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
@pytest.mark.parametrize(
"tz_range",
[
pd.date_range("2013-01-01 05:00:00Z", periods=2),
pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
pd.date_range("2013-01-01 00:00:00-0500", periods=2),
],
)
def test_tz_range_is_utc(self, tz_range):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
assert result == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@td.skip_if_not_us_locale
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json("s3n://pandas-test/items.jsonl", lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with tm.ensure_clean("tmp_items.json") as path:
with open(path, "w") as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_json_large_numbers(self):
# GH18842
json = '{"articleId": "1404366058080022500245"}'
json = StringIO(json)
result = read_json(json, typ="series")
expected = Series(1.404366e21, index=["articleId"])
tm.assert_series_equal(result, expected)
json = '{"0": {"articleId": "1404366058080022500245"}}'
json = StringIO(json)
result = read_json(json)
expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
tm.assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
tm.assert_frame_equal(pd.read_json(result, lines=True), df)
# TODO: there is a near-identical test for pytables; can we share?
def test_latin_encoding(self):
# GH 13774
pytest.skip("encoding not implemented in .to_json(), xref #13774")
values = [
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
]
values = [
[x.decode("latin-1") if isinstance(x, bytes) else x for x in y]
for y in values
]
examples = []
for dtype in ["category", object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding="latin-1"):
with tm.ensure_clean("test.json") as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
tm.assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({"a": [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
@pytest.mark.parametrize(
"index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
)
@pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
def test_from_json_to_json_table_index_and_columns(self, index, columns):
# GH25433 GH25435
expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
dfjson = expected.to_json(orient="table")
result = pd.read_json(dfjson, orient="table")
tm.assert_frame_equal(result, expected)
def test_from_json_to_json_table_dtypes(self):
# GH21345
expected = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
dfjson = expected.to_json(orient="table")
result = pd.read_json(dfjson, orient="table")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
def test_read_json_table_dtype_raises(self, dtype):
# GH21345
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
dfjson = df.to_json(orient="table")
msg = "cannot pass both dtype and orient='table'"
with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient="table", dtype=dtype)
def test_read_json_table_convert_axes_raises(self):
# GH25433 GH25435
df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])
dfjson = df.to_json(orient="table")
msg = "cannot pass both convert_axes and orient='table'"
with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient="table", convert_axes=True)
@pytest.mark.parametrize(
"data, expected",
[
(
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(
DataFrame(
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),
(
Series([1, 2, 3], name="A").rename_axis("foo"),
{"name": "A", "data": [1, 2, 3]},
),
(
Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),
{"name": "A", "data": [1, 2]},
),
],
)
def test_index_false_to_json_split(self, data, expected):
# GH 17394
# Testing index=False in to_json with orient='split'
result = data.to_json(orient="split", index=False)
result = json.loads(result)
assert result == expected
@pytest.mark.parametrize(
"data",
[
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
(
DataFrame(
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
)
),
(Series([1, 2, 3], name="A")),
(Series([1, 2, 3], name="A").rename_axis("foo")),
(Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
],
)
def test_index_false_to_json_table(self, data):
# GH 17394
# Testing index=False in to_json with orient='table'
result = data.to_json(orient="table", index=False)
result = json.loads(result)
expected = {
"schema": pd.io.json.build_table_schema(data, index=False),
"data": DataFrame(data).to_dict(orient="records"),
}
assert result == expected
@pytest.mark.parametrize("orient", ["records", "index", "columns", "values"])
def test_index_false_error_to_json(self, orient):
# GH 17394
# Testing error message from to_json with index=False
df = pd.DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
msg = "'index=False' is only valid when 'orient' is 'split' or 'table'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient, index=False)
@pytest.mark.parametrize("orient", ["split", "table"])
@pytest.mark.parametrize("index", [True, False])
def test_index_false_from_json_to_json(self, orient, index):
# GH25170
# Test index=False in from_json to_json
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
dfjson = expected.to_json(orient=orient, index=index)
result = read_json(dfjson, orient=orient)
tm.assert_frame_equal(result, expected)
def test_read_timezone_information(self):
# GH 25546
result = read_json(
'{"2019-01-01T11:00:00.000Z":88}', typ="series", orient="index"
)
expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]
)
def test_timedelta_as_label(self, date_format, key):
df = pd.DataFrame([[1]], columns=[pd.Timedelta("1D")])
expected = f'{{"{key}":{{"0":1}}}}'
result = df.to_json(date_format=date_format)
assert result == expected
@pytest.mark.parametrize(
"orient,expected",
[
("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
# TODO: the below have separate encoding procedures
# They produce JSON but not in a consistent manner
pytest.param("split", "", marks=pytest.mark.skip),
pytest.param("table", "", marks=pytest.mark.skip),
],
)
def test_tuple_labels(self, orient, expected):
# GH 20500
df = pd.DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
result = df.to_json(orient=orient)
assert result == expected
@pytest.mark.parametrize("indent", [1, 2, 4])
def test_to_json_indent(self, indent):
# GH 12004
df = pd.DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
result = df.to_json(indent=indent)
spaces = " " * indent
expected = f"""{{
{spaces}"a":{{
{spaces}{spaces}"0":"foo",
{spaces}{spaces}"1":"baz"
{spaces}}},
{spaces}"b":{{
{spaces}{spaces}"0":"bar",
{spaces}{spaces}"1":"qux"
{spaces}}}
}}"""
assert result == expected
@pytest.mark.parametrize(
"orient,expected",
[
(
"split",
"""{
"columns":[
"a",
"b"
],
"index":[
0,
1
],
"data":[
[
"foo",
"bar"
],
[
"baz",
"qux"
]
]
}""",
),
(
"records",
"""[
{
"a":"foo",
"b":"bar"
},
{
"a":"baz",
"b":"qux"
}
]""",
),
(
"index",
"""{
"0":{
"a":"foo",
"b":"bar"
},
"1":{
"a":"baz",
"b":"qux"
}
}""",
),
(
"columns",
"""{
"a":{
"0":"foo",
"1":"baz"
},
"b":{
"0":"bar",
"1":"qux"
}
}""",
),
(
"values",
"""[
[
"foo",
"bar"
],
[
"baz",
"qux"
]
]""",
),
(
"table",
"""{
"schema":{
"fields":[
{
"name":"index",
"type":"integer"
},
{
"name":"a",
"type":"string"
},
{
"name":"b",
"type":"string"
}
],
"primaryKey":[
"index"
],
"pandas_version":"0.20.0"
},
"data":[
{
"index":0,
"a":"foo",
"b":"bar"
},
{
"index":1,
"a":"baz",
"b":"qux"
}
]
}""",
),
],
)
def test_json_indent_all_orients(self, orient, expected):
# GH 12004
df = pd.DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
result = df.to_json(orient=orient, indent=4)
assert result == expected
def test_json_negative_indent_raises(self):
with pytest.raises(ValueError, match="must be a nonnegative integer"):
pd.DataFrame().to_json(indent=-1)
def test_emca_262_nan_inf_support(self):
# GH 12213
data = '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
result = pd.read_json(data)
expected = pd.DataFrame(
["a", np.nan, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]
)
tm.assert_frame_equal(result, expected)
def test_deprecate_numpy_argument_read_json(self):
# GH 28512
expected = DataFrame([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
result = read_json(expected.to_json(), numpy=True)
tm.assert_frame_equal(result, expected)
def test_json_pandas_na(self):
# GH 31615
result = pd.DataFrame([[pd.NA]]).to_json()
assert result == '{"0":{"0":null}}'
def test_json_pandas_nulls(self, nulls_fixture):
# GH 31615
result = pd.DataFrame([[nulls_fixture]]).to_json()
assert result == '{"0":{"0":null}}'
| 35.799758 | 89 | 0.523464 |
acf07ecf5b2c4d8e7fbeb80dfe27cac3b270380b | 11,971 | py | Python | theano/gpuarray/rng_mrg.py | AT-jamesp0013/Theano | b158a2360e76f19434592c30cab707d88c5fd725 | [
"BSD-3-Clause"
] | null | null | null | theano/gpuarray/rng_mrg.py | AT-jamesp0013/Theano | b158a2360e76f19434592c30cab707d88c5fd725 | [
"BSD-3-Clause"
] | null | null | null | theano/gpuarray/rng_mrg.py | AT-jamesp0013/Theano | b158a2360e76f19434592c30cab707d88c5fd725 | [
"BSD-3-Clause"
] | null | null | null | """
GPU implementation of MRG31k3p random number generator for Theano.
Generator code in SSJ package (L'Ecuyer & Simard).
http://www.iro.umontreal.ca/~simardr/ssj/indexe.html
"""
from __future__ import absolute_import, print_function, division
from theano import Apply, tensor
from theano.gof import local_optimizer
from theano.sandbox.rng_mrg import mrg_uniform_base, mrg_uniform
from theano.tensor import as_tensor_variable, get_vector_length
from theano.scalar import int32 as int_t
from .basic_ops import (GpuKernelBase, Kernel, infer_context_name,
host_from_gpu, as_gpuarray_variable)
from .type import GpuArrayType, gpu_context_type
from .fp16_help import write_w
from .opt import register_opt, register_opt2
class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
# GpuArray version
_f16_ok = True
params_type = mrg_uniform_base.params_type.extended(otypecode=int_t, context=gpu_context_type)
otypecode = property(lambda self: self.output_type.typecode)
def make_node(self, rstate, size):
# error checking slightly redundant here, since
# this op should not be called directly.
#
# call through MRG_RandomStreams instead.
broad = []
for i in range(self.output_type.ndim):
broad.append(tensor.extract_constant(size[i]) == 1)
output_type = self.output_type.clone(broadcastable=broad)()
rstate = as_gpuarray_variable(rstate, infer_context_name(rstate))
return Apply(self,
[rstate, size],
[rstate.type(), output_type])
def get_params(self, node):
return self.params_type.get_params(self, context=node.inputs[0].type.context)
@classmethod
def new(cls, rstate, ndim, dtype, size):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(GpuArrayType(dtype, (False,) * ndim))
return op(rstate, v_size)
def c_headers(self):
return super(GPUA_mrg_uniform, self).c_headers() + ['numpy_compat.h']
def gpu_kernels(self, node, name):
write = write_w(self.output_type.dtype)
if self.output_type.dtype == 'float16':
otype = 'ga_half'
# limit the values of the state that we use.
mask = '& 0x7fff'
offset = '+ 1'
NORM = '3.0458e-05f' # numpy.float16(1.0/(2**15+33))
# this was determined by finding the biggest number such that
# numpy.float16(number * ((M1 & 0x7fff) + 1)) < 1.0
elif self.output_type.dtype == 'float32':
otype = 'float'
mask = ''
offset = ''
NORM = '4.6566126e-10f' # numpy.float32(1.0/(2**31+65))
# this was determined by finding the biggest number such that
# numpy.float32(number * M1) < 1.0
elif self.output_type.dtype == 'float64':
otype = 'double'
mask = ''
offset = ''
NORM = '4.656612873077392578125e-10'
else:
raise ValueError('Unsupported data type for output',
self.output_type.dtype)
code = """
KERNEL void mrg_uniform(
GLOBAL_MEM %(otype)s *sample_data,
ga_size sample_offset,
GLOBAL_MEM ga_int *state_data,
ga_size state_offset,
const ga_uint Nsamples,
const ga_uint Nstreams_used)
{
sample_data = (GLOBAL_MEM %(otype)s *)(((GLOBAL_MEM char *)sample_data) + sample_offset);
state_data = (GLOBAL_MEM ga_int *)(((GLOBAL_MEM char *)state_data) + state_offset);
/*
* The cluda backend makes sure that ga_int corresponds to
* a 32 bit signed type on the target device. It is not a
* variable width type.
*/
const ga_int i7 = 7;
const ga_int i9 = 9;
const ga_int i15 = 15;
const ga_int i16 = 16;
const ga_int i22 = 22;
const ga_int i24 = 24;
const ga_int M1 = 2147483647; //2^31 - 1
const ga_int M2 = 2147462579; //2^31 - 21069
const ga_int MASK12 = 511; //2^9 - 1
const ga_int MASK13 = 16777215; //2^24 - 1
const ga_int MASK2 = 65535; //2^16 - 1
const ga_int MULT2 = 21069;
const ga_uint idx = GID_0 * LDIM_0 + LID_0;
ga_int y1, y2, x11, x12, x13, x21, x22, x23;
if (idx < Nstreams_used)
{
x11 = state_data[idx*6+0];
x12 = state_data[idx*6+1];
x13 = state_data[idx*6+2];
x21 = state_data[idx*6+3];
x22 = state_data[idx*6+4];
x23 = state_data[idx*6+5];
for (ga_uint i = idx; i < Nsamples; i += Nstreams_used)
{
y1 = ((x12 & MASK12) << i22) + (x12 >> i9) + ((x13 & MASK13) << i7) + (x13 >> i24);
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
y1 += x13;
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
x13 = x12;
x12 = x11;
x11 = y1;
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16));
y1 -= (y1 < 0 || y1 >= M2) ? M2 : 0;
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16));
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += x23;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += y1;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
x23 = x22;
x22 = x21;
x21 = y2;
if (x11 <= x21) {
sample_data[i] = %(write)s((((x11 - x21 + M1) %(mask)s) %(offset)s) * %(NORM)s);
}
else
{
sample_data[i] = %(write)s((((x11 - x21) %(mask)s) %(offset)s) * %(NORM)s);
}
}
state_data[idx*6+0]= x11;
state_data[idx*6+1]= x12;
state_data[idx*6+2]= x13;
state_data[idx*6+3]= x21;
state_data[idx*6+4]= x22;
state_data[idx*6+5]= x23;
}
}
""" % locals()
# we shouldn't get to this line if it's about to fail
from pygpu import gpuarray
return [Kernel(code=code, name="mrg_uniform",
params=[gpuarray.GpuArray, gpuarray.SIZE,
gpuarray.GpuArray, gpuarray.SIZE,
'uint32', 'uint32'],
flags=Kernel.get_flags(self.output_type.dtype, 'int32'))
]
def c_code(self, node, nodename, inp, out, sub):
return """
npy_int64 M1 = 2147483647; //2^31 - 1
size_t n_elements = 1;
unsigned int n_streams;
int must_alloc_sample = ((NULL == %(o_sample)s)
|| !pygpu_GpuArray_Check((PyObject*)%(o_sample)s)
|| !(%(o_sample)s->ga.flags & GA_C_CONTIGUOUS)
|| (PyGpuArray_NDIM(%(o_sample)s) != %(params)s->ndim));
size_t* odims = (size_t*)malloc(%(params)s->ndim * sizeof(size_t));
if (odims == NULL) {
PyErr_NoMemory();
%(just_fail)s
}
if (PyArray_NDIM(%(size)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (PyArray_DIMS(%(size)s)[0] != %(params)s->ndim)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%li)",
%(params)s->ndim, PyArray_DIMS(%(size)s)[0]);
%(fail)s
}
for (int i = 0; i < %(params)s->ndim; ++i)
{
odims[i] = *(dtype_%(size)s *)PyArray_GETPTR1(%(size)s, i);
n_elements *= odims[i];
must_alloc_sample = (must_alloc_sample
|| PyGpuArray_DIMS(%(o_sample)s)[i] != odims[i]);
}
if (n_elements > M1)
{
PyErr_SetString(
PyExc_ValueError,
"rng_mrg gpu implementation does not support more than (2**31 -1) samples");
%(fail)s
}
if (must_alloc_sample)
{
Py_XDECREF(%(o_sample)s);
%(o_sample)s = pygpu_empty(%(params)s->ndim, odims, %(params)s->otypecode, GA_C_ORDER,
%(params)s->context, Py_None);
if(!%(o_sample)s)
{
%(fail)s;
}
}
if (!pygpu_GpuArray_Check((PyObject*)%(rstate)s))
{
PyErr_Format(PyExc_ValueError, "rstate must be gpuarray");
%(fail)s;
}
Py_XDECREF(%(o_rstate)s);
if (%(params)s->inplace)
{
Py_INCREF(%(rstate)s);
%(o_rstate)s = %(rstate)s;
}
else
{
%(o_rstate)s = pygpu_copy(%(rstate)s, GA_ANY_ORDER);
if (!%(o_rstate)s) {
%(fail)s
}
}
if (PyGpuArray_NDIM(%(o_rstate)s) != 2)
{
PyErr_SetString(PyExc_ValueError, "rstate must be a matrix");
%(fail)s
}
if (PyGpuArray_DIMS(%(o_rstate)s)[1] != 6)
{
PyErr_Format(PyExc_ValueError, "rstate must have 6 columns");
%(fail)s
}
if (%(o_rstate)s->ga.typecode != GA_INT) {
PyErr_Format(PyExc_ValueError, "rstate must be int32");
%(fail)s
}
if (!GpuArray_CHKFLAGS(&%(o_rstate)s->ga, GA_C_CONTIGUOUS)) {
PyErr_Format(PyExc_ValueError, "rstate must be C contiguous");
%(fail)s
}
n_streams = PyGpuArray_DIMS(%(o_rstate)s)[0];
if (n_streams > n_elements)
n_streams = n_elements;
{
size_t ls = 0, gs = 0;
int err = GpuKernel_sched(&%(kname)s, n_streams, &ls, &gs);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError, "GpuKernel_sched: %%s\\n",
GpuKernel_error(&%(kname)s, err));
%(fail)s
}
// Make sure we run as many blocks as we need to cover the whole n_streams
gs = (n_streams + ls - 1)/ls;
err = mrg_uniform_call(1, &ls, &gs, 0, %(o_sample)s->ga.data, %(o_sample)s->ga.offset, %(o_rstate)s->ga.data, %(o_rstate)s->ga.offset, n_elements, n_streams);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError, "mrg_uniform_call: %%s\\n",
GpuKernel_error(&%(kname)s, err));
%(fail)s
}
}
free(odims);
""" % dict(rstate=inp[0], size=inp[1],
o_rstate=out[0], o_sample=out[1],
kname=self.gpu_kernels(node, nodename)[0].objvar,
params=sub['params'],
just_fail=sub['fail'],
fail="""
{
free(odims);
%(fail)s
}
""" % dict(fail=sub['fail']))
def c_code_cache_version(self):
return (16,)
@register_opt2([mrg_uniform], 'fast_compile')
def local_gpua_mrg_graph(op, context_name, inputs, outputs):
if (type(op) == mrg_uniform and
isinstance(inputs[0].type, GpuArrayType)):
outs = GPUA_mrg_uniform.new(inputs[0],
op.output_type.ndim,
op.output_type.dtype,
inputs[1])
return [outs[0], host_from_gpu(outs[1])]
@register_opt('fast_compile')
@local_optimizer([mrg_uniform])
def local_gpua_mrg(node):
context_name = infer_context_name(*node.inputs)
return local_gpua_mrg_graph(node.op, context_name, node.inputs, node.outputs)
| 36.947531 | 168 | 0.508395 |
acf07f204d2b27b122e5701d02b621f11cfa55c2 | 6,232 | py | Python | data_loader/uts_classification_data_loader.py | Neronjust2017/keras-project | 919e67e10b0bf518eb9cc63df68c79fe2bb71b36 | [
"Apache-2.0"
] | 2 | 2020-07-07T12:29:02.000Z | 2020-09-16T15:33:02.000Z | data_loader/uts_classification_data_loader.py | Neronjust2017/keras-project | 919e67e10b0bf518eb9cc63df68c79fe2bb71b36 | [
"Apache-2.0"
] | 1 | 2020-10-04T12:08:27.000Z | 2020-10-05T05:05:39.000Z | data_loader/uts_classification_data_loader.py | Neronjust2017/keras-project | 919e67e10b0bf518eb9cc63df68c79fe2bb71b36 | [
"Apache-2.0"
] | null | null | null | from base.base_data_loader import BaseDataLoader
from utils.uts_classification.utils import readucr,readmts,transform_labels,readmts_uci_har,readmts_ptb,readmts_ptb_aug
import sklearn
import numpy as np
import os
import pickle as dill
from collections import Counter
class UtsClassificationDataLoader(BaseDataLoader):
def __init__(self, config):
super(UtsClassificationDataLoader, self).__init__(config)
if config.dataset.type == 'uts':
if config.dataset.name == 'AFClassification':
from utils.AFClassication.data import loaddata
(X_train, y_train), (Xval, yval), (final_testset, final_testtarget), (R_train, Rval, Rtest), (
P_train, Pval, Ptest), (Q_train, Qval, Qtest), (T_train, Tval, Ttest) = loaddata()
X_train = X_train[0]
X_val = Xval[0]
y_val = yval
X_test = final_testset[0]
y_test = final_testtarget
self.nb_classes = 3
self.y_train = y_train
self.y_test = y_test
self.X_val = X_val.reshape((X_val.shape[0], X_val.shape[1], 1))
self.y_val = y_val
self.y_true = np.argmax(y_test, axis=1)
elif config.dataset.name == 'ptbdb':
file_path = './datasets/uts_data/ptbdb/'
X_train, y_train, X_val, y_val, X_test, y_test = readmts_ptb_aug(file_path)
self.nb_classes = len(np.unique(np.concatenate((y_train, y_val, y_test), axis=0)))
y_train, y_val, y_test = transform_labels(y_train, y_test, y_val)
self.X_val = X_val.reshape((self.X_val.shape[0], self.X_val.shape[1], 1))
enc = sklearn.preprocessing.OneHotEncoder()
enc.fit(np.concatenate((y_train, y_val, y_test), axis=0).reshape(-1, 1))
self.y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
self.y_val = enc.transform(self.y_val.reshape(-1, 1)).toarray()
self.y_test = enc.transform(y_test.reshape(-1, 1)).toarray()
else:
file_name = 'datasets/uts_data/' + config.dataset.name + '/' + config.dataset.name
X_train, y_train = readucr(file_name + '_TRAIN.txt')
X_test, y_test = readucr(file_name + '_TEST.txt')
self.nb_classes = len(np.unique(np.concatenate((y_train, y_test), axis=0)))
# make the min to zero of labels
y_train, y_test = transform_labels(y_train, y_test)
else:
if config.dataset.name == 'UCI_HAR_Dataset':
file_name = 'datasets/mts_data/' + config.dataset.name
X_train, y_train, X_test, y_test = readmts_uci_har(file_name)
# 调整划分比例
data = np.concatenate((X_train, X_test),axis=0)
label = np.concatenate((y_train, y_test),axis=0)
N = data.shape[0]
ind = int(N*0.9)
X_train = data[:ind]
y_train = label[:ind]
X_test = data[ind:]
y_test = label[ind:]
self.nb_classes = 6
# make the min to zero of labels
y_train, y_test = transform_labels(y_train, y_test)
elif config.dataset.name == 'Challeng2018':
from utils.AFClassication.data_challenge2018 import loaddata
(X_train, y_train), (Xval, yval), (final_testset, final_testtarget)= loaddata()
X_val = Xval
X_test = final_testset
y_val = yval
y_test = final_testtarget
self.nb_classes = 9
self.X_val = X_val
self.y_train = y_train
self.y_test = y_test
self.y_val = y_val
self.y_true = np.argmax(y_test, axis=1)
else:
file_name = 'datasets/mts_data/' + config.dataset.name + '/' + config.dataset.name
X_train, y_train, X_test, y_test, self.nb_classes = readmts(file_name)
if config.dataset.name not in ['ptbdb','AFClassification', 'Challeng2018']:
# save orignal y because later we will use binary
self.y_true = y_test.astype(np.int64)
# transform the labels from integers to one hot vectors
enc = sklearn.preprocessing.OneHotEncoder()
enc.fit(np.concatenate((y_train, y_test), axis=0).reshape(-1, 1))
self.y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
self.y_test = enc.transform(y_test.reshape(-1, 1)).toarray()
if config.dataset.type == 'uts':
self.X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
self.X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))
else:
self.X_train = X_train
self.X_test = X_test
self.train_size = self.X_train.shape[0]
self.test_size = self.X_test.shape[0]
self.input_shape = self.X_train.shape[1:]
if(self.config.model.name == "tlenet"):
from models.classification.tlenet import Classifier_TLENET
self.X_train, self.y_train, self.X_test, self.y_test, self.tot_increase_num, \
self.input_shape, self.nb_classes = Classifier_TLENET().pre_processing(self.X_train, self.y_train, self.X_test, self.y_test)
print(self.input_shape)
print("********************************")
def get_train_data(self):
if self.config.dataset.name in ['ptbdb','Challeng2018']:
return self.X_train, self.y_train, self.X_val, self.y_val
else:
return self.X_train, self.y_train
def get_test_data(self):
if (self.config.model.name == "tlenet"):
return self.X_test, self.y_test, self.y_true, self.tot_increase_num
return self.X_test, self.y_test, self.y_true
    def get_inputshape(self):
        """Return the per-sample input shape, i.e. ``X_train.shape[1:]``."""
        return self.input_shape
    def get_nbclasses(self):
        """Return the number of target classes in the loaded dataset."""
        return self.nb_classes
    def get_train_size(self):
        """Return the number of training samples (``X_train.shape[0]``)."""
        return self.train_size
    def get_test_size(self):
        """Return the number of test samples (``X_test.shape[0]``)."""
        return self.test_size
acf07fbf5afee6e74da4b761c6abfceffeb86548 | 9,707 | py | Python | custom_components/eventsensor/config_flow.py | kmclaugh/eventsensor | 138e89153b53c8a1da15a7ebd6ad7d0080c84221 | [
"MIT"
] | null | null | null | custom_components/eventsensor/config_flow.py | kmclaugh/eventsensor | 138e89153b53c8a1da15a7ebd6ad7d0080c84221 | [
"MIT"
] | null | null | null | custom_components/eventsensor/config_flow.py | kmclaugh/eventsensor | 138e89153b53c8a1da15a7ebd6ad7d0080c84221 | [
"MIT"
] | null | null | null | """Adds config flow for eventsensor."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EVENT, CONF_EVENT_DATA, CONF_NAME, CONF_STATE
from homeassistant.core import callback
from homeassistant.helpers.event import EVENT_STATE_CHANGED
from .common import (
CONF_STATE_MAP,
DOMAIN,
PRESET_AQARA_CUBE,
PRESET_AQARA_CUBE_MAPPING,
PRESET_AQARA_SMART_BUTTON,
PRESET_AQARA_SMART_BUTTON_MAPPING,
PRESET_FOH,
PRESET_FOH_MAPPING,
PRESET_HUE_DIMMER,
PRESET_HUE_DIMMER_MAPPING,
PRESET_HUE_TAP,
PRESET_HUE_TAP_MAPPING,
make_string_ui_from_dict,
make_unique_id,
parse_dict_from_ui_string,
)
_LOGGER = logging.getLogger(__name__)
# Keys for form fields that exist only inside this flow's UI steps
# (they are not stored in the resulting config entry).
CONF_PRESET_CONFIG = "preset_config"
CONF_INTEGRATION = "integration_source"
CONF_IDENTIFIER = "identifier"
CONF_TYPE_IDENTIFIER = "type_identifier"
# Choices offered for the event source in the first step.
_EVENT_SOURCE_HUE = "Hue"
_EVENT_SOURCE_DECONZ = "deCONZ"
_EVENT_SOURCE_GENERIC = "Any other"
# Event-data keys a specific device can be filtered by.
_IDENTIFIER_ID = "id"
_IDENTIFIER_UNIQUE_ID = "uniqueid"
# Preset choice meaning "no preset": the user edits the state map manually.
_PRESET_GENERIC = "Custom state mapping"
# Step 1: sensor name + which integration fires the event.
STEP_1_INITIAL = vol.Schema(
    {
        vol.Required(CONF_NAME): str,
        vol.Required(CONF_INTEGRATION, default=_EVENT_SOURCE_HUE): vol.In(
            [_EVENT_SOURCE_HUE, _EVENT_SOURCE_DECONZ, _EVENT_SOURCE_GENERIC]
        ),
    },
)
# Step 2 (Hue/deCONZ path): optional device filter + preset state mapping.
STEP_2_PRECONFIGURED = vol.Schema(
    {
        vol.Required(CONF_TYPE_IDENTIFIER, default=_IDENTIFIER_ID): vol.In(
            [_IDENTIFIER_ID, _IDENTIFIER_UNIQUE_ID]
        ),
        vol.Optional(CONF_IDENTIFIER, default=""): str,
        vol.Required(CONF_PRESET_CONFIG, default=PRESET_HUE_DIMMER): vol.In(
            [
                PRESET_HUE_DIMMER,
                PRESET_HUE_TAP,
                PRESET_FOH,
                PRESET_AQARA_SMART_BUTTON,
                PRESET_AQARA_CUBE,
                _PRESET_GENERIC,
            ]
        ),
    },
)
# Step 2 (generic path): raw event type, state attribute and optional maps.
STEP_2_GENERIC_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_EVENT): str,
        vol.Required(CONF_STATE): str,
        vol.Optional(CONF_EVENT_DATA, default=""): str,
        vol.Optional(CONF_STATE_MAP, default=""): str,
    },
)
@config_entries.HANDLERS.register(DOMAIN)
class EventSensorFlowHandler(config_entries.ConfigFlow):
    """Config flow for eventsensor.

    Each step stores its answers in ``self._data_steps_config``; the
    accumulated data is finally turned into a config entry by
    ``_create_entry``.
    """

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH

    def __init__(self):
        """Initialize."""
        # Accumulates answers across all flow steps until entry creation.
        self._data_steps_config = {}

    async def _create_entry(self):
        """Validate the collected step data and create the config entry."""
        event = self._data_steps_config.get(CONF_EVENT)
        # Refuse to listen to `state_changed` (aborted as a forbidden event).
        if event == EVENT_STATE_CHANGED:
            return self.async_abort(reason="forbidden_event")
        # Deterministic unique_id derived from the collected config, so the
        # same sensor cannot be configured twice.
        unique_id = make_unique_id(self._data_steps_config)
        await self.async_set_unique_id(unique_id)
        self._abort_if_unique_id_configured()
        name = self._data_steps_config.get(CONF_NAME)
        # Only these five keys are persisted in the entry; the auxiliary
        # flow-only fields (integration, identifier, preset) are dropped.
        entry_data = {
            CONF_NAME: name,
            CONF_EVENT: self._data_steps_config.get(CONF_EVENT),
            CONF_EVENT_DATA: self._data_steps_config.get(CONF_EVENT_DATA, {}),
            CONF_STATE: self._data_steps_config.get(CONF_STATE),
            CONF_STATE_MAP: self._data_steps_config.get(CONF_STATE_MAP, {}),
        }
        return self.async_create_entry(title=name, data=entry_data)

    def _parse_dict_fields(self, user_input, field):
        """Parse a dict typed as a UI string and store it in the step data.

        An empty/missing field is stored as an empty dict.
        """
        field_map = {}
        raw_field_map = user_input.get(field)
        if raw_field_map:
            field_map = parse_dict_from_ui_string(raw_field_map)
        self._data_steps_config[field] = field_map

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        if user_input is not None:
            self._data_steps_config[CONF_NAME] = user_input.get(CONF_NAME)
            event_source = user_input.get(CONF_INTEGRATION)
            # Known integrations pre-fill event type + state attribute and
            # continue on the preset path; anything else goes generic.
            if event_source == _EVENT_SOURCE_HUE:
                self._data_steps_config[CONF_EVENT] = "hue_event"
                self._data_steps_config[CONF_STATE] = "event"
            elif event_source == _EVENT_SOURCE_DECONZ:
                self._data_steps_config[CONF_EVENT] = "deconz_event"
                self._data_steps_config[CONF_STATE] = "event"
            else:
                return await self.async_step_generic()
            return await self.async_step_preset()
        return self.async_show_form(step_id="user", data_schema=STEP_1_INITIAL)

    async def async_step_preset(self, user_input=None):
        """Handle a flow initialized by the user."""
        if user_input is not None:
            type_id = user_input.get(CONF_TYPE_IDENTIFIER)
            identifier = user_input.get(CONF_IDENTIFIER)
            # Optional device filter: {"id"/"uniqueid": <identifier>}.
            filter_map = {}
            if identifier:
                filter_map = {type_id: identifier}
            self._data_steps_config[CONF_EVENT_DATA] = filter_map
            # Pick the preset state map; the generic choice falls through
            # with an empty map so the user can edit it in the next step.
            preset_map = {}
            preset_config = user_input.get(CONF_PRESET_CONFIG)
            if preset_config == PRESET_HUE_DIMMER:
                preset_map = PRESET_HUE_DIMMER_MAPPING
            elif preset_config == PRESET_HUE_TAP:
                preset_map = PRESET_HUE_TAP_MAPPING
            elif preset_config == PRESET_FOH:
                preset_map = PRESET_FOH_MAPPING
            elif preset_config == PRESET_AQARA_SMART_BUTTON:
                preset_map = PRESET_AQARA_SMART_BUTTON_MAPPING
            elif preset_config == PRESET_AQARA_CUBE:
                preset_map = PRESET_AQARA_CUBE_MAPPING
                # The cube reports its action in "gesture", not "event".
                self._data_steps_config[CONF_STATE] = "gesture"
            self._data_steps_config[CONF_STATE_MAP] = preset_map
            return await self.async_step_state_mapping()
        return self.async_show_form(step_id="preset", data_schema=STEP_2_PRECONFIGURED)

    async def async_step_generic(self, user_input=None):
        """Handle a flow initialized by the user."""
        if user_input is not None:
            self._data_steps_config[CONF_EVENT] = user_input.get(CONF_EVENT)
            self._data_steps_config[CONF_STATE] = user_input.get(CONF_STATE)
            self._parse_dict_fields(user_input, CONF_EVENT_DATA)
            self._parse_dict_fields(user_input, CONF_STATE_MAP)
            return await self._create_entry()
        return self.async_show_form(
            step_id="generic", data_schema=STEP_2_GENERIC_SCHEMA
        )

    async def async_step_state_mapping(self, user_input=None):
        """Handle a flow initialized by the user."""
        if user_input is not None:
            self._parse_dict_fields(user_input, CONF_STATE_MAP)
            return await self._create_entry()
        # Show the chosen preset map as an editable string.
        state_map_ui = make_string_ui_from_dict(
            self._data_steps_config.get(CONF_STATE_MAP, {})
        )
        return self.async_show_form(
            step_id="state_mapping",
            data_schema=vol.Schema(
                {vol.Optional(CONF_STATE_MAP, default=state_map_ui): str},
            ),
        )

    async def async_step_import(self, import_info):
        """Handle import from YAML config file."""
        self._data_steps_config.update(import_info)
        return await self._create_entry()

    @staticmethod
    @callback
    def async_get_options_flow(config_entry: config_entries.ConfigEntry):
        """Get the options flow for this handler to make a tariff change."""
        return EventSensorOptionsFlowHandler(config_entry)
class EventSensorOptionsFlowHandler(config_entries.OptionsFlow):
    """
    Handle the Options flow for `eventsensor` to edit the configuration.
    **entry.options is used as a container to make changes over entry.data**
    """

    def __init__(self, config_entry: config_entries.ConfigEntry):
        """Initialize the options flow handler with the config entry to modify."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            # Mapping fields arrive as UI strings; convert them back to dicts.
            for field in (CONF_EVENT_DATA, CONF_STATE_MAP):
                user_input[field] = parse_dict_from_ui_string(user_input[field])
            candidate_uid = make_unique_id(user_input)
            if self.config_entry.unique_id != candidate_uid:
                # The edit changes the unique_id: refuse it if any other
                # eventsensor entry already owns that id.
                other_entries = (
                    entry
                    for entry in self.hass.config_entries.async_entries(DOMAIN)
                    if entry.entry_id != self.config_entry.entry_id
                )
                if any(entry.unique_id == candidate_uid for entry in other_entries):
                    _LOGGER.error(
                        "The `unique_id` is already used by another sensor"
                    )
                    return self.async_abort(reason="already_configured")
            return self.async_create_entry(title="", data=user_input)

        # No input yet: pre-fill the form with the entry's current data.
        current = self.config_entry.data
        defaults = {
            CONF_NAME: current.get(CONF_NAME),
            CONF_EVENT: current.get(CONF_EVENT),
            CONF_STATE: current.get(CONF_STATE),
            CONF_EVENT_DATA: make_string_ui_from_dict(current.get(CONF_EVENT_DATA, {})),
            CONF_STATE_MAP: make_string_ui_from_dict(current.get(CONF_STATE_MAP, {})),
        }
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_NAME, default=defaults[CONF_NAME]): str,
                    vol.Required(CONF_EVENT, default=defaults[CONF_EVENT]): str,
                    vol.Required(CONF_STATE, default=defaults[CONF_STATE]): str,
                    vol.Optional(CONF_EVENT_DATA, default=defaults[CONF_EVENT_DATA]): str,
                    vol.Optional(CONF_STATE_MAP, default=defaults[CONF_STATE_MAP]): str,
                },
            ),
        )
| 37.334615 | 87 | 0.65324 |
acf0806d041fffe28dd017ef5b7bf4c802620cf5 | 35,450 | py | Python | wiktionary_parser/hyphenate.py | ajalt/whatshouldicallmybro | 9d11eedddbc13b147ec316b488ff36a1d4ce7360 | [
"MIT"
] | 7 | 2017-02-06T11:39:33.000Z | 2020-08-12T05:27:49.000Z | wiktionary_parser/hyphenate.py | ajalt/whatshouldicallmybro | 9d11eedddbc13b147ec316b488ff36a1d4ce7360 | [
"MIT"
] | null | null | null | wiktionary_parser/hyphenate.py | ajalt/whatshouldicallmybro | 9d11eedddbc13b147ec316b488ff36a1d4ce7360 | [
"MIT"
] | 2 | 2017-10-25T14:15:39.000Z | 2018-10-23T12:48:27.000Z | """ Hyphenation, using Frank Liang's algorithm.
This module provides a single function to hyphenate words. hyphenate_word takes
a string (the word), and returns a list of parts that can be separated by hyphens.
>>> hyphenate_word("hyphenation")
['hy', 'phen', 'ation']
>>> hyphenate_word("supercalifragilisticexpialidocious")
['su', 'per', 'cal', 'ifrag', 'ilis', 'tic', 'ex', 'pi', 'ali', 'do', 'cious']
>>> hyphenate_word("project")
['project']
Ned Batchelder, July 2007.
This Python code is in the public domain.
"""
import re
__version__ = '1.0.20070709'
# Pre-compiled helpers shared by the Hyphenator methods below.
char_re = re.compile(r'[a-z]')
char_dot_re = re.compile(r'[.a-z]')
digit_re = re.compile(r'\d')


class Hyphenator:
    """Hyphenate words with Frank Liang's pattern-trie algorithm."""

    def __init__(self, patterns, exceptions=''):
        """Build the pattern trie and the exception table.

        patterns -- whitespace-separated Liang patterns such as 'a1bc3d4'.
        exceptions -- whitespace-separated pre-hyphenated words, e.g. 'ta-ble'.
        """
        self.tree = {}
        for pat in patterns.split():
            self._insert_pattern(pat)
        # An exception maps the bare word straight to its point array,
        # with odd (1) points at the positions of the explicit hyphens.
        self.exceptions = {
            word.replace('-', ''): [0] + [int(gap == '-') for gap in char_re.split(word)]
            for word in exceptions.split()
        }

    def _insert_pattern(self, pattern):
        """Insert one pattern into the trie; the leaf (key None) holds the points."""
        # 'a1bc3d4' -> letters 'abcd' and point list [1, 0, 3, 4].
        letters = digit_re.sub('', pattern)
        weights = [int(digit or 0) for digit in char_dot_re.split(pattern)]
        node = self.tree
        for letter in letters:
            node = node.setdefault(letter, {})
        node[None] = weights

    def hyphenate_word(self, word):
        """ Given a word, returns a list of pieces, broken at the possible
        hyphenation points.
        """
        # Words of four letters or fewer are never hyphenated.
        if len(word) <= 4:
            return [word]
        lower = word.lower()
        points = self.exceptions.get(lower)
        if points is None:
            # Match every suffix of '.word.' against the trie and keep the
            # maximum point value seen at each inter-letter position.
            work = '.%s.' % lower
            points = [0] * (len(work) + 1)
            for start in range(len(work)):
                node = self.tree
                for ch in work[start:]:
                    node = node.get(ch)
                    if node is None:
                        break
                    leaf = node.get(None)
                    if leaf is not None:
                        for offset, value in enumerate(leaf):
                            if value > points[start + offset]:
                                points[start + offset] = value
        # Never split inside the first two or the last two characters.
        points[1] = points[2] = points[-2] = points[-3] = 0
        # Odd point values mark the break positions between characters.
        pieces = ['']
        for ch, value in zip(word, points[2:]):
            pieces[-1] += ch
            if value % 2:
                pieces.append('')
        return pieces
patterns = (
# Knuth and Liang's original hyphenation patterns from classic TeX.
# In the public domain.
"""
.ach4 .ad4der .af1t .al3t .am5at .an5c .ang4 .ani5m .ant4 .an3te .anti5s .ar5s
.ar4tie .ar4ty .as3c .as1p .as1s .aster5 .atom5 .au1d .av4i .awn4 .ba4g .ba5na
.bas4e .ber4 .be5ra .be3sm .be5sto .bri2 .but4ti .cam4pe .can5c .capa5b .car5ol
.ca4t .ce4la .ch4 .chill5i .ci2 .cit5r .co3e .co4r .cor5ner .de4moi .de3o .de3ra
.de3ri .des4c .dictio5 .do4t .du4c .dumb5 .earth5 .eas3i .eb4 .eer4 .eg2 .el5d
.el3em .enam3 .en3g .en3s .eq5ui5t .er4ri .es3 .eu3 .eye5 .fes3 .for5mer .ga2
.ge2 .gen3t4 .ge5og .gi5a .gi4b .go4r .hand5i .han5k .he2 .hero5i .hes3 .het3
.hi3b .hi3er .hon5ey .hon3o .hov5 .id4l .idol3 .im3m .im5pin .in1 .in3ci .ine2
.in2k .in3s .ir5r .is4i .ju3r .la4cy .la4m .lat5er .lath5 .le2 .leg5e .len4
.lep5 .lev1 .li4g .lig5a .li2n .li3o .li4t .mag5a5 .mal5o .man5a .mar5ti .me2
.mer3c .me5ter .mis1 .mist5i .mon3e .mo3ro .mu5ta .muta5b .ni4c .od2 .odd5
.of5te .or5ato .or3c .or1d .or3t .os3 .os4tl .oth3 .out3 .ped5al .pe5te .pe5tit
.pi4e .pio5n .pi2t .pre3m .ra4c .ran4t .ratio5na .ree2 .re5mit .res2 .re5stat
.ri4g .rit5u .ro4q .ros5t .row5d .ru4d .sci3e .self5 .sell5 .se2n .se5rie .sh2
.si2 .sing4 .st4 .sta5bl .sy2 .ta4 .te4 .ten5an .th2 .ti2 .til4 .tim5o5 .ting4
.tin5k .ton4a .to4p .top5i .tou5s .trib5ut .un1a .un3ce .under5 .un1e .un5k
.un5o .un3u .up3 .ure3 .us5a .ven4de .ve5ra .wil5i .ye4 4ab. a5bal a5ban abe2
ab5erd abi5a ab5it5ab ab5lat ab5o5liz 4abr ab5rog ab3ul a4car ac5ard ac5aro
a5ceou ac1er a5chet 4a2ci a3cie ac1in a3cio ac5rob act5if ac3ul ac4um a2d ad4din
ad5er. 2adi a3dia ad3ica adi4er a3dio a3dit a5diu ad4le ad3ow ad5ran ad4su 4adu
a3duc ad5um ae4r aeri4e a2f aff4 a4gab aga4n ag5ell age4o 4ageu ag1i 4ag4l ag1n
a2go 3agog ag3oni a5guer ag5ul a4gy a3ha a3he ah4l a3ho ai2 a5ia a3ic. ai5ly
a4i4n ain5in ain5o ait5en a1j ak1en al5ab al3ad a4lar 4aldi 2ale al3end a4lenti
a5le5o al1i al4ia. ali4e al5lev 4allic 4alm a5log. a4ly. 4alys 5a5lyst 5alyt
3alyz 4ama am5ab am3ag ama5ra am5asc a4matis a4m5ato am5era am3ic am5if am5ily
am1in ami4no a2mo a5mon amor5i amp5en a2n an3age 3analy a3nar an3arc anar4i
a3nati 4and ande4s an3dis an1dl an4dow a5nee a3nen an5est. a3neu 2ang ang5ie
an1gl a4n1ic a3nies an3i3f an4ime a5nimi a5nine an3io a3nip an3ish an3it a3niu
an4kli 5anniz ano4 an5ot anoth5 an2sa an4sco an4sn an2sp ans3po an4st an4sur
antal4 an4tie 4anto an2tr an4tw an3ua an3ul a5nur 4ao apar4 ap5at ap5ero a3pher
4aphi a4pilla ap5illar ap3in ap3ita a3pitu a2pl apoc5 ap5ola apor5i apos3t
aps5es a3pu aque5 2a2r ar3act a5rade ar5adis ar3al a5ramete aran4g ara3p ar4at
a5ratio ar5ativ a5rau ar5av4 araw4 arbal4 ar4chan ar5dine ar4dr ar5eas a3ree
ar3ent a5ress ar4fi ar4fl ar1i ar5ial ar3ian a3riet ar4im ar5inat ar3io ar2iz
ar2mi ar5o5d a5roni a3roo ar2p ar3q arre4 ar4sa ar2sh 4as. as4ab as3ant ashi4
a5sia. a3sib a3sic 5a5si4t ask3i as4l a4soc as5ph as4sh as3ten as1tr asur5a a2ta
at3abl at5ac at3alo at5ap ate5c at5ech at3ego at3en. at3era ater5n a5terna
at3est at5ev 4ath ath5em a5then at4ho ath5om 4ati. a5tia at5i5b at1ic at3if
ation5ar at3itu a4tog a2tom at5omiz a4top a4tos a1tr at5rop at4sk at4tag at5te
at4th a2tu at5ua at5ue at3ul at3ura a2ty au4b augh3 au3gu au4l2 aun5d au3r
au5sib aut5en au1th a2va av3ag a5van ave4no av3era av5ern av5ery av1i avi4er
av3ig av5oc a1vor 3away aw3i aw4ly aws4 ax4ic ax4id ay5al aye4 ays4 azi4er azz5i
5ba. bad5ger ba4ge bal1a ban5dag ban4e ban3i barbi5 bari4a bas4si 1bat ba4z 2b1b
b2be b3ber bbi4na 4b1d 4be. beak4 beat3 4be2d be3da be3de be3di be3gi be5gu 1bel
be1li be3lo 4be5m be5nig be5nu 4bes4 be3sp be5str 3bet bet5iz be5tr be3tw be3w
be5yo 2bf 4b3h bi2b bi4d 3bie bi5en bi4er 2b3if 1bil bi3liz bina5r4 bin4d bi5net
bi3ogr bi5ou bi2t 3bi3tio bi3tr 3bit5ua b5itz b1j bk4 b2l2 blath5 b4le. blen4
5blesp b3lis b4lo blun4t 4b1m 4b3n bne5g 3bod bod3i bo4e bol3ic bom4bi bon4a
bon5at 3boo 5bor. 4b1ora bor5d 5bore 5bori 5bos4 b5ota both5 bo4to bound3 4bp
4brit broth3 2b5s2 bsor4 2bt bt4l b4to b3tr buf4fer bu4ga bu3li bumi4 bu4n
bunt4i bu3re bus5ie buss4e 5bust 4buta 3butio b5uto b1v 4b5w 5by. bys4 1ca
cab3in ca1bl cach4 ca5den 4cag4 2c5ah ca3lat cal4la call5in 4calo can5d can4e
can4ic can5is can3iz can4ty cany4 ca5per car5om cast5er cas5tig 4casy ca4th
4cativ cav5al c3c ccha5 cci4a ccompa5 ccon4 ccou3t 2ce. 4ced. 4ceden 3cei 5cel.
3cell 1cen 3cenc 2cen4e 4ceni 3cent 3cep ce5ram 4cesa 3cessi ces5si5b ces5t cet4
c5e4ta cew4 2ch 4ch. 4ch3ab 5chanic ch5a5nis che2 cheap3 4ched che5lo 3chemi
ch5ene ch3er. ch3ers 4ch1in 5chine. ch5iness 5chini 5chio 3chit chi2z 3cho2
ch4ti 1ci 3cia ci2a5b cia5r ci5c 4cier 5cific. 4cii ci4la 3cili 2cim 2cin c4ina
3cinat cin3em c1ing c5ing. 5cino cion4 4cipe ci3ph 4cipic 4cista 4cisti 2c1it
cit3iz 5ciz ck1 ck3i 1c4l4 4clar c5laratio 5clare cle4m 4clic clim4 cly4 c5n 1co
co5ag coe2 2cog co4gr coi4 co3inc col5i 5colo col3or com5er con4a c4one con3g
con5t co3pa cop3ic co4pl 4corb coro3n cos4e cov1 cove4 cow5a coz5e co5zi c1q
cras5t 5crat. 5cratic cre3at 5cred 4c3reta cre4v cri2 cri5f c4rin cris4 5criti
cro4pl crop5o cros4e cru4d 4c3s2 2c1t cta4b ct5ang c5tant c2te c3ter c4ticu
ctim3i ctu4r c4tw cud5 c4uf c4ui cu5ity 5culi cul4tis 3cultu cu2ma c3ume cu4mi
3cun cu3pi cu5py cur5a4b cu5ria 1cus cuss4i 3c4ut cu4tie 4c5utiv 4cutr 1cy cze4
1d2a 5da. 2d3a4b dach4 4daf 2dag da2m2 dan3g dard5 dark5 4dary 3dat 4dativ 4dato
5dav4 dav5e 5day d1b d5c d1d4 2de. deaf5 deb5it de4bon decan4 de4cil de5com
2d1ed 4dee. de5if deli4e del5i5q de5lo d4em 5dem. 3demic dem5ic. de5mil de4mons
demor5 1den de4nar de3no denti5f de3nu de1p de3pa depi4 de2pu d3eq d4erh 5derm
dern5iz der5s des2 d2es. de1sc de2s5o des3ti de3str de4su de1t de2to de1v dev3il
4dey 4d1f d4ga d3ge4t dg1i d2gy d1h2 5di. 1d4i3a dia5b di4cam d4ice 3dict 3did
5di3en d1if di3ge di4lato d1in 1dina 3dine. 5dini di5niz 1dio dio5g di4pl dir2
di1re dirt5i dis1 5disi d4is3t d2iti 1di1v d1j d5k2 4d5la 3dle. 3dled 3dles.
4dless 2d3lo 4d5lu 2dly d1m 4d1n4 1do 3do. do5de 5doe 2d5of d4og do4la doli4
do5lor dom5iz do3nat doni4 doo3d dop4p d4or 3dos 4d5out do4v 3dox d1p 1dr
drag5on 4drai dre4 drea5r 5dren dri4b dril4 dro4p 4drow 5drupli 4dry 2d1s2 ds4p
d4sw d4sy d2th 1du d1u1a du2c d1uca duc5er 4duct. 4ducts du5el du4g d3ule dum4be
du4n 4dup du4pe d1v d1w d2y 5dyn dy4se dys5p e1a4b e3act ead1 ead5ie ea4ge
ea5ger ea4l eal5er eal3ou eam3er e5and ear3a ear4c ear5es ear4ic ear4il ear5k
ear2t eart3e ea5sp e3ass east3 ea2t eat5en eath3i e5atif e4a3tu ea2v eav3en
eav5i eav5o 2e1b e4bel. e4bels e4ben e4bit e3br e4cad ecan5c ecca5 e1ce ec5essa
ec2i e4cib ec5ificat ec5ifie ec5ify ec3im eci4t e5cite e4clam e4clus e2col
e4comm e4compe e4conc e2cor ec3ora eco5ro e1cr e4crem ec4tan ec4te e1cu e4cul
ec3ula 2e2da 4ed3d e4d1er ede4s 4edi e3dia ed3ib ed3ica ed3im ed1it edi5z 4edo
e4dol edon2 e4dri e4dul ed5ulo ee2c eed3i ee2f eel3i ee4ly ee2m ee4na ee4p1
ee2s4 eest4 ee4ty e5ex e1f e4f3ere 1eff e4fic 5efici efil4 e3fine ef5i5nite
3efit efor5es e4fuse. 4egal eger4 eg5ib eg4ic eg5ing e5git5 eg5n e4go. e4gos
eg1ul e5gur 5egy e1h4 eher4 ei2 e5ic ei5d eig2 ei5gl e3imb e3inf e1ing e5inst
eir4d eit3e ei3th e5ity e1j e4jud ej5udi eki4n ek4la e1la e4la. e4lac elan4d
el5ativ e4law elaxa4 e3lea el5ebra 5elec e4led el3ega e5len e4l1er e1les el2f
el2i e3libe e4l5ic. el3ica e3lier el5igib e5lim e4l3ing e3lio e2lis el5ish
e3liv3 4ella el4lab ello4 e5loc el5og el3op. el2sh el4ta e5lud el5ug e4mac e4mag
e5man em5ana em5b e1me e2mel e4met em3ica emi4e em5igra em1in2 em5ine em3i3ni
e4mis em5ish e5miss em3iz 5emniz emo4g emoni5o em3pi e4mul em5ula emu3n e3my
en5amo e4nant ench4er en3dic e5nea e5nee en3em en5ero en5esi en5est en3etr e3new
en5ics e5nie e5nil e3nio en3ish en3it e5niu 5eniz 4enn 4eno eno4g e4nos en3ov
en4sw ent5age 4enthes en3ua en5uf e3ny. 4en3z e5of eo2g e4oi4 e3ol eop3ar e1or
eo3re eo5rol eos4 e4ot eo4to e5out e5ow e2pa e3pai ep5anc e5pel e3pent ep5etitio
ephe4 e4pli e1po e4prec ep5reca e4pred ep3reh e3pro e4prob ep4sh ep5ti5b e4put
ep5uta e1q equi3l e4q3ui3s er1a era4b 4erand er3ar 4erati. 2erb er4bl er3ch
er4che 2ere. e3real ere5co ere3in er5el. er3emo er5ena er5ence 4erene er3ent
ere4q er5ess er3est eret4 er1h er1i e1ria4 5erick e3rien eri4er er3ine e1rio
4erit er4iu eri4v e4riva er3m4 er4nis 4ernit 5erniz er3no 2ero er5ob e5roc ero4r
er1ou er1s er3set ert3er 4ertl er3tw 4eru eru4t 5erwau e1s4a e4sage. e4sages
es2c e2sca es5can e3scr es5cu e1s2e e2sec es5ecr es5enc e4sert. e4serts e4serva
4esh e3sha esh5en e1si e2sic e2sid es5iden es5igna e2s5im es4i4n esis4te esi4u
e5skin es4mi e2sol es3olu e2son es5ona e1sp es3per es5pira es4pre 2ess es4si4b
estan4 es3tig es5tim 4es2to e3ston 2estr e5stro estruc5 e2sur es5urr es4w eta4b
eten4d e3teo ethod3 et1ic e5tide etin4 eti4no e5tir e5titio et5itiv 4etn et5ona
e3tra e3tre et3ric et5rif et3rog et5ros et3ua et5ym et5z 4eu e5un e3up eu3ro
eus4 eute4 euti5l eu5tr eva2p5 e2vas ev5ast e5vea ev3ell evel3o e5veng even4i
ev1er e5verb e1vi ev3id evi4l e4vin evi4v e5voc e5vu e1wa e4wag e5wee e3wh ewil5
ew3ing e3wit 1exp 5eyc 5eye. eys4 1fa fa3bl fab3r fa4ce 4fag fain4 fall5e 4fa4ma
fam5is 5far far5th fa3ta fa3the 4fato fault5 4f5b 4fd 4fe. feas4 feath3 fe4b
4feca 5fect 2fed fe3li fe4mo fen2d fend5e fer1 5ferr fev4 4f1f f4fes f4fie
f5fin. f2f5is f4fly f2fy 4fh 1fi fi3a 2f3ic. 4f3ical f3ican 4ficate f3icen
fi3cer fic4i 5ficia 5ficie 4fics fi3cu fi5del fight5 fil5i fill5in 4fily 2fin
5fina fin2d5 fi2ne f1in3g fin4n fis4ti f4l2 f5less flin4 flo3re f2ly5 4fm 4fn
1fo 5fon fon4de fon4t fo2r fo5rat for5ay fore5t for4i fort5a fos5 4f5p fra4t
f5rea fres5c fri2 fril4 frol5 2f3s 2ft f4to f2ty 3fu fu5el 4fug fu4min fu5ne
fu3ri fusi4 fus4s 4futa 1fy 1ga gaf4 5gal. 3gali ga3lo 2gam ga5met g5amo gan5is
ga3niz gani5za 4gano gar5n4 gass4 gath3 4gativ 4gaz g3b gd4 2ge. 2ged geez4
gel4in ge5lis ge5liz 4gely 1gen ge4nat ge5niz 4geno 4geny 1geo ge3om g4ery 5gesi
geth5 4geto ge4ty ge4v 4g1g2 g2ge g3ger gglu5 ggo4 gh3in gh5out gh4to 5gi. 1gi4a
gia5r g1ic 5gicia g4ico gien5 5gies. gil4 g3imen 3g4in. gin5ge 5g4ins 5gio 3gir
gir4l g3isl gi4u 5giv 3giz gl2 gla4 glad5i 5glas 1gle gli4b g3lig 3glo glo3r g1m
g4my gn4a g4na. gnet4t g1ni g2nin g4nio g1no g4non 1go 3go. gob5 5goe 3g4o4g
go3is gon2 4g3o3na gondo5 go3ni 5goo go5riz gor5ou 5gos. gov1 g3p 1gr 4grada
g4rai gran2 5graph. g5rapher 5graphic 4graphy 4gray gre4n 4gress. 4grit g4ro
gruf4 gs2 g5ste gth3 gu4a 3guard 2gue 5gui5t 3gun 3gus 4gu4t g3w 1gy 2g5y3n
gy5ra h3ab4l hach4 hae4m hae4t h5agu ha3la hala3m ha4m han4ci han4cy 5hand.
han4g hang5er hang5o h5a5niz han4k han4te hap3l hap5t ha3ran ha5ras har2d hard3e
har4le harp5en har5ter has5s haun4 5haz haz3a h1b 1head 3hear he4can h5ecat h4ed
he5do5 he3l4i hel4lis hel4ly h5elo hem4p he2n hena4 hen5at heo5r hep5 h4era
hera3p her4ba here5a h3ern h5erou h3ery h1es he2s5p he4t het4ed heu4 h1f h1h
hi5an hi4co high5 h4il2 himer4 h4ina hion4e hi4p hir4l hi3ro hir4p hir4r his3el
his4s hith5er hi2v 4hk 4h1l4 hlan4 h2lo hlo3ri 4h1m hmet4 2h1n h5odiz h5ods ho4g
hoge4 hol5ar 3hol4e ho4ma home3 hon4a ho5ny 3hood hoon4 hor5at ho5ris hort3e
ho5ru hos4e ho5sen hos1p 1hous house3 hov5el 4h5p 4hr4 hree5 hro5niz hro3po
4h1s2 h4sh h4tar ht1en ht5es h4ty hu4g hu4min hun5ke hun4t hus3t4 hu4t h1w
h4wart hy3pe hy3ph hy2s 2i1a i2al iam4 iam5ete i2an 4ianc ian3i 4ian4t ia5pe
iass4 i4ativ ia4tric i4atu ibe4 ib3era ib5ert ib5ia ib3in ib5it. ib5ite i1bl
ib3li i5bo i1br i2b5ri i5bun 4icam 5icap 4icar i4car. i4cara icas5 i4cay iccu4
4iceo 4ich 2ici i5cid ic5ina i2cip ic3ipa i4cly i2c5oc 4i1cr 5icra i4cry ic4te
ictu2 ic4t3ua ic3ula ic4um ic5uo i3cur 2id i4dai id5anc id5d ide3al ide4s i2di
id5ian idi4ar i5die id3io idi5ou id1it id5iu i3dle i4dom id3ow i4dr i2du id5uo
2ie4 ied4e 5ie5ga ield3 ien5a4 ien4e i5enn i3enti i1er. i3esc i1est i3et 4if.
if5ero iff5en if4fr 4ific. i3fie i3fl 4ift 2ig iga5b ig3era ight3i 4igi i3gib
ig3il ig3in ig3it i4g4l i2go ig3or ig5ot i5gre igu5i ig1ur i3h 4i5i4 i3j 4ik
i1la il3a4b i4lade i2l5am ila5ra i3leg il1er ilev4 il5f il1i il3ia il2ib il3io
il4ist 2ilit il2iz ill5ab 4iln il3oq il4ty il5ur il3v i4mag im3age ima5ry
imenta5r 4imet im1i im5ida imi5le i5mini 4imit im4ni i3mon i2mu im3ula 2in.
i4n3au 4inav incel4 in3cer 4ind in5dling 2ine i3nee iner4ar i5ness 4inga 4inge
in5gen 4ingi in5gling 4ingo 4ingu 2ini i5ni. i4nia in3io in1is i5nite. 5initio
in3ity 4ink 4inl 2inn 2i1no i4no4c ino4s i4not 2ins in3se insur5a 2int. 2in4th
in1u i5nus 4iny 2io 4io. ioge4 io2gr i1ol io4m ion3at ion4ery ion3i io5ph ior3i
i4os io5th i5oti io4to i4our 2ip ipe4 iphras4 ip3i ip4ic ip4re4 ip3ul i3qua
iq5uef iq3uid iq3ui3t 4ir i1ra ira4b i4rac ird5e ire4de i4ref i4rel4 i4res ir5gi
ir1i iri5de ir4is iri3tu 5i5r2iz ir4min iro4g 5iron. ir5ul 2is. is5ag is3ar
isas5 2is1c is3ch 4ise is3er 3isf is5han is3hon ish5op is3ib isi4d i5sis is5itiv
4is4k islan4 4isms i2so iso5mer is1p is2pi is4py 4is1s is4sal issen4 is4ses
is4ta. is1te is1ti ist4ly 4istral i2su is5us 4ita. ita4bi i4tag 4ita5m i3tan
i3tat 2ite it3era i5teri it4es 2ith i1ti 4itia 4i2tic it3ica 5i5tick it3ig
it5ill i2tim 2itio 4itis i4tism i2t5o5m 4iton i4tram it5ry 4itt it3uat i5tud
it3ul 4itz. i1u 2iv iv3ell iv3en. i4v3er. i4vers. iv5il. iv5io iv1it i5vore
iv3o3ro i4v3ot 4i5w ix4o 4iy 4izar izi4 5izont 5ja jac4q ja4p 1je jer5s 4jestie
4jesty jew3 jo4p 5judg 3ka. k3ab k5ag kais4 kal4 k1b k2ed 1kee ke4g ke5li k3en4d
k1er kes4 k3est. ke4ty k3f kh4 k1i 5ki. 5k2ic k4ill kilo5 k4im k4in. kin4de
k5iness kin4g ki4p kis4 k5ish kk4 k1l 4kley 4kly k1m k5nes 1k2no ko5r kosh4 k3ou
kro5n 4k1s2 k4sc ks4l k4sy k5t k1w lab3ic l4abo laci4 l4ade la3dy lag4n lam3o
3land lan4dl lan5et lan4te lar4g lar3i las4e la5tan 4lateli 4lativ 4lav la4v4a
2l1b lbin4 4l1c2 lce4 l3ci 2ld l2de ld4ere ld4eri ldi4 ld5is l3dr l4dri le2a
le4bi left5 5leg. 5legg le4mat lem5atic 4len. 3lenc 5lene. 1lent le3ph le4pr
lera5b ler4e 3lerg 3l4eri l4ero les2 le5sco 5lesq 3less 5less. l3eva lev4er.
lev4era lev4ers 3ley 4leye 2lf l5fr 4l1g4 l5ga lgar3 l4ges lgo3 2l3h li4ag li2am
liar5iz li4as li4ato li5bi 5licio li4cor 4lics 4lict. l4icu l3icy l3ida lid5er
3lidi lif3er l4iff li4fl 5ligate 3ligh li4gra 3lik 4l4i4l lim4bl lim3i li4mo
l4im4p l4ina 1l4ine lin3ea lin3i link5er li5og 4l4iq lis4p l1it l2it. 5litica
l5i5tics liv3er l1iz 4lj lka3 l3kal lka4t l1l l4law l2le l5lea l3lec l3leg l3lel
l3le4n l3le4t ll2i l2lin4 l5lina ll4o lloqui5 ll5out l5low 2lm l5met lm3ing
l4mod lmon4 2l1n2 3lo. lob5al lo4ci 4lof 3logic l5ogo 3logu lom3er 5long lon4i
l3o3niz lood5 5lope. lop3i l3opm lora4 lo4rato lo5rie lor5ou 5los. los5et
5losophiz 5losophy los4t lo4ta loun5d 2lout 4lov 2lp lpa5b l3pha l5phi lp5ing
l3pit l4pl l5pr 4l1r 2l1s2 l4sc l2se l4sie 4lt lt5ag ltane5 l1te lten4 ltera4
lth3i l5ties. ltis4 l1tr ltu2 ltur3a lu5a lu3br luch4 lu3ci lu3en luf4 lu5id
lu4ma 5lumi l5umn. 5lumnia lu3o luo3r 4lup luss4 lus3te 1lut l5ven l5vet4 2l1w
1ly 4lya 4lyb ly5me ly3no 2lys4 l5yse 1ma 2mab ma2ca ma5chine ma4cl mag5in 5magn
2mah maid5 4mald ma3lig ma5lin mal4li mal4ty 5mania man5is man3iz 4map ma5rine.
ma5riz mar4ly mar3v ma5sce mas4e mas1t 5mate math3 ma3tis 4matiza 4m1b mba4t5
m5bil m4b3ing mbi4v 4m5c 4me. 2med 4med. 5media me3die m5e5dy me2g mel5on mel4t
me2m mem1o3 1men men4a men5ac men4de 4mene men4i mens4 mensu5 3ment men4te me5on
m5ersa 2mes 3mesti me4ta met3al me1te me5thi m4etr 5metric me5trie me3try me4v
4m1f 2mh 5mi. mi3a mid4a mid4g mig4 3milia m5i5lie m4ill min4a 3mind m5inee
m4ingl min5gli m5ingly min4t m4inu miot4 m2is mis4er. mis5l mis4ti m5istry 4mith
m2iz 4mk 4m1l m1m mma5ry 4m1n mn4a m4nin mn4o 1mo 4mocr 5mocratiz mo2d1 mo4go
mois2 moi5se 4mok mo5lest mo3me mon5et mon5ge moni3a mon4ism mon4ist mo3niz
monol4 mo3ny. mo2r 4mora. mos2 mo5sey mo3sp moth3 m5ouf 3mous mo2v 4m1p mpara5
mpa5rab mpar5i m3pet mphas4 m2pi mpi4a mp5ies m4p1in m5pir mp5is mpo3ri mpos5ite
m4pous mpov5 mp4tr m2py 4m3r 4m1s2 m4sh m5si 4mt 1mu mula5r4 5mult multi3 3mum
mun2 4mup mu4u 4mw 1na 2n1a2b n4abu 4nac. na4ca n5act nag5er. nak4 na4li na5lia
4nalt na5mit n2an nanci4 nan4it nank4 nar3c 4nare nar3i nar4l n5arm n4as nas4c
nas5ti n2at na3tal nato5miz n2au nau3se 3naut nav4e 4n1b4 ncar5 n4ces. n3cha
n5cheo n5chil n3chis nc1in nc4it ncour5a n1cr n1cu n4dai n5dan n1de nd5est.
ndi4b n5d2if n1dit n3diz n5duc ndu4r nd2we 2ne. n3ear ne2b neb3u ne2c 5neck 2ned
ne4gat neg5ativ 5nege ne4la nel5iz ne5mi ne4mo 1nen 4nene 3neo ne4po ne2q n1er
nera5b n4erar n2ere n4er5i ner4r 1nes 2nes. 4nesp 2nest 4nesw 3netic ne4v n5eve
ne4w n3f n4gab n3gel nge4n4e n5gere n3geri ng5ha n3gib ng1in n5git n4gla ngov4
ng5sh n1gu n4gum n2gy 4n1h4 nha4 nhab3 nhe4 3n4ia ni3an ni4ap ni3ba ni4bl ni4d
ni5di ni4er ni2fi ni5ficat n5igr nik4 n1im ni3miz n1in 5nine. nin4g ni4o 5nis.
nis4ta n2it n4ith 3nitio n3itor ni3tr n1j 4nk2 n5kero n3ket nk3in n1kl 4n1l n5m
nme4 nmet4 4n1n2 nne4 nni3al nni4v nob4l no3ble n5ocl 4n3o2d 3noe 4nog noge4
nois5i no5l4i 5nologis 3nomic n5o5miz no4mo no3my no4n non4ag non5i n5oniz 4nop
5nop5o5li nor5ab no4rary 4nosc nos4e nos5t no5ta 1nou 3noun nov3el3 nowl3 n1p4
npi4 npre4c n1q n1r nru4 2n1s2 ns5ab nsati4 ns4c n2se n4s3es nsid1 nsig4 n2sl
ns3m n4soc ns4pe n5spi nsta5bl n1t nta4b nter3s nt2i n5tib nti4er nti2f n3tine
n4t3ing nti4p ntrol5li nt4s ntu3me nu1a nu4d nu5en nuf4fe n3uin 3nu3it n4um
nu1me n5umi 3nu4n n3uo nu3tr n1v2 n1w4 nym4 nyp4 4nz n3za 4oa oad3 o5a5les oard3
oas4e oast5e oat5i ob3a3b o5bar obe4l o1bi o2bin ob5ing o3br ob3ul o1ce och4
o3chet ocif3 o4cil o4clam o4cod oc3rac oc5ratiz ocre3 5ocrit octor5a oc3ula
o5cure od5ded od3ic odi3o o2do4 odor3 od5uct. od5ucts o4el o5eng o3er oe4ta o3ev
o2fi of5ite ofit4t o2g5a5r og5ativ o4gato o1ge o5gene o5geo o4ger o3gie 1o1gis
og3it o4gl o5g2ly 3ogniz o4gro ogu5i 1ogy 2ogyn o1h2 ohab5 oi2 oic3es oi3der
oiff4 oig4 oi5let o3ing oint5er o5ism oi5son oist5en oi3ter o5j 2ok o3ken ok5ie
o1la o4lan olass4 ol2d old1e ol3er o3lesc o3let ol4fi ol2i o3lia o3lice ol5id.
o3li4f o5lil ol3ing o5lio o5lis. ol3ish o5lite o5litio o5liv olli4e ol5ogiz
olo4r ol5pl ol2t ol3ub ol3ume ol3un o5lus ol2v o2ly om5ah oma5l om5atiz om2be
om4bl o2me om3ena om5erse o4met om5etry o3mia om3ic. om3ica o5mid om1in o5mini
5ommend omo4ge o4mon om3pi ompro5 o2n on1a on4ac o3nan on1c 3oncil 2ond on5do
o3nen on5est on4gu on1ic o3nio on1is o5niu on3key on4odi on3omy on3s onspi4
onspir5a onsu4 onten4 on3t4i ontif5 on5um onva5 oo2 ood5e ood5i oo4k oop3i o3ord
oost5 o2pa ope5d op1er 3opera 4operag 2oph o5phan o5pher op3ing o3pit o5pon
o4posi o1pr op1u opy5 o1q o1ra o5ra. o4r3ag or5aliz or5ange ore5a o5real or3ei
ore5sh or5est. orew4 or4gu 4o5ria or3ica o5ril or1in o1rio or3ity o3riu or2mi
orn2e o5rof or3oug or5pe 3orrh or4se ors5en orst4 or3thi or3thy or4ty o5rum o1ry
os3al os2c os4ce o3scop 4oscopi o5scr os4i4e os5itiv os3ito os3ity osi4u os4l
o2so os4pa os4po os2ta o5stati os5til os5tit o4tan otele4g ot3er. ot5ers o4tes
4oth oth5esi oth3i4 ot3ic. ot5ica o3tice o3tif o3tis oto5s ou2 ou3bl ouch5i
ou5et ou4l ounc5er oun2d ou5v ov4en over4ne over3s ov4ert o3vis oviti4 o5v4ol
ow3der ow3el ow5est ow1i own5i o4wo oy1a 1pa pa4ca pa4ce pac4t p4ad 5pagan
p3agat p4ai pain4 p4al pan4a pan3el pan4ty pa3ny pa1p pa4pu para5bl par5age
par5di 3pare par5el p4a4ri par4is pa2te pa5ter 5pathic pa5thy pa4tric pav4 3pay
4p1b pd4 4pe. 3pe4a pear4l pe2c 2p2ed 3pede 3pedi pedia4 ped4ic p4ee pee4d pek4
pe4la peli4e pe4nan p4enc pen4th pe5on p4era. pera5bl p4erag p4eri peri5st
per4mal perme5 p4ern per3o per3ti pe5ru per1v pe2t pe5ten pe5tiz 4pf 4pg 4ph.
phar5i phe3no ph4er ph4es. ph1ic 5phie ph5ing 5phisti 3phiz ph2l 3phob 3phone
5phoni pho4r 4phs ph3t 5phu 1phy pi3a pian4 pi4cie pi4cy p4id p5ida pi3de 5pidi
3piec pi3en pi4grap pi3lo pi2n p4in. pind4 p4ino 3pi1o pion4 p3ith pi5tha pi2tu
2p3k2 1p2l2 3plan plas5t pli3a pli5er 4plig pli4n ploi4 plu4m plum4b 4p1m 2p3n
po4c 5pod. po5em po3et5 5po4g poin2 5point poly5t po4ni po4p 1p4or po4ry 1pos
pos1s p4ot po4ta 5poun 4p1p ppa5ra p2pe p4ped p5pel p3pen p3per p3pet ppo5site
pr2 pray4e 5preci pre5co pre3em pref5ac pre4la pre3r p3rese 3press pre5ten pre3v
5pri4e prin4t3 pri4s pris3o p3roca prof5it pro3l pros3e pro1t 2p1s2 p2se ps4h
p4sib 2p1t pt5a4b p2te p2th pti3m ptu4r p4tw pub3 pue4 puf4 pul3c pu4m pu2n
pur4r 5pus pu2t 5pute put3er pu3tr put4ted put4tin p3w qu2 qua5v 2que. 3quer
3quet 2rab ra3bi rach4e r5acl raf5fi raf4t r2ai ra4lo ram3et r2ami rane5o ran4ge
r4ani ra5no rap3er 3raphy rar5c rare4 rar5ef 4raril r2as ration4 rau4t ra5vai
rav3el ra5zie r1b r4bab r4bag rbi2 rbi4f r2bin r5bine rb5ing. rb4o r1c r2ce
rcen4 r3cha rch4er r4ci4b rc4it rcum3 r4dal rd2i rdi4a rdi4er rdin4 rd3ing 2re.
re1al re3an re5arr 5reav re4aw r5ebrat rec5oll rec5ompe re4cre 2r2ed re1de
re3dis red5it re4fac re2fe re5fer. re3fi re4fy reg3is re5it re1li re5lu r4en4ta
ren4te re1o re5pin re4posi re1pu r1er4 r4eri rero4 re5ru r4es. re4spi ress5ib
res2t re5stal re3str re4ter re4ti4z re3tri reu2 re5uti rev2 re4val rev3el
r5ev5er. re5vers re5vert re5vil rev5olu re4wh r1f rfu4 r4fy rg2 rg3er r3get
r3gic rgi4n rg3ing r5gis r5git r1gl rgo4n r3gu rh4 4rh. 4rhal ri3a ria4b ri4ag
r4ib rib3a ric5as r4ice 4rici 5ricid ri4cie r4ico rid5er ri3enc ri3ent ri1er
ri5et rig5an 5rigi ril3iz 5riman rim5i 3rimo rim4pe r2ina 5rina. rin4d rin4e
rin4g ri1o 5riph riph5e ri2pl rip5lic r4iq r2is r4is. ris4c r3ish ris4p ri3ta3b
r5ited. rit5er. rit5ers rit3ic ri2tu rit5ur riv5el riv3et riv3i r3j r3ket rk4le
rk4lin r1l rle4 r2led r4lig r4lis rl5ish r3lo4 r1m rma5c r2me r3men rm5ers
rm3ing r4ming. r4mio r3mit r4my r4nar r3nel r4ner r5net r3ney r5nic r1nis4 r3nit
r3niv rno4 r4nou r3nu rob3l r2oc ro3cr ro4e ro1fe ro5fil rok2 ro5ker 5role.
rom5ete rom4i rom4p ron4al ron4e ro5n4is ron4ta 1room 5root ro3pel rop3ic ror3i
ro5ro ros5per ros4s ro4the ro4ty ro4va rov5el rox5 r1p r4pea r5pent rp5er. r3pet
rp4h4 rp3ing r3po r1r4 rre4c rre4f r4reo rre4st rri4o rri4v rron4 rros4 rrys4
4rs2 r1sa rsa5ti rs4c r2se r3sec rse4cr rs5er. rs3es rse5v2 r1sh r5sha r1si
r4si4b rson3 r1sp r5sw rtach4 r4tag r3teb rten4d rte5o r1ti rt5ib rti4d r4tier
r3tig rtil3i rtil4l r4tily r4tist r4tiv r3tri rtroph4 rt4sh ru3a ru3e4l ru3en
ru4gl ru3in rum3pl ru2n runk5 run4ty r5usc ruti5n rv4e rvel4i r3ven rv5er.
r5vest r3vey r3vic rvi4v r3vo r1w ry4c 5rynge ry3t sa2 2s1ab 5sack sac3ri s3act
5sai salar4 sal4m sa5lo sal4t 3sanc san4de s1ap sa5ta 5sa3tio sat3u sau4 sa5vor
5saw 4s5b scan4t5 sca4p scav5 s4ced 4scei s4ces sch2 s4cho 3s4cie 5scin4d scle5
s4cli scof4 4scopy scour5a s1cu 4s5d 4se. se4a seas4 sea5w se2c3o 3sect 4s4ed
se4d4e s5edl se2g seg3r 5sei se1le 5self 5selv 4seme se4mol sen5at 4senc sen4d
s5ened sen5g s5enin 4sentd 4sentl sep3a3 4s1er. s4erl ser4o 4servo s1e4s se5sh
ses5t 5se5um 5sev sev3en sew4i 5sex 4s3f 2s3g s2h 2sh. sh1er 5shev sh1in sh3io
3ship shiv5 sho4 sh5old shon3 shor4 short5 4shw si1b s5icc 3side. 5sides 5sidi
si5diz 4signa sil4e 4sily 2s1in s2ina 5sine. s3ing 1sio 5sion sion5a si2r sir5a
1sis 3sitio 5siu 1siv 5siz sk2 4ske s3ket sk5ine sk5ing s1l2 s3lat s2le slith5
2s1m s3ma small3 sman3 smel4 s5men 5smith smol5d4 s1n4 1so so4ce soft3 so4lab
sol3d2 so3lic 5solv 3som 3s4on. sona4 son4g s4op 5sophic s5ophiz s5ophy sor5c
sor5d 4sov so5vi 2spa 5spai spa4n spen4d 2s5peo 2sper s2phe 3spher spho5 spil4
sp5ing 4spio s4ply s4pon spor4 4spot squal4l s1r 2ss s1sa ssas3 s2s5c s3sel
s5seng s4ses. s5set s1si s4sie ssi4er ss5ily s4sl ss4li s4sn sspend4 ss2t ssur5a
ss5w 2st. s2tag s2tal stam4i 5stand s4ta4p 5stat. s4ted stern5i s5tero ste2w
stew5a s3the st2i s4ti. s5tia s1tic 5stick s4tie s3tif st3ing 5stir s1tle 5stock
stom3a 5stone s4top 3store st4r s4trad 5stratu s4tray s4trid 4stry 4st3w s2ty
1su su1al su4b3 su2g3 su5is suit3 s4ul su2m sum3i su2n su2r 4sv sw2 4swo s4y
4syc 3syl syn5o sy5rin 1ta 3ta. 2tab ta5bles 5taboliz 4taci ta5do 4taf4 tai5lo
ta2l ta5la tal5en tal3i 4talk tal4lis ta5log ta5mo tan4de tanta3 ta5per ta5pl
tar4a 4tarc 4tare ta3riz tas4e ta5sy 4tatic ta4tur taun4 tav4 2taw tax4is 2t1b
4tc t4ch tch5et 4t1d 4te. tead4i 4teat tece4 5tect 2t1ed te5di 1tee teg4 te5ger
te5gi 3tel. teli4 5tels te2ma2 tem3at 3tenan 3tenc 3tend 4tenes 1tent ten4tag
1teo te4p te5pe ter3c 5ter3d 1teri ter5ies ter3is teri5za 5ternit ter5v 4tes.
4tess t3ess. teth5e 3teu 3tex 4tey 2t1f 4t1g 2th. than4 th2e 4thea th3eas the5at
the3is 3thet th5ic. th5ica 4thil 5think 4thl th5ode 5thodic 4thoo thor5it
tho5riz 2ths 1tia ti4ab ti4ato 2ti2b 4tick t4ico t4ic1u 5tidi 3tien tif2 ti5fy
2tig 5tigu till5in 1tim 4timp tim5ul 2t1in t2ina 3tine. 3tini 1tio ti5oc tion5ee
5tiq ti3sa 3tise tis4m ti5so tis4p 5tistica ti3tl ti4u 1tiv tiv4a 1tiz ti3za
ti3zen 2tl t5la tlan4 3tle. 3tled 3tles. t5let. t5lo 4t1m tme4 2t1n2 1to to3b
to5crat 4todo 2tof to2gr to5ic to2ma tom4b to3my ton4ali to3nat 4tono 4tony
to2ra to3rie tor5iz tos2 5tour 4tout to3war 4t1p 1tra tra3b tra5ch traci4
trac4it trac4te tras4 tra5ven trav5es5 tre5f tre4m trem5i 5tria tri5ces 5tricia
4trics 2trim tri4v tro5mi tron5i 4trony tro5phe tro3sp tro3v tru5i trus4 4t1s2
t4sc tsh4 t4sw 4t3t2 t4tes t5to ttu4 1tu tu1a tu3ar tu4bi tud2 4tue 4tuf4 5tu3i
3tum tu4nis 2t3up. 3ture 5turi tur3is tur5o tu5ry 3tus 4tv tw4 4t1wa twis4 4two
1ty 4tya 2tyl type3 ty5ph 4tz tz4e 4uab uac4 ua5na uan4i uar5ant uar2d uar3i
uar3t u1at uav4 ub4e u4bel u3ber u4bero u1b4i u4b5ing u3ble. u3ca uci4b uc4it
ucle3 u3cr u3cu u4cy ud5d ud3er ud5est udev4 u1dic ud3ied ud3ies ud5is u5dit
u4don ud4si u4du u4ene uens4 uen4te uer4il 3ufa u3fl ugh3en ug5in 2ui2 uil5iz
ui4n u1ing uir4m uita4 uiv3 uiv4er. u5j 4uk u1la ula5b u5lati ulch4 5ulche
ul3der ul4e u1len ul4gi ul2i u5lia ul3ing ul5ish ul4lar ul4li4b ul4lis 4ul3m
u1l4o 4uls uls5es ul1ti ultra3 4ultu u3lu ul5ul ul5v um5ab um4bi um4bly u1mi
u4m3ing umor5o um2p unat4 u2ne un4er u1ni un4im u2nin un5ish uni3v un3s4 un4sw
unt3ab un4ter. un4tes unu4 un5y un5z u4ors u5os u1ou u1pe uper5s u5pia up3ing
u3pl up3p upport5 upt5ib uptu4 u1ra 4ura. u4rag u4ras ur4be urc4 ur1d ure5at
ur4fer ur4fr u3rif uri4fic ur1in u3rio u1rit ur3iz ur2l url5ing. ur4no uros4
ur4pe ur4pi urs5er ur5tes ur3the urti4 ur4tie u3ru 2us u5sad u5san us4ap usc2
us3ci use5a u5sia u3sic us4lin us1p us5sl us5tere us1tr u2su usur4 uta4b u3tat
4ute. 4utel 4uten uten4i 4u1t2i uti5liz u3tine ut3ing ution5a u4tis 5u5tiz u4t1l
ut5of uto5g uto5matic u5ton u4tou uts4 u3u uu4m u1v2 uxu3 uz4e 1va 5va. 2v1a4b
vac5il vac3u vag4 va4ge va5lie val5o val1u va5mo va5niz va5pi var5ied 3vat 4ve.
4ved veg3 v3el. vel3li ve4lo v4ely ven3om v5enue v4erd 5vere. v4erel v3eren
ver5enc v4eres ver3ie vermi4n 3verse ver3th v4e2s 4ves. ves4te ve4te vet3er
ve4ty vi5ali 5vian 5vide. 5vided 4v3iden 5vides 5vidi v3if vi5gn vik4 2vil
5vilit v3i3liz v1in 4vi4na v2inc vin5d 4ving vio3l v3io4r vi1ou vi4p vi5ro
vis3it vi3so vi3su 4viti vit3r 4vity 3viv 5vo. voi4 3vok vo4la v5ole 5volt 3volv
vom5i vor5ab vori4 vo4ry vo4ta 4votee 4vv4 v4y w5abl 2wac wa5ger wag5o wait5
w5al. wam4 war4t was4t wa1te wa5ver w1b wea5rie weath3 wed4n weet3 wee5v wel4l
w1er west3 w3ev whi4 wi2 wil2 will5in win4de win4g wir4 3wise with3 wiz5 w4k
wl4es wl3in w4no 1wo2 wom1 wo5ven w5p wra4 wri4 writa4 w3sh ws4l ws4pe w5s4t 4wt
wy4 x1a xac5e x4ago xam3 x4ap xas5 x3c2 x1e xe4cuto x2ed xer4i xe5ro x1h xhi2
xhil5 xhu4 x3i xi5a xi5c xi5di x4ime xi5miz x3o x4ob x3p xpan4d xpecto5 xpe3d
x1t2 x3ti x1u xu3a xx4 y5ac 3yar4 y5at y1b y1c y2ce yc5er y3ch ych4e ycom4 ycot4
y1d y5ee y1er y4erf yes4 ye4t y5gi 4y3h y1i y3la ylla5bl y3lo y5lu ymbol5 yme4
ympa3 yn3chr yn5d yn5g yn5ic 5ynx y1o4 yo5d y4o5g yom4 yo5net y4ons y4os y4ped
yper5 yp3i y3po y4poc yp2ta y5pu yra5m yr5ia y3ro yr4r ys4c y3s2e ys3ica ys3io
3ysis y4so yss4 ys1t ys3ta ysur4 y3thin yt3ic y1w za1 z5a2b zar2 4zb 2ze ze4n
ze4p z1er ze3ro zet4 2z1i z4il z4is 5zl 4zm 1zo zo4m zo5ol zte4 4z1z2 z4zy
"""
# Extra patterns, from ushyphmax.tex, dated 2005-05-30.
# Copyright (C) 1990, 2004, 2005 Gerard D.C. Kuiken.
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
#
# These patterns are based on the Hyphenation Exception Log
# published in TUGboat, Volume 10 (1989), No. 3, pp. 337-341,
# and a large number of incorrectly hyphenated words not yet published.
"""
.con5gr .de5riva .dri5v4 .eth1y6l1 .eu4ler .ev2 .ever5si5b .ga4s1om1 .ge4ome
.ge5ot1 .he3mo1 .he3p6a .he3roe .in5u2t .kil2n3i .ko6r1te1 .le6ices .me4ga1l
.met4ala .mim5i2c1 .mi1s4ers .ne6o3f .noe1th .non1e2m .poly1s .post1am .pre1am
.rav5en1o .semi5 .sem4ic .semid6 .semip4 .semir4 .sem6is4 .semiv4 .sph6in1
.spin1o .ta5pes1tr .te3legr .to6pog .to2q .un3at5t .un5err5 .vi2c3ar .we2b1l
.re1e4c a5bolic a2cabl af6fish am1en3ta5b anal6ys ano5a2c ans5gr ans3v anti1d
an3ti1n2 anti1re a4pe5able ar3che5t ar2range as5ymptot ath3er1o1s at6tes.
augh4tl au5li5f av3iou back2er. ba6r1onie ba1thy bbi4t be2vie bi5d2if bil2lab
bio5m bi1orb bio1rh b1i3tive blan2d1 blin2d1 blon2d2 bor1no5 bo2t1u1l brus4q
bus6i2er bus6i2es buss4ing but2ed. but4ted cad5e1m cat1a1s2 4chs. chs3hu chie5vo
cig3a3r cin2q cle4ar co6ph1o3n cous2ti cri3tie croc1o1d cro5e2co c2tro3me6c
1cu2r1ance 2d3alone data1b dd5a5b d2d5ib de4als. de5clar1 de2c5lina de3fin3iti
de2mos des3ic de2tic dic1aid dif5fra 3di1methy di2ren di2rer 2d1lead 2d1li2e
3do5word dren1a5l drif2t1a d1ri3pleg5 drom3e5d d3tab du2al. du1op1o1l ea4n3ies
e3chas edg1l ed1uling eli2t1is e1loa en1dix eo3grap 1e6p3i3neph1 e2r3i4an.
e3spac6i eth1y6l1ene 5eu2clid1 feb1rua fermi1o 3fich fit5ted. fla1g6el flow2er.
3fluor gen2cy. ge3o1d ght1we g1lead get2ic. 4g1lish 5glo5bin 1g2nac gnet1ism
gno5mo g2n1or. g2noresp 2g1o4n3i1za graph5er. griev1 g1utan hair1s ha2p3ar5r
hatch1 hex2a3 hite3sid h3i5pel1a4 hnau3z ho6r1ic. h2t1eou hypo1tha id4ios
ifac1et ign4it ignit1er i4jk im3ped3a infra1s2 i5nitely. irre6v3oc i1tesima
ith5i2l itin5er5ar janu3a japan1e2s je1re1m 1ke6ling 1ki5netic 1kovian k3sha
la4c3i5e lai6n3ess lar5ce1n l3chai l3chil6d1 lead6er. lea4s1a 1lec3ta6b
le3g6en2dre 1le1noid lith1o5g ll1fl l2l3ish l5mo3nell lo1bot1o1 lo2ges. load4ed.
load6er. l3tea lth5i2ly lue1p 1lunk3er 1lum5bia. 3lyg1a1mi ly5styr ma1la1p m2an.
man3u1sc mar1gin1 medi2c med3i3cin medio6c1 me3gran3 m2en. 3mi3da5b 3milita
mil2l1ag mil5li5li mi6n3is. mi1n2ut1er mi1n2ut1est m3ma1b 5maph1ro1 5moc1ra1t
mo5e2las mol1e5c mon4ey1l mono3ch mo4no1en moro6n5is mono1s6 moth4et2 m1ou3sin
m5shack2 mu2dro mul2ti5u n3ar4chs. n3ch2es1t ne3back 2ne1ski n1dieck nd3thr
nfi6n3ites 4n5i4an. nge5nes ng1ho ng1spr nk3rup n5less 5noc3er1os nom1a6l
nom5e1no n1o1mist non1eq non1i4so 5nop1oly. no1vemb ns5ceiv ns4moo ntre1p
obli2g1 o3chas odel3li odit1ic oerst2 oke1st o3les3ter oli3gop1o1 o1lo3n4om
o3mecha6 onom1ic o3norma o3no2t1o3n o3nou op1ism. or4tho3ni4t orth1ri or5tively
o4s3pher o5test1er o5tes3tor oth3e1o1s ou3ba3do o6v3i4an. oxi6d1ic pal6mat
parag6ra4 par4a1le param4 para3me pee2v1 phi2l3ant phi5lat1e3l pi2c1a3d pli2c1ab
pli5nar poin3ca 1pole. poly1e po3lyph1ono 1prema3c pre1neu pres2pli pro2cess
proc3i3ty. pro2g1e 3pseu2d pseu3d6o3d2 pseu3d6o3f2 pto3mat4 p5trol3 pu5bes5c
quain2t1e qu6a3si3 quasir6 quasis6 quin5tes5s qui3v4ar r1abolic 3rab1o1loi
ra3chu r3a3dig radi1o6g r2amen 3ra4m5e1triz ra3mou ra5n2has ra1or r3bin1ge
re2c3i1pr rec5t6ang re4t1ribu r3ial. riv1o1l 6rk. rk1ho r1krau 6rks. r5le5qu
ro1bot1 ro5e2las ro5epide1 ro3mesh ro1tron r3pau5li rse1rad1i r1thou r1treu
r1veil rz1sc sales3c sales5w 5sa3par5il sca6p1er sca2t1ol s4chitz schro1ding1
1sci2utt scrap4er. scy4th1 sem1a1ph se3mes1t se1mi6t5ic sep3temb shoe1st sid2ed.
side5st side5sw si5resid sky1sc 3slova1kia 3s2og1a1my so2lute 3s2pace 1s2pacin
spe3cio spher1o spi2c1il spokes5w sports3c sports3w s3qui3to s2s1a3chu1 ss3hat
s2s3i4an. s5sign5a3b 1s2tamp s2t1ant5shi star3tli sta1ti st5b 1stor1ab strat1a1g
strib5ut st5scr stu1pi4d1 styl1is su2per1e6 1sync 1syth3i2 swimm6 5tab1o1lism
ta3gon. talk1a5 t1a1min t6ap6ath 5tar2rh tch1c tch3i1er t1cr teach4er. tele2g
tele1r6o 3ter1gei ter2ic. t3ess2es tha4l1am tho3don th1o5gen1i tho1k2er thy4l1an
thy3sc 2t3i4an. ti2n3o1m t1li2er tolo2gy tot3ic trai3tor1 tra1vers travers3a3b
treach1e tr4ial. 3tro1le1um trof4ic. tro3fit tro1p2is 3trop1o5les 3trop1o5lis
t1ro1pol3it tsch3ie ttrib1ut1 turn3ar t1wh ty2p5al ua3drati uad1ratu u5do3ny
uea1m u2r1al. uri4al. us2er. v1ativ v1oir5du1 va6guer vaude3v 1verely. v1er1eig
ves1tite vi1vip3a3r voice1p waste3w6a2 wave1g4 w3c week1n wide5sp wo4k1en
wrap3aro writ6er. x1q xquis3 y5che3d ym5e5try y1stro yes5ter1y z3ian. z3o1phr
z2z3w
""")
exceptions = """
as-so-ciate as-so-ciates dec-li-na-tion oblig-a-tory phil-an-thropic present
presents project projects reci-procity re-cog-ni-zance ref-or-ma-tion
ret-ri-bu-tion ta-ble
"""
hyphenator = Hyphenator(patterns, exceptions)
hyphenate_word = hyphenator.hyphenate_word
del patterns
del exceptions
| 68.042226 | 101 | 0.799154 |
acf081832315713ad9f800dd9969f7ff8a0e8d07 | 482 | py | Python | solvers/solver.py | Noah-Huppert/keep-talking-solver | d6e6b4f4be53f2c2ce76db18cb0682b77863593a | [
"MIT"
] | null | null | null | solvers/solver.py | Noah-Huppert/keep-talking-solver | d6e6b4f4be53f2c2ce76db18cb0682b77863593a | [
"MIT"
] | null | null | null | solvers/solver.py | Noah-Huppert/keep-talking-solver | d6e6b4f4be53f2c2ce76db18cb0682b77863593a | [
"MIT"
] | null | null | null | class Solver:
""" Abstract class for loading and running module solvers
"""
def solve(self):
""" Logic for solving the module
"""
raise NotImplementedError
def man_version(self) -> str:
""" Returns the bomb defusal manual version the solver is for
"""
raise NotImplementedError
def __str__(self) -> str:
""" Returns the name of the module the solver solves
"""
raise NotImplementedError
| 25.368421 | 69 | 0.603734 |
acf08241a362f92ea543666b1f8093d257afff87 | 261 | py | Python | geneGroove_project/manage.py | bobbybabra/codeGuild | 0b49dbb6a0f113e1179e28fd36d59e7c327c7d31 | [
"BSD-2-Clause"
] | null | null | null | geneGroove_project/manage.py | bobbybabra/codeGuild | 0b49dbb6a0f113e1179e28fd36d59e7c327c7d31 | [
"BSD-2-Clause"
] | null | null | null | geneGroove_project/manage.py | bobbybabra/codeGuild | 0b49dbb6a0f113e1179e28fd36d59e7c327c7d31 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geneGroove_project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.727273 | 82 | 0.781609 |
acf082b3ac11762ffbdb3a5e4f34ab355bd50702 | 3,303 | py | Python | app/api/images/mutation.py | lunyamwis/laylinks-bend | 04ff9ebb5da482e5b2642a89654a5b5f0128eaaa | [
"MIT"
] | null | null | null | app/api/images/mutation.py | lunyamwis/laylinks-bend | 04ff9ebb5da482e5b2642a89654a5b5f0128eaaa | [
"MIT"
] | null | null | null | app/api/images/mutation.py | lunyamwis/laylinks-bend | 04ff9ebb5da482e5b2642a89654a5b5f0128eaaa | [
"MIT"
] | null | null | null | import graphene
from graphql_extensions.auth.decorators import login_required
from ..helpers.permission_required import role_required, token_required
from ..helpers.validation_errors import error_dict
from ..helpers.constants import SUCCESS_ACTION
from .models import (
Images,Tags,Category,Premium
)
from .validators.validate_input import ImageValidations
from .object_types import (
ImageCategoryInput,ImageCategoryType,
ImageTagsInput,ImageTagsType,
ImagesInput,ImageType,
ImagePremiumType,ImagePremiumInput
)
from datetime import datetime
class CreateImages(graphene.Mutation):
'''Handle creation of a user and saving to the db'''
# items that the mutation will return
image = graphene.Field(ImageType)
status = graphene.String()
message = graphene.String()
class Arguments:
'''Arguments to be passed in during the user creation'''
input = ImagesInput(required=True)
@staticmethod
@token_required
@login_required
def mutate(self, info, **kwargs):
'''Mutation for user creation. Actual saving happens here'''
error_msg = error_dict['admin_only'].format("create a image")
role_required(info.context.user, ['admin', 'manager'], error_msg)
validator = ImageValidations()
data = validator.validate_image_data(
kwargs.get("input", ''))
tags = data.pop("tags",[])
categories = data.pop("categories",[])
new_image = Images(**data)
new_image.save()
for tag in tags:
tag_ = Tags.objects.get(id=tag)
new_image.tags.add(tag_)
for category in categories:
category_ = Category.objects.get(id=category)
new_image.categories.add(category_)
return CreateImages(status="Success", image=new_image,
message=SUCCESS_ACTION.format("Image created"))
class CreatePremiumImages(graphene.Mutation):
'''Handle addition of a book and handle saving it to the db'''
# items that the mutation will return
premium_image = graphene.Field(ImagePremiumType)
status = graphene.String()
message = graphene.String()
class Arguments:
'''Arguments to be passed in during the book creation'''
input = ImagePremiumInput(required=True)
@staticmethod
@token_required
@login_required
def mutate(self, info, **kwargs):
'''Mutation for user creation. Actual saving happens here'''
error_msg = error_dict['admin_only'].format("Add Premium Image")
role_required(info.context.user, ['admin', 'manager'], error_msg)
validator = ImageValidations()
data = validator.validate_premium_image_data(
kwargs.get("input", ''))
images = data.pop("content",[])
new_premium_image = Premium(**data)
new_premium_image.save()
for image in images:
image_ = Images(**image)
new_premium_image.content.add(image_)
return CreatePremiumImages(status="Success", premium_image=new_premium_image,
message=SUCCESS_ACTION.format("Premium Images added"))
class Mutation(graphene.ObjectType):
create_images = CreateImages.Field()
create_premium_images = CreatePremiumImages.Field() | 36.7 | 90 | 0.675749 |
acf087aef02dd5191890cc12920c39974e802553 | 1,032 | py | Python | api/routers/tracker.py | dok529/smart-social-distancing | fd054c92cf478cefd5326c7beaa288b24dd5110f | [
"Apache-2.0"
] | 113 | 2020-05-22T10:54:44.000Z | 2022-03-22T13:43:38.000Z | api/routers/tracker.py | dok529/smart-social-distancing | fd054c92cf478cefd5326c7beaa288b24dd5110f | [
"Apache-2.0"
] | 55 | 2020-05-20T20:16:40.000Z | 2021-10-13T10:00:56.000Z | api/routers/tracker.py | myunyui22/smart-social-distancing-dev | 2b71c4330420758a3ff6833923cf2ef81cdebdb1 | [
"Apache-2.0"
] | 37 | 2020-05-24T00:48:48.000Z | 2022-02-28T14:58:13.000Z | from fastapi import APIRouter
from typing import Optional
from api.models.tracker import TrackerDTO
from api.utils import (extract_config, handle_response, update_config,
map_section_from_config, map_to_config_file_format)
tracker_router = APIRouter()
@tracker_router.get("", response_model=TrackerDTO)
def get_tracker_config():
"""
Returns the tracker configuration of the processor
"""
return map_section_from_config("Tracker", extract_config())
@tracker_router.put("", response_model=TrackerDTO)
def update_tracker_config(tracker: TrackerDTO, reboot_processor: Optional[bool] = True):
"""
Updates the tracker configuration of the processor
"""
config_dict = extract_config()
tracker_dict = map_to_config_file_format(tracker)
config_dict["Tracker"] = tracker_dict
success = update_config(config_dict, reboot_processor)
if not success:
return handle_response(tracker_dict, success)
return map_section_from_config("Tracker", extract_config())
| 33.290323 | 88 | 0.754845 |
acf087ecf62d02745b03d513744002ca3c4128ed | 875 | py | Python | setup.py | itsrainingmani/py-ulid | f9f040e76dcd0da5fa3e7ae465438a234361b27c | [
"MIT"
] | 3 | 2019-09-04T14:31:00.000Z | 2020-06-05T08:32:48.000Z | setup.py | itsrainingmani/py-ulid | f9f040e76dcd0da5fa3e7ae465438a234361b27c | [
"MIT"
] | 2 | 2020-03-31T03:27:42.000Z | 2021-02-02T22:13:22.000Z | setup.py | itsrainingmani/py-ulid | f9f040e76dcd0da5fa3e7ae465438a234361b27c | [
"MIT"
] | null | null | null | import pathlib
import setuptools
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The contents of the Readme file
README = (HERE / "README.md").read_text()
setuptools.setup(
name="py-ulid",
version="1.0.3",
description="Python library that provides an implementation of the ULID Specification",
long_description=README,
long_description_content_type='text/markdown',
author="Manikandan Sundararajan",
author_email="me@tsmanikandan.com",
license="MIT",
url="https://github.com/tsmanikandan/py-ulid",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(exclude=("tests",)),
include_package_data=True,
)
| 29.166667 | 91 | 0.688 |
acf08888c7c5df1ab93530362633468ca4d45fe5 | 530 | py | Python | packages/python/plotly/plotly/validators/layout/scene/aspectratio/_x.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/scene/aspectratio/_x.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/scene/aspectratio/_x.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="x", parent_name="layout.scene.aspectratio", **kwargs
):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
implied_edits=kwargs.pop("implied_edits", {"^aspectmode": "manual"}),
min=kwargs.pop("min", 0),
**kwargs,
)
| 33.125 | 81 | 0.624528 |
acf08893fc3a7502f685684420e1f9abe3d4cd0b | 481 | py | Python | src/dataset_processing.py | Ophien/gpt-2-grpc-server | b464c0c487e5bfa8a695d45d8af27cc402b0d956 | [
"MIT"
] | 1 | 2020-01-21T13:53:19.000Z | 2020-01-21T13:53:19.000Z | src/dataset_processing.py | Ophien/gpt-2-grpc-server | b464c0c487e5bfa8a695d45d8af27cc402b0d956 | [
"MIT"
] | 1 | 2020-01-28T23:15:31.000Z | 2020-01-28T23:15:31.000Z | src/dataset_processing.py | Ophien/gpt-2-grpc-server | b464c0c487e5bfa8a695d45d8af27cc402b0d956 | [
"MIT"
] | null | null | null | import re
import sys
def process_dataset(path):
dataset = open(path, "r+")
dataset_string = dataset.read()
# remove unwanted referencing for papers and wikipedia text
dataset_string = re.sub(r"\\|\'|\*|\_|\{.*\}|\#|\+|\-*|\$|\"|\[.*\](:([0-9]*-*[0-9]*))*|\(.*\)", '', dataset_string)
dataset_string = re.sub(r"[ \f\t\v]$", '', dataset_string)
dataset_string
dataset.seek(0)
dataset.write(dataset_string)
dataset.truncate()
dataset.close() | 30.0625 | 120 | 0.592516 |
acf089674de137ed17e6a7b8400f0559b2d27545 | 409 | py | Python | core/tests.py | uktrade/lite-exporter-frontend | cf42ac37a21236486aa303c8935c44a7eba91ef5 | [
"MIT"
] | 3 | 2019-05-31T06:36:17.000Z | 2020-02-12T16:02:24.000Z | core/tests.py | uktrade/lite-exporter-frontend | cf42ac37a21236486aa303c8935c44a7eba91ef5 | [
"MIT"
] | 33 | 2019-03-28T10:20:14.000Z | 2020-07-16T15:12:43.000Z | core/tests.py | uktrade/lite-exporter-frontend | cf42ac37a21236486aa303c8935c44a7eba91ef5 | [
"MIT"
] | 1 | 2019-05-01T15:52:02.000Z | 2019-05-01T15:52:02.000Z | from django.test import TestCase
from core.helpers import convert_parameters_to_query_params
class TestHelpers(TestCase):
def test_convert_parameters_to_query_params(self):
params = {"request": "request", "org_type": ["individual", "commercial"], "page": 1, "empty": None}
self.assertEqual(convert_parameters_to_query_params(params), "?org_type=individual&org_type=commercial&page=1")
| 37.181818 | 119 | 0.760391 |
acf08972861f5ccf70588d2ad76a104456027684 | 1,139 | py | Python | test-examples/add_shapes2.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/add_shapes2.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/add_shapes2.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | """
Display one shapes layer ontop of one image layer using the add_shapes and
add_image APIs. When the window is closed it will print the coordinates of
your shapes.
"""
import numpy as np
from skimage import data
import napari
# create the list of polygons
triangle = np.array([[11, 13], [111, 113], [22, 246]])
person = np.array([[505, 60], [402, 71], [383, 42], [251, 95], [212, 59],
[131, 137], [126, 187], [191, 204], [171, 248], [211, 260],
[273, 243], [264, 225], [430, 173], [512, 160]])
building = np.array([[310, 382], [229, 381], [209, 401], [221, 411],
[258, 411], [300, 412], [306, 435], [268, 434],
[265, 454], [298, 461], [307, 461], [307, 507],
[349, 510], [352, 369], [330, 366], [330, 366]])
polygons = [triangle, person, building]
with napari.gui_qt():
# add the image
viewer = napari.view_image(data.camera(), name='photographer')
# add the polygons
layer = viewer.add_shapes(polygons, shape_type='polygon', edge_width=5,
edge_color='coral', face_color='royalblue')
| 37.966667 | 78 | 0.568042 |
acf08adb6a8fd0c93db776bd4358b94df3971a50 | 387 | py | Python | inject/utils.py | 3lpsy/exutils | ce189a229b60aeda33374e438fca47338877fd98 | [
"MIT"
] | 5 | 2020-01-07T22:46:41.000Z | 2020-03-09T16:00:24.000Z | inject/utils.py | 3lpsy/exutils | ce189a229b60aeda33374e438fca47338877fd98 | [
"MIT"
] | null | null | null | inject/utils.py | 3lpsy/exutils | ce189a229b60aeda33374e438fca47338877fd98 | [
"MIT"
] | null | null | null | from utils import hexstr, byteslash, intbyte
def align(val_to_align, alignment):
return ((val_to_align + alignment - 1) // alignment) * alignment
def printhexi(msg, val):
print(f"[*] {msg}: {hexstr(val)} - {val}",)
def printsecnames(msg, sections):
print(
f"[*] {msg}",
" ".join([s.Name.decode() for s in sections]),
f"({len(sections)})",
)
| 21.5 | 68 | 0.589147 |
acf08be43de01e0d0f5014acba3a8306757a80cb | 1,089 | py | Python | setup_convert_gui_macos.py | r-koubou/XLS2ExpressionMap | f020c340aca9a72d4021c3d1ee26a0ba989c3447 | [
"MIT"
] | 1 | 2018-01-17T01:30:13.000Z | 2018-01-17T01:30:13.000Z | setup_convert_gui_macos.py | r-koubou/XLS2ExpressionMap | f020c340aca9a72d4021c3d1ee26a0ba989c3447 | [
"MIT"
] | 1 | 2017-08-24T18:19:54.000Z | 2017-08-26T19:58:54.000Z | setup_convert_gui_macos.py | r-koubou/XLS2ExpressionMap | f020c340aca9a72d4021c3d1ee26a0ba989c3447 | [
"MIT"
] | null | null | null | # coding: utf-8
import appinfo
from cx_Freeze import setup, Executable
APP_NAME = "XLS2ExpressionMap"
executable = APP_NAME
options = {
"include_files":[
( "LICENSE", "LICENSE" ),
( "NOTICE", "NOTICE" ),
( "convertgui.kv", "convertgui.kv" ),
( "resources/dropicon.png", "resources/dropicon.png" ),
],
"packages": [ "os", "kivy" ],
"excludes": [ "tkinter" ]
}
options_bdist_mac = {
"custom_info_plist": "macos/Info.plist",
"bundle_name": APP_NAME,
"iconfile": "macos/icon.icns",
}
exe = Executable(
script = 'convert_gui_main.py',
base = None,
copyright = appinfo.AUTHOR,
targetName = executable,
)
setup( name = APP_NAME,
version = appinfo.VERSION,
author = appinfo.AUTHOR,
description = 'Excel file(*.xlsx) to Cubase Expression Map file converter',
url = appinfo.URL,
options = {
"build_exe": options,
"bdist_mac": options_bdist_mac,
},
executables = [exe] )
| 25.325581 | 82 | 0.555556 |
acf08d9b21bcb71bacc8f025eaa5ec4b0c6a8717 | 43,875 | py | Python | hlapipeline/utils/astrometric_utils.py | spacetelescope/hla-pipeline | 1c689458bfc01f8cef6bcbefb40fc2ff54771dd4 | [
"BSD-3-Clause"
] | 3 | 2018-12-17T20:08:25.000Z | 2020-12-30T04:15:07.000Z | hlapipeline/utils/astrometric_utils.py | spacetelescope/hla-pipeline | 1c689458bfc01f8cef6bcbefb40fc2ff54771dd4 | [
"BSD-3-Clause"
] | 24 | 2018-10-12T00:51:12.000Z | 2021-06-04T15:54:21.000Z | hlapipeline/utils/astrometric_utils.py | spacetelescope/hla-pipeline | 1c689458bfc01f8cef6bcbefb40fc2ff54771dd4 | [
"BSD-3-Clause"
] | 8 | 2018-10-04T19:13:57.000Z | 2019-01-24T19:50:41.000Z | """Utilities to support creation of astrometrically accurate reference catalogs
The function, create_astrometric_catalog, allows the user to query an
astrometric catalog online to generate a catalog of astrometric sources that
should fall within the field-of-view of all the input images.
This module relies on the definition of an environment variable to specify
the URL of the astrometric catalog to use for generating this
reference catalog.
ASTROMETRIC_CATALOG_URL -- URL of web service that can be queried to
obtain listing of astrometric sources,
sky coordinates, and magnitudes.
"""
import os
from io import BytesIO
import csv
import requests
from lxml import etree
import inspect
import numpy as np
from stwcs.distortion import utils
from stwcs import wcsutil
from stsci.tools import fileutil as fu
from stsci.tools import parseinput
from astropy import units as u
from astropy.table import Table, vstack
from astropy.coordinates import SkyCoord
from astropy.io import fits as pf
from astropy.io import ascii
from astropy.nddata import NDData
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
import photutils
from photutils import detect_sources, source_properties, deblend_sources
from photutils import Background2D, MedianBackground
from photutils import DAOStarFinder
from scipy import ndimage
import matplotlib.pyplot as plt
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
import pysynphot as S
from drizzlepac.tweakutils import build_xy_zeropoint
from hlapipeline.utils import bitmask
ASTROMETRIC_CAT_ENVVAR = "ASTROMETRIC_CATALOG_URL"
DEF_CAT_URL = 'http://gsss.stsci.edu/webservices'
if ASTROMETRIC_CAT_ENVVAR in os.environ:
SERVICELOCATION = os.environ[ASTROMETRIC_CAT_ENVVAR]
else:
SERVICELOCATION = DEF_CAT_URL
MODULE_PATH = os.path.dirname(inspect.getfile(inspect.currentframe()))
VEGASPEC = os.path.join(os.path.dirname(MODULE_PATH),
'data','alpha_lyr_stis_008.fits')
__all__ = ['create_astrometric_catalog', 'compute_radius', 'find_gsc_offset',
'extract_sources', 'find_hist2d_offset', 'generate_source_catalog',
'classify_sources', 'countExtn']
"""
Utility functions.
Many were originally released in stsci.tools.fileutil.
"""
def DEGTORAD(deg):
return (deg * np.pi / 180.)
def RADTODEG(rad):
return (rad * 180. / np.pi)
def buildRotMatrix(theta):
_theta = DEGTORAD(theta)
_mrot = np.zeros(shape=(2,2), dtype=np.float64)
_mrot[0] = (np.cos(_theta), np.sin(_theta))
_mrot[1] = (-np.sin(_theta), np.cos(_theta))
return _mrot
def countExtn(fimg, extname='SCI'):
"""
Return the number of 'extname' extensions, defaulting to counting the
number of SCI extensions.
"""
closefits = False
if isinstance(fimg, str):
fimg = pf.open(fimg)
closefits = True
n = 0
for e in fimg:
if 'extname' in e.header and e.header['extname'] == extname:
n += 1
if closefits:
fimg.close()
return n
"""
Primary function for creating an astrometric reference catalog.
"""
def create_astrometric_catalog(inputs, **pars):
    """Create an astrometric catalog that covers the inputs' field-of-view.
    Parameters
    ===========
    input : str
        Filenames of images to be aligned to astrometric catalog
    catalog : str, optional
        Name of catalog to extract astrometric positions for sources in the
        input images' field-of-view. Default: GAIADR2. Options available are
        documented on the catalog web page.
    output : str, optional
        Filename to give to the astrometric catalog read in from the master
        catalog web service. If 'None', no file will be written out.
        Default: ref_cat.ecsv
    gaia_only : bool, optional
        Specify whether or not to only use sources from GAIA in output catalog
        Default: False
    existing_wcs : HST.wcs object
        existing WCS object specified by the user
    note ::
        This function will point to astrometric catalog web service defined
        through the use of the ASTROMETRIC_CATALOG_URL environment variable.
    Returns
    =======
    ref_table : object
        Astropy Table object of the catalog
    """
    # interpret input parameters
    catalog = pars.get("catalog", 'GAIADR2')
    output = pars.get("output", 'ref_cat.ecsv')
    gaia_only = pars.get("gaia_only", False)
    table_format = pars.get("table_format", 'ascii.ecsv')
    existing_wcs = pars.get("existing_wcs", None)
    inputs, _ = parseinput.parseinput(inputs)
    # start by creating a composite field-of-view for all inputs
    # This default output WCS will have the same plate-scale and orientation
    # as the first chip in the list, which for WFPC2 data means the PC.
    # Fortunately, for alignment, this doesn't matter since no resampling of
    # data will be performed
    if existing_wcs:
        outwcs = existing_wcs
    else:
        outwcs = build_reference_wcs(inputs)
    radius = compute_radius(outwcs)
    ra, dec = outwcs.wcs.crval
    # perform query for this field-of-view
    ref_dict = get_catalog(ra, dec, sr=radius, catalog=catalog)
    colnames = ('ra', 'dec', 'mag', 'objID', 'GaiaID')
    col_types = ('f8', 'f8', 'f4', 'U25', 'U25')
    ref_table = Table(names=colnames, dtype=col_types)
    # Add catalog name as meta data
    ref_table.meta['catalog'] = catalog
    ref_table.meta['gaia_only'] = gaia_only
    # rename coordinate columns to be consistent with tweakwcs
    ref_table.rename_column('ra', 'RA')
    ref_table.rename_column('dec', 'DEC')
    # extract just the columns we want...
    num_sources = 0
    for source in ref_dict:
        if 'GAIAsourceID' in source:
            g = source['GAIAsourceID']
            # BUG FIX: was "g.strip() is ''" -- identity comparison against a
            # string literal relies on CPython interning and is a
            # SyntaxWarning on modern Python; use an equality test instead.
            if gaia_only and g.strip() == '':
                continue
        else:
            g = -1  # indicator for no source ID extracted
        r = float(source['ra'])
        d = float(source['dec'])
        m = -999.9  # float(source['mag'])
        o = source['objID']
        num_sources += 1
        ref_table.add_row((r, d, m, o, g))
    # Write out table to a file, if specified
    if output:
        ref_table.write(output, format=table_format)
        print("Created catalog '{}' with {} sources".format(output, num_sources))
    return ref_table
def build_reference_wcs(inputs, sciname='sci'):
    """Compose an undistorted output WCS spanning all the input exposures."""
    chip_wcs_list = []
    for img in inputs:
        num_sci = countExtn(img)
        for extver in range(1, num_sci + 1):
            extn = (sciname, extver)
            if sciname == 'sci':
                chip_wcs_list.append(wcsutil.HSTWCS(img, ext=extn))
            else:
                # Working with HDRLET as input and do the best we can...
                chip_wcs_list.append(read_hlet_wcs(img, ext=extn))
    # This default output WCS will have the same plate-scale and orientation
    # as the first chip in the list, which for WFPC2 data means the PC.
    # Fortunately, for alignment, this doesn't matter since no resampling of
    # data will be performed
    return utils.output_wcs(chip_wcs_list)
def get_catalog(ra, dec, sr=0.1, fmt='CSV', catalog='GSC241'):
    """ Extract catalog from VO web service.
    Parameters
    ----------
    ra : float
        Right Ascension (RA) of center of field-of-view (in decimal degrees)
    dec : float
        Declination (Dec) of center of field-of-view (in decimal degrees)
    sr : float, optional
        Search radius (in decimal degrees) from field-of-view center to use
        for sources from catalog. Default: 0.1 degrees
    fmt : str, optional
        Format of output catalog to be returned. Options are determined by
        web-service, and currently include (Default: CSV):
        VOTABLE(default) | HTML | KML | CSV | TSV | JSON | TEXT
    catalog : str, optional
        Name of catalog to query, as defined by web-service. Default: 'GSC241'
    Returns
    -------
    csv : obj
        CSV object of returned sources with all columns as provided by catalog
    """
    service_type = 'vo/CatalogSearch.aspx'
    query = 'RA={}&DEC={}&SR={}&FORMAT={}&CAT={}&MINDET=5'.format(
        ra, dec, sr, fmt, catalog)
    service_url = '{}/{}?{}'.format(SERVICELOCATION, service_type, query)
    response = requests.get(service_url, headers={'Content-Type': 'text/csv'})
    # convert from bytes to a String, then split on the service's CRLF endings
    lines = response.content.decode().split('\r\n')
    # The first line reports the number of sources returned; it must be
    # dropped so the CSV header row is interpreted correctly.
    return csv.DictReader(lines[1:])
def compute_radius(wcs):
    """Compute the radius from the center to the furthest edge of the WCS."""
    ra, dec = wcs.wcs.crval
    center = SkyCoord(ra=ra * u.degree, dec=dec * u.degree)
    footprint = wcs.calc_footprint()
    corners = SkyCoord(ra=footprint[:, 0] * u.degree,
                       dec=footprint[:, 1] * u.degree)
    # Largest center-to-corner separation, in degrees
    return center.separation(corners).max().value
def find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'):
    """Find the GSC to GAIA offset based on guide star coordinates

    Parameters
    ----------
    image : str
        filename of image to be processed
    input_catalog : str, optional
        Name of catalog the guide-star positions were based on. Default: 'GSC1'
    output_catalog : str, optional
        Name of catalog to transform the positions into. Default: 'GAIA'

    Returns
    -------
    delta_ra,delta_dec : tuple of floats
        Offset in decimal degrees of image based on correction to guide star
        coordinates relative to GAIA
    """
    serviceType = "GSCConvert/GSCconvert.aspx"
    spec_str = "TRANSFORM={}-{}&IPPPSSOOT={}"
    # Determine the IPPPSSOOT rootname used by the conversion service
    if 'rootname' in pf.getheader(image):
        ippssoot = pf.getval(image, 'rootname').upper()
    else:
        ippssoot = fu.buildNewRootname(image).upper()
    spec = spec_str.format(input_catalog, output_catalog, ippssoot)
    serviceUrl = "{}/{}?{}".format(SERVICELOCATION, serviceType, spec)
    rawcat = requests.get(serviceUrl)
    if not rawcat.ok:
        # BUG FIX: the format string was "{{}" which itself raises
        # "ValueError: Single '}' encountered in format string" -- the error
        # path crashed before ever reporting the failing URL.
        print("Problem accessing service with:\n{}".format(serviceUrl))
        raise ValueError
    delta_ra = delta_dec = None
    # The service replies with XML; stream-parse it for the two offset tags.
    tree = BytesIO(rawcat.content)
    for _, element in etree.iterparse(tree):
        if element.tag == 'deltaRA':
            delta_ra = float(element.text)
        elif element.tag == 'deltaDEC':
            delta_dec = float(element.text)
    return delta_ra, delta_dec
def extract_sources(img, **pars):
    """Use photutils to find sources in image based on segmentation.
    Parameters
    ==========
    dqmask : array
        Bitmask which identifies whether a pixel should be used (1) in source
        identification or not(0). If provided, this mask will be applied to the
        input array prior to source identification.
    fwhm : float
        Full-width half-maximum (fwhm) of the PSF in pixels.
        Default: 3.0
    threshold : float or None
        Value from the image which serves as the limit for determining sources.
        If None, compute a default value of (background+5*rms(background)).
        If threshold < 0.0, use absolute value as scaling factor for default value.
        Default: None
    source_box : int
        Size of box (in pixels) which defines the minimum size of a valid source
    classify : boolean
        Specify whether or not to apply classification based on invarient moments
        of each source to determine whether or not a source is likely to be a
        cosmic-ray, and not include those sources in the final catalog.
        Default: True
    centering_mode : {'segmentation', 'starfind'}
        Algorithm to use when computing the positions of the detected sources.
        Centering will only take place after `threshold` has been determined, and
        sources are identified using segmentation. Centering using `segmentation`
        will rely on `photutils.segmentation.source_properties` to generate the
        properties for the source catalog. Centering using `starfind` will use
        `photutils.IRAFStarFinder` to characterize each source in the catalog.
        Default: 'starfind'
    nlargest : int, None
        Number of largest (brightest) sources in each chip/array to measure
        when using 'starfind' mode. Default: None (all)
    output : str
        If specified, write out the catalog of sources to the file with this name
    plot : boolean
        Specify whether or not to create a plot of the sources on a view of the image
        Default: False
    vmax : float
        If plotting the sources, scale the image to this maximum value.
    """
    # --- interpret optional parameters ---
    fwhm= pars.get('fwhm', 3.0)
    threshold= pars.get('threshold', None)
    source_box = pars.get('source_box', 7)
    classify = pars.get('classify', True)
    output = pars.get('output', None)
    plot = pars.get('plot', False)
    vmax = pars.get('vmax', None)
    centering_mode = pars.get('centering_mode', 'starfind')
    deblend = pars.get('deblend', False)
    dqmask = pars.get('dqmask',None)
    nlargest = pars.get('nlargest', None)
    # apply any provided dqmask for segmentation only
    # (the original, unmasked `img` is still used later for centroiding)
    if dqmask is not None:
        imgarr = img.copy()
        imgarr[dqmask] = 0
    else:
        imgarr = img
    bkg_estimator = MedianBackground()
    bkg = None
    # Try successively larger exclude_percentile values until Background2D
    # succeeds; heavily masked images can fail at the lower percentiles.
    exclude_percentiles = [10,25,50,75]
    for percentile in exclude_percentiles:
        try:
            bkg = Background2D(imgarr, (50, 50), filter_size=(3, 3),
                               bkg_estimator=bkg_estimator,
                               exclude_percentile=percentile)
            # If it succeeds, stop and use that value
            # NOTE(review): bkg_rms is already 5*rms, and bkg_rms_mean then
            # adds 5*std of that -- effectively a 25x factor; confirm intent.
            bkg_rms = (5. * bkg.background_rms)
            bkg_rms_mean = bkg.background.mean() + 5. * bkg_rms.std()
            default_threshold = bkg.background + bkg_rms
            if threshold is None or threshold < 0.0:
                if threshold is not None and threshold < 0.0:
                    # Negative threshold: use |threshold| as a scale factor on
                    # the background-derived default threshold array.
                    threshold = -1*threshold*default_threshold
                    print("{} based on {}".format(threshold.max(), default_threshold.max()))
                    bkg_rms_mean = threshold.max()
                else:
                    threshold = default_threshold
            else:
                # Positive scalar threshold supplied by caller
                bkg_rms_mean = 3. * threshold
            if bkg_rms_mean < 0:
                bkg_rms_mean = 0.
            break
        except Exception:
            # Background2D failed at this percentile; retry with the next one
            bkg = None
    # If Background2D does not work at all, define default scalar values for
    # the background to be used in source identification
    if bkg is None:
        bkg_rms_mean = max(0.01, imgarr.min())
        bkg_rms = bkg_rms_mean * 5
    # Smoothing kernel sized to the expected source FWHM
    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)
    kernel.normalize()
    segm = detect_sources(imgarr, threshold, npixels=source_box,
                          filter_kernel=kernel)
    if deblend:
        segm = deblend_sources(imgarr, segm, npixels=5,
                               filter_kernel=kernel, nlevels=16,
                               contrast=0.01)
    # If classify is turned on, it should modify the segmentation map
    if classify:
        cat = source_properties(imgarr, segm)
        # Remove likely cosmic-rays based on central_moments classification
        bad_srcs = np.where(classify_sources(cat) == 0)[0]+1
        segm.remove_labels(bad_srcs) # CAUTION: May be time-consuming!!!
    # convert segm to mask for daofind
    if centering_mode == 'starfind':
        src_table = None
        #daofind = IRAFStarFinder(fwhm=fwhm, threshold=5.*bkg.background_rms_median)
        print("Setting up DAOStarFinder with: \n fwhm={} threshold={}".format(fwhm, bkg_rms_mean))
        daofind = DAOStarFinder(fwhm=fwhm, threshold=bkg_rms_mean)
        # Identify nbrightest/largest sources
        if nlargest is not None:
            if nlargest > len(segm.labels):
                nlargest = len(segm.labels)
            # labels of the nlargest segments, by segment area
            large_labels = np.flip(np.argsort(segm.areas)+1)[:nlargest]
        print("Looking for sources in {} segments".format(len(segm.labels)))
        for label in segm.labels:
            if nlargest is not None and label not in large_labels:
                continue # Move on to the next segment
            # Get slice definition for the segment with this label
            seg_slice = segm.segments[label-1].slices
            seg_yoffset = seg_slice[0].start
            seg_xoffset = seg_slice[1].start
            #Define raw data from this slice
            detection_img = img[seg_slice]
            # zero out any pixels which do not have this segments label
            detection_img[np.where(segm.data[seg_slice]==0)] = 0
            # Detect sources in this specific segment
            seg_table = daofind(detection_img)
            # Pick out brightest source only
            if src_table is None and len(seg_table) > 0:
                # Initialize final master source list catalog
                src_table = Table(names=seg_table.colnames,
                                  dtype=[dt[1] for dt in seg_table.dtype.descr])
            if len(seg_table) > 0:
                max_row = np.where(seg_table['peak'] == seg_table['peak'].max())[0][0]
                # Add row for detected source to master catalog
                # apply offset to slice to convert positions into full-frame coordinates
                seg_table['xcentroid'] += seg_xoffset
                seg_table['ycentroid'] += seg_yoffset
                src_table.add_row(seg_table[max_row])
    else:
        # segmentation-based centroids for every detected segment
        cat = source_properties(img, segm)
        src_table = cat.to_table()
        # Make column names consistent with IRAFStarFinder column names
        src_table.rename_column('source_sum', 'flux')
        src_table.rename_column('source_sum_err', 'flux_err')
    if src_table is not None:
        print("Total Number of detected sources: {}".format(len(src_table)))
    else:
        print("No detected sources!")
        return None, None
    # Move 'id' column from first to last position
    # Makes it consistent for remainder of code
    cnames = src_table.colnames
    cnames.append(cnames[0])
    del cnames[0]
    tbl = src_table[cnames]
    if output:
        tbl['xcentroid'].info.format = '.10f'  # optional format
        tbl['ycentroid'].info.format = '.10f'
        tbl['flux'].info.format = '.10f'
        if not output.endswith('.cat'):
            output += '.cat'
        tbl.write(output, format='ascii.commented_header')
        print("Wrote source catalog: {}".format(output))
    if plot:
        norm = None
        if vmax is None:
            norm = ImageNormalize(stretch=SqrtStretch())
        fig, ax = plt.subplots(2, 2, figsize=(8, 8))
        ax[0][0].imshow(imgarr, origin='lower', cmap='Greys_r', norm=norm, vmax=vmax)
        ax[0][1].imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345))
        ax[0][1].set_title('Segmentation Map')
        ax[1][0].imshow(bkg.background, origin='lower')
        if not isinstance(threshold, float):
            ax[1][1].imshow(threshold, origin='lower')
    return tbl, segm
def classify_sources(catalog, sources=None):
    """ Convert moments_central attribute for source catalog into star/cr flag
    This algorithm interprets the central_moments from the source_properties
    generated for the sources as more-likely a star or a cosmic-ray. It is not
    intended or expected to be precise, merely a means of making a first cut at
    removing likely cosmic-rays or other artifacts.
    Parameters
    -----------
    catalog : object
        The photutils.SourceCatalog object for the image/chip
    sources : tuple
        Range of objects from catalog to process as a tuple of (min, max).
        Default: None which simply processes all sources.
    Returns
    -------
    srctype : ndarray
        An ndarray where a value of 1 indicates a likely valid, non-cosmic-ray
        source, and a value of 0 indicates a likely cosmic-ray.
    """
    moments = catalog.moments_central
    if sources is None:
        sources = (0, len(moments))
    num_sources = sources[1] - sources[0]
    srctype = np.zeros((num_sources,), np.int32)
    for src in range(sources[0], sources[1]):
        # Protect against spurious detections
        src_x = catalog[src].xcentroid
        src_y = catalog[src].ycentroid
        if np.isnan(src_x) or np.isnan(src_y):
            continue
        # A central-moments peak away from the low-order corner suggests an
        # extended (star-like) profile rather than a sharp cosmic-ray spike.
        x, y = np.where(moments[src] == moments[src].max())
        if (x[0] > 1) and (y[0] > 1):
            # BUG FIX: index relative to the start of the requested range;
            # indexing with the absolute source number overruns (or misfills)
            # srctype whenever sources=(min, max) is given with min > 0.
            srctype[src - sources[0]] = 1
    return srctype
def generate_source_catalog(image, **kwargs):
    """ Build source catalogs for each chip using photutils.
    The catalog returned by this function includes sources found in all chips
    of the input image with the positions translated to the coordinate frame
    defined by the reference WCS `refwcs`. The sources will be
    - identified using photutils segmentation-based source finding code
    - ignore any input pixel which has been flagged as 'bad' in the DQ
    array, should a DQ array be found in the input HDUList.
    - classified as probable cosmic-rays (if enabled) using central_moments
    properties of each source, with these sources being removed from the
    catalog.
    Parameters
    -----------
    image : HDUList object
        Input image as an astropy.io.fits HDUList object
    dqname : string
        EXTNAME for the DQ array, if present, in the input image HDUList.
    output : boolean
        Specify whether or not to write out a separate catalog file for all the
        sources found in each chip. Default: None (False)
    Optional Parameters
    --------------------
    threshold : float, optional
        This parameter controls the threshold used for identifying sources in
        the image relative to the background RMS.
        If None, compute a default value of (background+3*rms(background)).
        If threshold < 0.0, use absolute value as scaling factor for default value.
        Default: None
    fwhm : float, optional
        FWHM (in pixels) of the expected sources from the image, comparable to the
        'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
        this value will be identified as sources in the catalog. Default: 3.0.
    Returns
    --------
    source_cats : dict
        Dict of astropy Tables identified by chip number with
        each table containing sources from image extension `('sci',chip)`.
    """
    if not isinstance(image, pf.HDUList):
        raise ValueError("Input {} not fits.HDUList object".format(image))
    dqname = kwargs.get('dqname', 'DQ')
    output = kwargs.get('output', None)
    # Build source catalog for entire image
    source_cats = {}
    numSci = countExtn(image, extname='SCI')
    for chip in range(numSci):
        chip += 1
        # find sources in image
        if output:
            rootname = image[0].header['rootname']
            outroot = '{}_sci{}_src'.format(rootname, chip)
            kwargs['output'] = outroot
        imgarr = image['sci', chip].data
        # photmode retained for the (currently disabled) photometry step below
        if 'photmode' in image[0].header:
            photmode = image[0].header['photmode']
        else:
            photmode = image['sci', chip].header['photmode']
        # apply any DQ array, if available
        # BUG FIX: HDUList.index_of() raises KeyError when the extension is
        # absent, so the original "if image.index_of(dqname):" test crashed
        # for images without a DQ array instead of simply skipping the mask.
        dqmask = None
        try:
            image.index_of(dqname)
        except KeyError:
            pass
        else:
            dqarr = image[dqname, chip].data
            dqmask = bitmask.bitfield_to_boolean_mask(dqarr, good_mask_value=False)
        seg_tab, segmap = extract_sources(imgarr, dqmask=dqmask, **kwargs)
        seg_tab_phot = seg_tab  # compute_photometry(seg_tab, photmode)
        source_cats[chip] = seg_tab_phot
    return source_cats
def generate_sky_catalog(image, refwcs, **kwargs):
    """Build source catalog from input image using photutils.
    This script borrows heavily from build_source_catalog
    The catalog returned by this function includes sources found in all chips
    of the input image with the positions translated to the coordinate frame
    defined by the reference WCS `refwcs`. The sources will be
    - identified using photutils segmentation-based source finding code
    - ignore any input pixel which has been flagged as 'bad' in the DQ
    array, should a DQ array be found in the input HDUList.
    - classified as probable cosmic-rays (if enabled) using central_moments
    properties of each source, with these sources being removed from the
    catalog.
    Parameters
    -----------
    image : HDUList object
        Input image as an astropy.io.fits HDUList object
    refwcs : HSTWCS object
        Definition of the reference frame WCS.
    dqname : string
        EXTNAME for the DQ array, if present, in the input image HDUList.
    output : boolean
        Specify whether or not to write out a separate catalog file for all the
        sources found in each chip. Default: None (False)
    Optional Parameters
    --------------------
    threshold : float, optional
        This parameter controls the S/N threshold used for identifying sources in
        the image relative to the background RMS in much the same way that
        the 'threshold' parameter in 'tweakreg' works.
        Default: 1000.
    fwhm : float, optional
        FWHM (in pixels) of the expected sources from the image, comparable to the
        'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
        this value will be identified as sources in the catalog. Default: 3.0.
    Returns
    --------
    master_cat : astropy.Table object
        Source catalog for all 'valid' sources identified from all chips of the
        input image with positions translated to the reference WCS coordinate
        frame.
    """
    # Extract source catalogs for each chip
    source_cats = generate_source_catalog(image, **kwargs)
    # Build source catalog for entire image
    master_cat = None
    numSci = countExtn(image, extname='SCI')
    # if no refwcs specified, build one now...
    if refwcs is None:
        refwcs = build_reference_wcs([image])
    for chip in range(numSci):
        chip += 1
        # work with sources identified from this specific chip
        seg_tab_phot = source_cats[chip]
        if seg_tab_phot is None:
            continue
        # Convert pixel coordinates from this chip to sky coordinates
        chip_wcs = wcsutil.HSTWCS(image,ext=('sci',chip))
        seg_ra,seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'],seg_tab_phot['ycentroid'],1)
        # Convert sky positions to pixel positions in the reference WCS frame
        # NOTE: the per-chip tables are modified IN PLACE here -- after this
        # call, source_cats[chip] holds reference-frame pixel positions, not
        # chip-frame positions.
        seg_xy_out = refwcs.all_world2pix(seg_ra,seg_dec,1)
        seg_tab_phot['xcentroid'] = seg_xy_out[0]
        seg_tab_phot['ycentroid'] = seg_xy_out[1]
        # Accumulate all chips into one master catalog
        if master_cat is None:
            master_cat = seg_tab_phot
        else:
            master_cat = vstack([master_cat, seg_tab_phot])
    # Returns None when no chip yielded any sources
    return master_cat
def compute_photometry(catalog, photmode):
    """ Compute magnitudes for sources from catalog based on observations photmode
    Parameters
    -----------
    catalog : object
        Astropy Table with 'flux' column for the measured flux for each source
    photmode : String
        Specification of the observation filter configuration used for the exposure
        as reported by the 'PHOTMODE' keyword from the PRIMARY header.
    Returns
    --------
    phot_cat : object
        Astropy Table object of input source catalog with added column for
        VEGAMAG photometry (in magnitudes).
    """
    # Determine VEGAMAG zero-point using pysynphot for this photmode
    obsmode = photmode.replace(' ', ',')
    vega_spec = S.FileSpectrum(VEGASPEC)
    bandpass = S.ObsBandpass(obsmode)
    vega_obs = S.Observation(vega_spec, bandpass)
    zeropoint = 2.5*np.log10(vega_obs.countrate())
    # Use zero-point to convert flux values from catalog into magnitudes
    #source_phot = vegazpt - 2.5*np.log10(catalog['source_sum'])
    vegamag = zeropoint - 2.5*np.log10(catalog['flux'])
    vegamag.name = 'vegamag'
    # Now add this new column to the catalog table
    catalog.add_column(vegamag)
    return catalog
def filter_catalog(catalog, **kwargs):
    """ Create a new catalog selected from input based on photometry
    Parameters
    ----------
    bright_limit : float
        Fraction of catalog based on brightness that should be retained.
        Default: 1.00 (full catalog)
    max_bright : int
        Maximum number of sources to keep regardless of `bright_limit`
        Default: 100
    min_bright : int
        Minimum number of sources to keep regardless of `bright_limit`
        Default: 20
    colname : string
        Name of column to use for selection/sorting. Default: 'vegamag'
    Returns
    --------
    new_catalog : Table
        New table which only has the sources that meet the selection criteria
    """
    # interpret input pars
    bright_limit = kwargs.get('bright_limit', 1.00)
    max_bright = kwargs.get('max_bright', None)
    min_bright = kwargs.get('min_bright', 20)
    colname = kwargs.get('colname', 'vegamag')
    # sort by magnitude (brightest == smallest magnitude first)
    mags = catalog[colname]
    total = len(mags)
    order = np.argsort(mags)
    if max_bright is None:
        max_bright = total
    # apply limits, insuring no more than full catalog gets selected
    keep = max(int(total * bright_limit), min_bright)
    keep = min(max_bright, keep, total)
    # Extract sources identified by selection
    return catalog[order[:keep]]
def build_self_reference(filename, clean_wcs=False):
    """ This function creates a reference, undistorted WCS that can be used to
    apply a correction to the WCS of the input file.
    PARAMETERS
    ----------
    filename : str
        Filename of image which will be corrected, and which will form the basis
        of the undistorted WCS
    clean_wcs : bool
        Specify whether or not to return the WCS object without any distortion
        information, or any history of the original input image. This converts
        the output from `utils.output_wcs()` into a pristine `HSTWCS` object.
    Returns
    --------
    customwcs : object
        HSTWCS object which contains the undistorted WCS representing the entire
        field-of-view for the input image
    Syntax
    -------
    This function can be used with the following syntax to apply a shift/rot/scale
    change to the same image:
    >>> import buildref
    >>> from drizzlepac import updatehdr
    >>> filename = "jce501erq_flc.fits"
    >>> wcslin = buildref.build_self_reference(filename)
    >>> updatehdr.updatewcs_with_shift(filename,wcslin,xsh=49.5694, ysh=19.2203, rot = 359.998, scale = 0.9999964)
    """
    # Headerlet-style inputs carry a 'sipwcs' extension instead of 'sci'
    sciname = 'sipwcs' if 'sipwcs' in filename else 'sci'
    wcslin = build_reference_wcs([filename], sciname=sciname)
    if not clean_wcs:
        return wcslin
    # Strip distortion/history by rebuilding a bare HSTWCS from the linear terms
    base = wcslin.wcs
    return build_hstwcs(base.crval[0], base.crval[1],
                        base.crpix[0], base.crpix[1],
                        wcslin._naxis1, wcslin._naxis2,
                        wcslin.pscale, wcslin.orientat)
def read_hlet_wcs(filename, ext):
    """Insure HSTWCS includes all attributes of a full image WCS.
    For headerlets, the WCS does not contain information about the size of the
    image, as the image array is not present in the headerlet.
    """
    w = wcsutil.HSTWCS(filename, ext=ext)
    if w.naxis1 is None:
        # Assume crpix marks the chip center, so the full size is twice crpix
        w.naxis1 = int(w.wcs.crpix[0] * 2.)
        w.naxis2 = int(w.wcs.crpix[1] * 2.)
    return w
def build_hstwcs(crval1, crval2, crpix1, crpix2, naxis1, naxis2, pscale, orientat):
    """ Create an HSTWCS object for a default instrument without distortion
    based on user provided parameter values.
    .. note :: COPIED from drizzlepac.wcs_functions
    """
    wcs_new = wcsutil.HSTWCS()
    wcs_new.wcs.crval = np.array([crval1, crval2])
    wcs_new.wcs.crpix = np.array([crpix1, crpix2])
    wcs_new.naxis1 = naxis1
    wcs_new.naxis2 = naxis2
    # CD matrix: rotate by orientat, flip X, and scale from arcsec to degrees
    wcs_new.wcs.cd = buildRotMatrix(orientat) * [-1, 1] * pscale / 3600.0
    # Synchronize updates with PyWCS/WCSLIB objects
    wcs_new.wcs.set()
    wcs_new.setPscale()
    wcs_new.setOrient()
    wcs_new.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    return wcs_new
def within_footprint(img, wcs, x, y):
    """Determine whether input x,y fall in the science area of the image.
    Parameters
    -----------
    img : ndarray
        ndarray of image where non-science areas are marked with value of NaN
    wcs : obj
        HSTWCS or WCS object with naxis terms defined
    x,y : arrays
        arrays of x,y positions for sources to be checked
    Returns
    -------
    x,y : arrays
        New arrays which have been trimmed of all sources that fall outside
        the science areas of the image
    """
    # start with limits of WCS shape -- support the several attribute
    # conventions used by different wcs implementations
    if hasattr(wcs, 'naxis1'):
        nx, ny = wcs.naxis1, wcs.naxis2
    elif hasattr(wcs, 'pixel_shape'):
        nx, ny = wcs.pixel_shape
    else:
        nx, ny = wcs._naxis1, wcs._naxis2
    out_x = np.bitwise_or(x < 0, x > nx)
    out_y = np.bitwise_or(y < 0, y > ny)
    inside = ~np.bitwise_or(out_x, out_y)
    x = x[inside]
    y = y[inside]
    # Now, confirm that these points fall within actual science area of WCS
    footprint = create_image_footprint(img, wcs, border=1.0)
    keep = np.where(footprint[y.astype(np.int32), x.astype(np.int32)])[0]
    return x[keep], y[keep]
def create_image_footprint(image, refwcs, border=0.):
    """ Create the footprint of the image in the reference WCS frame
    Parameters
    ----------
    image : HDUList or filename
        Image to extract sources for matching to
        the external astrometric catalog
    refwcs : object
        Reference WCS for coordinate frame of image
    border : float
        Buffer (in arcseconds) around edge of image to exclude astrometric
        sources. Default: 0.
    """
    # Interpret input image to generate initial source catalog and WCS
    if isinstance(image, str):
        image = pf.open(image)
    numSci = countExtn(image, extname='SCI')
    # Output mask dimensions come from the reference WCS
    ref_x = refwcs._naxis1
    ref_y = refwcs._naxis2
    # convert border value into pixels
    border_pixels = int(border/refwcs.pscale)
    mask_arr = np.zeros((ref_y,ref_x),dtype=int)
    for chip in range(numSci):
        chip += 1
        # Build arrays of pixel positions for all edges of chip
        chip_y,chip_x = image['sci',chip].data.shape
        chipwcs = wcsutil.HSTWCS(image,ext=('sci',chip))
        xpix = np.arange(chip_x)+1
        ypix = np.arange(chip_y)+1
        # Left, bottom, right, top edges of the chip (1-based pixel coords)
        edge_x = np.hstack([[1]*chip_y,xpix,[chip_x]*chip_y,xpix])
        edge_y = np.hstack([ypix,[1]*chip_x,ypix,[chip_y]*chip_x])
        # Map chip edges into the reference frame via the sky
        # NOTE(review): all_pix2world uses origin=1 while all_world2pix uses
        # origin=0 -- presumably to land directly on 0-based array indices,
        # but confirm this is intentional.
        edge_ra,edge_dec = chipwcs.all_pix2world(edge_x,edge_y,1)
        edge_x_out,edge_y_out = refwcs.all_world2pix(edge_ra,edge_dec,0)
        # Clip to the mask bounds so out-of-frame edges do not raise
        edge_x_out = np.clip(edge_x_out.astype(np.int32),0,ref_x-1)
        edge_y_out = np.clip(edge_y_out.astype(np.int32),0,ref_y-1)
        mask_arr[edge_y_out, edge_x_out] = 1
    # Fill in outline of each chip
    # (dilate first so small gaps in the traced outline close before filling)
    mask_arr = ndimage.binary_fill_holes(ndimage.binary_dilation(mask_arr,iterations=2))
    # Shrink the footprint by the requested border buffer
    if border > 0.:
        mask_arr = ndimage.binary_erosion(mask_arr, iterations=border_pixels)
    return mask_arr
def find_hist2d_offset(filename, reference, refwcs=None, refnames=['ra', 'dec'],
                       match_tolerance=5., chip_catalog=True, search_radius=15.0,
                       min_match=10, classify=True):
    """Iteratively look for the best cross-match between the catalog and ref.
    Parameters
    ----------
    filename : HDUList or filename
        Single image to extract sources for matching to
        the external astrometric catalog.
    reference : str or object
        Reference catalog, either as a filename or ``astropy.Table``
        containing astrometrically accurate sky coordinates for astrometric
        standard sources
    refwcs : HSTWCS object
        This WCS will define the coordinate frame which will
        be used to determine the offset. If None is specified, use the
        WCS from the input image `filename` to build this WCS using
        `build_self_reference()`. Default: None.
    refnames : list
        List of table column names for sky coordinates of astrometric
        standard sources from reference catalog
    match_tolerance : float
        Tolerance (in pixels) for recognizing that a source position matches
        an astrometric catalog position. Larger values allow for lower
        accuracy source positions to be compared to astrometric catalog
        Default: 5 pixels
    chip_catalog : boolean
        Specify whether or not to write out individual source catalog for
        each chip in the image. Default: True.
    search_radius : float
        Maximum separation (in arcseconds) from source positions to look
        for valid cross-matches with reference source positions.
        Default: 15.0 arcseconds.
    min_match : int
        Minimum number of cross-matches for an acceptable determination of
        the offset. Default: 10.
    classify : bool
        Specify whether or not to use central_moments classification to
        ignore likely cosmic-rays/bad-pixels when generating the source
        catalog. Default: True
    Returns
    -------
    best_offset : tuple
        Offset in input image pixels between image source positions and
        astrometric catalog positions that results in largest number of
        matches of astrometric sources with image sources
    seg_xy, ref_xy : astropy.Table
        Source catalog and reference catalog, respectively, used for
        determining the offset. Each catalog includes sources for the entire
        field-of-view, not just a single chip.
    """
    # Interpret input image to generate initial source catalog and WCS
    if isinstance(filename, str):
        image = pf.open(filename)
        rootname = filename.split("_")[0]
    else:
        image = filename
        rootname = image[0].header['rootname']
    # check to see whether reference catalog can be found
    # BUG FIX: only probe the filesystem when 'reference' is a filename;
    # an astropy Table passed in directly is used as-is.
    if isinstance(reference, str) and not os.path.exists(reference):
        print("Could not find input reference catalog: {}".format(reference))
        raise FileNotFoundError(
            "Could not find input reference catalog: {}".format(reference))
    # Extract reference WCS from image
    if refwcs is None:
        refwcs = build_self_reference(image, clean_wcs=True)
    print("Computing offset for field-of-view defined by:")
    print(refwcs)
    # read in reference catalog
    if isinstance(reference, str):
        refcat = ascii.read(reference)
    else:
        refcat = reference
    print("\nRead in reference catalog with {} sources.".format(len(refcat)))
    ref_ra = refcat[refnames[0]]
    ref_dec = refcat[refnames[1]]
    # Build source catalog for entire image in the reference WCS frame.
    # BUG FIX: generate_source_catalog() only accepts (image, **kwargs) --
    # passing refwcs positionally raised TypeError -- and it returns a
    # per-chip dict. generate_sky_catalog() takes refwcs and returns the
    # single merged Table the code below requires.
    img_cat = generate_sky_catalog(image, refwcs, output=chip_catalog,
                                   classify=classify)
    if img_cat is None:
        raise ValueError("No sources detected in image {}".format(rootname))
    # BUG FIX: guard against HDUList input (no .replace on an HDUList)
    if isinstance(filename, str):
        xy_catname = filename.replace(".fits", "_xy.cat")
    else:
        xy_catname = "{}_xy.cat".format(rootname)
    img_cat.write(xy_catname, format='ascii.no_header', overwrite=True)
    # Retrieve source XY positions in reference frame
    seg_xy = np.column_stack((img_cat['xcentroid'], img_cat['ycentroid']))
    seg_xy = seg_xy[~np.isnan(seg_xy[:, 0])]
    # Translate reference catalog positions into input image coordinate frame
    xref, yref = refwcs.all_world2pix(ref_ra, ref_dec, 1)
    # look for only sources within the viewable area of the exposure to
    # determine the offset
    xref, yref = within_footprint(image, refwcs, xref, yref)
    ref_xy = np.column_stack((xref, yref))
    print("\nWorking with {} astrometric sources for this field".format(len(ref_xy)))
    # write out astrometric reference catalog that was actually used
    ref_ra_img, ref_dec_img = refwcs.all_pix2world(xref, yref, 1)
    ref_tab = Table([ref_ra_img, ref_dec_img, xref, yref],
                    names=['ra', 'dec', 'x', 'y'])
    # BUG FIX: derive the output name from the reference filename only when
    # one was given; a Table input has no .replace()
    if isinstance(reference, str):
        ref_catname = reference.replace('.cat', '_{}.cat'.format(rootname))
    else:
        ref_catname = 'ref_{}.cat'.format(rootname)
    ref_tab.write(ref_catname, format='ascii.fast_commented_header',
                  overwrite=True)
    searchrad = search_radius / refwcs.pscale
    # Use 2d-Histogram builder from drizzlepac.tweakreg -- for demo only...
    xp, yp, nmatches, zpqual = build_xy_zeropoint(seg_xy, ref_xy,
                                                  searchrad=searchrad,
                                                  histplot=False, figure_id=1,
                                                  plotname=None, interactive=False)
    hist2d_offset = (xp, yp)
    print('best offset {} based on {} cross-matches'.format(hist2d_offset, nmatches))
    return hist2d_offset, seg_xy, ref_xy
##############################
#
# Functions to support working with Tweakwcs
#
##############################
def build_nddata(image, group_id, source_catalog=None):
    """ Return a list of NDData objects for all chips in an image.
    Parameters
    ===========
    image : filename, HDUList
        Either filename or HDUList of a single HST observation
    group_id : int
        Integer ID for group this image should be associated with; primarily
        used when separate chips are in separate files to treat them all as one
        exposure.
    source_catalog : dict, optional
        If provided (default: None), these catalogs will be attached as `catalog`
        entries in each chip's NDData.meta. It should be provided as a
        dict of astropy Tables identified by chip number with
        each table containing sources from image extension `('sci',chip)` as
        generated by `generate_source_catalog()`.
    Returns
    ========
    ndlist : list
        List of astropy NDData defined for all chips in input image
    """
    open_file = False
    if isinstance(image, str):
        hdulist = pf.open(image)
        open_file = True
    elif isinstance(image, pf.HDUList):
        hdulist = image
    else:
        # Carry the diagnostic in the exception itself instead of printing it
        # to stdout and raising a message-less ValueError.
        raise ValueError(
            "Wrong type of input, {}, for build_nddata...".format(type(image)))
    images = []
    numsci = countExtn(hdulist)
    try:
        for chip in range(1, numsci + 1):
            im_data = hdulist[('SCI', chip)].data
            dq_data = hdulist[('DQ', chip)].data
            w = wcsutil.HSTWCS(hdulist, ('SCI', chip))
            # Below, simply consider non-zero DQ data as invalid.
            # A more sophisticated approach would use bitmask module.
            # Also, here we set group ID to a different number for each image,
            # but for ACS images, for example, we likely would assign
            # the same group ID to the images corresponding to different
            # SCI extensions *of the same FITS file* so that they can be
            # aligned together.
            img = NDData(data=im_data, mask=dq_data != 0, wcs=w,
                         meta={'chip': chip, 'group_id': group_id,
                               'filename': image})
            # append source catalog, if provided
            if source_catalog:
                imcat = source_catalog[chip]
                # rename xcentroid/ycentroid columns, if necessary, to be
                # consistent with tweakwcs
                if 'xcentroid' in imcat.colnames:
                    imcat.rename_column('xcentroid', 'x')
                    imcat.rename_column('ycentroid', 'y')
                imcat.meta['name'] = 'im{:d} sources'.format(group_id)
                img.meta['catalog'] = imcat
            images.append(img)
    finally:
        # Only close the HDUList we opened ourselves (even if catalog
        # processing fails); a caller-supplied HDUList stays open.
        if open_file:
            hdulist.close()
    return images
| 36.777033 | 161 | 0.653698 |
acf08da0a83e60db42ad9af4a7daa2dfcda2cf2d | 5,695 | py | Python | scripts/update-env-config.py | herrmann/contrib-helm-chart | 02bb00f01821c22844d05abe7e74a93c088b7967 | [
"Apache-2.0"
] | 102 | 2019-11-04T21:22:41.000Z | 2022-03-22T12:58:23.000Z | scripts/update-env-config.py | herrmann/contrib-helm-chart | 02bb00f01821c22844d05abe7e74a93c088b7967 | [
"Apache-2.0"
] | 92 | 2019-11-04T18:36:34.000Z | 2022-03-29T19:58:15.000Z | scripts/update-env-config.py | herrmann/contrib-helm-chart | 02bb00f01821c22844d05abe7e74a93c088b7967 | [
"Apache-2.0"
] | 99 | 2019-11-29T08:00:37.000Z | 2022-03-31T13:08:25.000Z | import urllib.request
import re
def upper_repl(match):
    """re.sub callback: replace a match with its first group, uppercased."""
    captured = match[1]
    return captured.upper()
# Master list of Redash environment variables. Each record has:
#   name     - camelCase key used under `redash:` in values.yaml
#   env      - the REDASH_* environment variable name
#   desc     - human-readable description (from the docs table below)
#   default  - Redash's own default, documented but not set by the chart
#   secret   - whether the value belongs in the chart's Secret resource
#   required - whether chart rendering must fail when the value is absent
# NOTE(review): `vars` shadows the `vars()` builtin; harmless in this script
# but worth renaming if it grows.
vars = []
# Add some undocumented/operational values
vars.append(
    {
        "name": "secretKey",
        "env": "REDASH_SECRET_KEY",
        "desc": "Secret key used for data encryption",
        "default": "",
        "secret": True,
        "required": True,
    }
)
# Can remove once documented: https://github.com/getredash/website/issues/528
vars.append(
    {
        "name": "samlSchemeOverride",
        "env": "REDASH_SAML_SCHEME_OVERRIDE",
        "desc": "This setting will allow you to override the SAML Auth URL scheme that gets constructed by Flask. This is a useful feature if, for example, you're behind a Proxy Protocol enabled TCP load balancer (AWS ELB that terminates SSL) and your Nginx proxy or similar adds a X-Forwarded-Proto of HTTP even though your Redash URL for SAML auth is HTTPS.",
        "default": "",
        "secret": False,
        "required": False,
    }
)
# Parse the docs and build a list of environment variables
with urllib.request.urlopen(
    "https://raw.githubusercontent.com/getredash/website/master/src/pages/kb/open-source/admin-guide/env-vars-settings.md"
) as response:
    data = response.read().decode("utf-8")
    for line in data.splitlines():
        # Match a markdown table row: | `REDASH_X` | description | default |
        m = re.match(r"^\s*[|]\s*[`](REDASH_.+)[`]\s*[|]\s([^|]*)\s*[|]\s([^|]*)", line)
        if m:
            name = env = m.group(1).strip()
            desc = m.group(2).strip()
            default = m.group(3).strip()
            # Redis/database URLs are wired up elsewhere in the chart.
            if env in ["REDASH_REDIS_URL", "REDASH_DATABASE_URL"]:
                continue
            secret = False
            if env in [
                "REDASH_LDAP_BIND_DN_PASSWORD",
                "REDASH_MAIL_PASSWORD",
                "REDASH_GOOGLE_CLIENT_SECRET",
                "REDASH_COOKIE_SECRET",
                "REDASH_SECRET_KEY",
            ]:
                secret = True
            required = False
            if env in ["REDASH_COOKIE_SECRET", "REDASH_SECRET_KEY"]:
                required = True
                # Unset default to make this clear
                default = ""
            # Convert REDASH_SOME_NAME to camelCase someName for values.yaml.
            name = re.sub(r"REDASH_", "", name).lower()
            name = re.sub(r"_([a-z])", upper_repl, name)
            vars.append(
                {
                    "name": name,
                    "env": env,
                    "desc": desc,
                    "default": default,
                    "secret": secret,
                    "required": required,
                }
            )
# Replace lines between markers with config based on the docs
start_token = "## Start primary Redash configuration"
end_token = "## End primary Redash configuration"
print("values.yaml snippet")
print()
# Build the generated section of values.yaml. Everything nests under the
# `redash:` key, so each emitted line is indented by two spaces to stay valid
# YAML; the "# --" / "# @default --" comments follow the helm-docs format.
config = [start_token]
config.append(
    "  ## Note that we document the Redash defaults, but don't explicitly set them."
)
config.append(
    "  ## This allows multiple versions of Redash (which may have different defaults) to be more easily used"
)
for var in vars:
    required = ""
    if var["required"]:
        required = "REQUIRED "
    comment = "  # -- %s`%s` value." % (
        required,
        var["env"],
    )
    if len(var["desc"]) > 0:
        comment += " %s." % (var["desc"].capitalize())
    if var["secret"]:
        comment += " Stored as a Secret value."
    if len(var["default"]) > 0:
        comment += "\n  # @default -- %s" % (var["default"])
    config.append(comment)
    config.append('  %s: ""' % (var["name"]))
config.append("  ## End primary Redash configuration")
# Splice the generated section between the start/end markers in place,
# leaving the rest of values.yaml untouched; `with` guarantees the handle is
# closed even if the regex substitution or the write fails.
with open("values.yaml", "r+") as values:
    content = re.sub(
        start_token + ".*" + end_token, "\n".join(config), values.read(), flags=re.DOTALL
    )
    values.seek(0)
    values.truncate()
    values.write(content)
print("secrets.yaml snippet")
print()
# Build the generated section of templates/secrets.yaml (two-space indent so
# the entries nest under the Secret's `data:` key).
config = [start_token]
for var in vars:
    # Required secrets must come from either an inline value or an existing
    # Secret; emit a `required` guard so `helm template` fails loudly.
    # (Removed the unused `required = 'default "" '` assignment that
    # previously shadowed this check without ever being read.)
    if var["required"]:
        config.append(
            '  {{ $null := required "A value for one of the following variables is required: redash.%s (secure random value), redash.existingSecret (secret name)" (or .Values.redash.%s .Values.redash.existingSecret) }}'
            % (var["name"], var["name"])
        )
    if var["secret"]:
        config.append(
            '  %s: {{ default "" .Values.redash.%s | b64enc | quote }}'
            % (var["name"], var["name"])
        )
config.append("  ## End primary Redash configuration")
# Splice the generated section between the markers in place; `with` ensures
# the file handle is closed even on error.
with open("templates/secrets.yaml", "r+") as secrets:
    content = re.sub(
        start_token + ".*" + end_token, "\n".join(config), secrets.read(), flags=re.DOTALL
    )
    secrets.seek(0)
    secrets.truncate()
    secrets.write(content)
print("_helpers.tpl snippet")
print()
# Build the generated env-var section of templates/_helpers.tpl.
config = [start_token]
for var in vars:
    if var["secret"]:
        # Secret values are wired through the chart's Secret resource.
        config.append(
            "{{- if or .Values.redash.%s .Values.redash.existingSecret }}"
            % (var["name"])
        )
        config.append("- name: %s" % (var["env"]))
        config.append("  valueFrom:")
        config.append("    secretKeyRef:")
        config.append('      name: {{ include "redash.secretName" . }}')
        config.append("      key: %s" % (var["name"]))
    else:
        # Plain values are templated inline. Bug fix: as previously written,
        # the line was two adjacent double-quoted literals ("...default " and
        # " .Values..."), so the Helm `default` call lost its empty-string
        # first argument and the rendered template was invalid. Single quotes
        # keep the inner "" intact.
        config.append("{{- if .Values.redash.%s }}" % (var["name"]))
        config.append("- name: %s" % (var["env"]))
        config.append(
            '  value: {{ default "" .Values.redash.%s | quote }}' % (var["name"])
        )
    config.append("{{- end }}")
config.append("## End primary Redash configuration")
# Splice the generated section between the markers in place; `with` ensures
# the file handle is closed even on error.
with open("templates/_helpers.tpl", "r+") as helpers:
    content = re.sub(
        start_token + ".*" + end_token, "\n".join(config), helpers.read(), flags=re.DOTALL
    )
    helpers.seek(0)
    helpers.truncate()
    helpers.write(content)
| 33.5 | 361 | 0.569447 |
acf08e446036b07d2df1a949db97817a37fc098f | 2,022 | py | Python | task_templates/transforms/python3_sklearn_transform/custom.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | null | null | null | task_templates/transforms/python3_sklearn_transform/custom.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | null | null | null | task_templates/transforms/python3_sklearn_transform/custom.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | null | null | null | import pickle
import pandas as pd
from scipy.sparse.csr import csr_matrix
from create_transform_pipeline import make_pipeline
def fit(
    X: pd.DataFrame, y: pd.Series, output_dir: str, **kwargs,
):
    """DRUM fit hook for custom tasks.

    Required for all custom tasks (custom transformers additionally need the
    transform hook below); inference models may leave it unimplemented. It
    builds the pipeline from create_transform_pipeline, fits it on the
    training data, and serializes the fitted object into ``output_dir``.

    Parameters
    ----------
    X: pd.DataFrame - training data to perform fit on
    y: pd.Series - target data to perform fit on
    output_dir: the path to write output. This is the path provided in
        '--output' parameter of the 'drum fit' command.
    kwargs: Added for forwards compatibility

    Returns
    -------
    Nothing
    """
    fitted_pipeline = make_pipeline()
    fitted_pipeline.fit(X, y)
    # DRUM reloads the serialized artifact at prediction time; changing the
    # file name or format here will likely require adding a matching
    # load_model hook. See https://github.com/datarobot/custom-\
    # model-templates/tree/master/custom_model_runner#python for details.
    # NOTE: We currently set a 10GB limit to the size of the serialized
    # model or transformer.
    artifact_path = f"{output_dir}/artifact.pkl"
    with open(artifact_path, "wb") as fp:
        pickle.dump(fitted_pipeline, fp)
def transform(X, transformer):
    """Apply a fitted transformer to incoming data.

    Parameters
    ----------
    X: pd.DataFrame - data to perform transform on
    transformer: object - trained transformer object

    Returns
    -------
    DataFrame resulting from applying transform to incoming data; sparse
    results are wrapped in a pandas sparse-backed DataFrame to avoid
    densifying a potentially large matrix.
    """
    transformed = transformer.transform(X)
    # isinstance (rather than an exact `type(...) ==` comparison) also
    # accepts csr_matrix subclasses.
    if isinstance(transformed, csr_matrix):
        return pd.DataFrame.sparse.from_spmatrix(transformed)
    else:
        return pd.DataFrame(transformed)
acf08e869537c0c43f9fcd29fe175c16630eb860 | 705 | py | Python | active_learning_ts/training/training_strategy.py | hassberg/active_learning_ts | 7ebdabd3349d3ac4ea2761a8aa869b8d222a2d83 | [
"MIT"
] | null | null | null | active_learning_ts/training/training_strategy.py | hassberg/active_learning_ts | 7ebdabd3349d3ac4ea2761a8aa869b8d222a2d83 | [
"MIT"
] | null | null | null | active_learning_ts/training/training_strategy.py | hassberg/active_learning_ts | 7ebdabd3349d3ac4ea2761a8aa869b8d222a2d83 | [
"MIT"
] | null | null | null | from typing import Protocol
from active_learning_ts.data_blackboard import Blackboard
from active_learning_ts.surrogate_models.surrogate_model import SurrogateModel
class TrainingStrategy(Protocol):
    """Structural interface for components that train a ``SurrogateModel``.

    A training strategy is responsible for giving feedback to the surrogate
    model, and may be specific to particular ``SurrogateModel`` types.
    Given data from the blackboard, the strategy either creates feedback the
    surrogate model can train on, or trains the surrogate model itself.
    """
    def train(self, blackboard: Blackboard) -> None:
        """Train (or generate training feedback for) the surrogate model
        using the data currently on *blackboard*."""
        pass
    def post_init(self, surrogate_model: SurrogateModel) -> None:
        """Store the surrogate model this strategy will train.

        NOTE(review): presumably invoked once by the framework after
        construction, before train() — confirm against the experiment wiring.
        """
        self.surrogate_model = surrogate_model
| 33.571429 | 118 | 0.775887 |
acf08fd65bba019fd87c94dfe90e15c56293fe05 | 21,564 | py | Python | scripts/google_research/open-covid-19/generate_csv_mcf.py | sharadshriram/data | 1ea4d6b8ff5944b5fcc06959d47eb787cd30eb60 | [
"Apache-2.0"
] | 25 | 2020-07-18T04:44:57.000Z | 2022-03-03T14:44:20.000Z | scripts/google_research/open-covid-19/generate_csv_mcf.py | sharadshriram/data | 1ea4d6b8ff5944b5fcc06959d47eb787cd30eb60 | [
"Apache-2.0"
] | 276 | 2020-05-09T00:57:50.000Z | 2022-03-29T23:27:38.000Z | scripts/google_research/open-covid-19/generate_csv_mcf.py | sharadshriram/data | 1ea4d6b8ff5944b5fcc06959d47eb787cd30eb60 | [
"Apache-2.0"
] | 59 | 2020-05-09T00:45:30.000Z | 2022-03-28T16:03:53.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for generating the Open COVID-19 Data open source at
https://github.com/google-research/open-covid-19-data under CC-BY license.
The script generates three files:
1) open_covid_19_data.csv
2) open_covid_19_data.tmcf
3) open_covid_19_data_geos.mcf
"open_covid_19_data_geos.mcf" contains node MCFs for geos in the dataset that do
not come up DCIDs for local resolution. "open_covid_19_data_geos_resolved.mcf"
contains the resolved geos.
"""
import re
import collections
from typing import Dict, List, Iterable
import pandas as pd
import numpy as np
import requests
import frozendict
# Data CSV
CSV_URL = 'https://raw.githubusercontent.com/google-research/open-covid-19-data/master/data/exports/cc_by/aggregated_cc_by.csv'
# CSV containing mappings from "region_code" to "datacommons_id"
LOCATIONS_URL = 'https://raw.githubusercontent.com/google-research/open-covid-19-data/master/data/exports/locations/locations.csv'
# Output filenames
CSV_OUT = 'open_covid_19_data.csv'
TMCF_OUT = 'open_covid_19_data.tmcf'
GEOS_OUT = 'open_covid_19_data_geos.mcf'
# Columns that contain integers
INTEGRAL_COLS = ('confirmed_cases', 'confirmed_deaths', 'cases_cumulative',
'cases_new', 'deaths_cumulative', 'deaths_new',
'tests_cumulative', 'tests_new', 'hospitalized_current',
'hospitalized_new', 'hospitalized_cumulative', 'icu_current',
'icu_cumulative', 'ventilator_current', 'school_closing',
'workplace_closing', 'restrictions_on_gatherings',
'close_public_transit', 'stay_at_home_requirements',
'restrictions_on_internal_movement', 'income_support',
'public_information_campaigns', 'school_closing_flag',
'workplace_closing_flag', 'restrictions_on_gatherings_flag',
'close_public_transit_flag', 'stay_at_home_requirements_flag',
'restrictions_on_internal_movement_flag',
'income_support_flag', 'public_information_campaigns_flag',
'international_travel_controls', 'debt_contract_relief',
'testing_policy', 'contact_tracing')
# Columns that do not contain integers
NON_INTEGRAL_COLS = (
'open_covid_region_code', 'region_name', 'date',
'cases_cumulative_per_million', 'cases_new_per_million',
'deaths_cumulative_per_million', 'deaths_new_per_million',
'tests_cumulative_per_thousand', 'tests_new_per_thousand', 'test_units',
'cancel_public_events_flag', 'fiscal_measures', 'international_support',
'emergency_investment_in_healthcare', 'investment_in_vaccines',
'stringency_index', 'stringency_index_for_display',
'government_response_index', 'government_response_index_for_display',
'containment_health_index', 'containment_health_index_for_display',
'economic_support_index', 'economic_support_index_for_display')
# Required columns
REQUIRED_COLS = INTEGRAL_COLS + NON_INTEGRAL_COLS
# Columns whose template MCFs can be produced without special processing
REGULAR_COLS = ('cases_cumulative', 'cases_new', 'deaths_cumulative',
'deaths_new', 'tests_cumulative_people_tested',
'tests_cumulative_samples_tested',
'tests_cumulative_tests_performed',
'tests_cumulative_units_unclear', 'tests_new_people_tested',
'tests_new_samples_tested', 'tests_new_tests_performed',
'tests_new_units_unclear', 'hospitalized_current',
'hospitalized_new', 'hospitalized_cumulative', 'icu_current',
'icu_cumulative', 'ventilator_current')
# Columns that contain computed indices
INDEX_COLS = ('stringency_index', 'government_response_index',
'containment_health_index', 'economic_support_index')
# Columns that contain computed indices that are smoothed for display.
# These columns have the measurementQualifier "SmoothedByRepeatingLatestPoint".
DISPLAY_COLS = ('stringency_index_for_display',
'government_response_index_for_display',
'containment_health_index_for_display',
'economic_support_index_for_display')
# Columns that contain confirmed numbers.
# These columns have the measurementMethod "OxCGRTViaOpenCovid19Data".
CONFIRMED_COLS = ('confirmed_cases', 'confirmed_deaths')
# Incomplete column names to StatVar DCIDs mappings.
# The policy columns need to be added.
COL_TO_STATVAR_PARTIAL = frozendict.frozendict({
'cases_cumulative':
'CumulativeCount_MedicalConditionIncident_COVID_19_ConfirmedCase',
'cases_new':
'IncrementalCount_MedicalConditionIncident_COVID_19_ConfirmedCase',
'deaths_cumulative':
'CumulativeCount_MedicalConditionIncident_COVID_19_PatientDeceased',
'deaths_new':
'IncrementalCount_MedicalConditionIncident_COVID_19_PatientDeceased',
'tests_cumulative_people_tested':
'CumulativeCount_Person_COVID_19_Tested_PCR',
'tests_cumulative_samples_tested':
'CumulativeCount_MedicalTest_COVID_19_PCR',
'tests_cumulative_tests_performed':
'CumulativeCount_MedicalTest_COVID_19_PCR',
'tests_cumulative_units_unclear':
'CumulativeCount_MedicalTest_COVID_19_PCR',
'tests_new_people_tested':
'IncrementalCount_Person_COVID_19_Tested_PCR',
'tests_new_samples_tested':
'IncrementalCount_MedicalTest_COVID_19_PCR',
'tests_new_tests_performed':
'IncrementalCount_MedicalTest_COVID_19_PCR',
'tests_new_units_unclear':
'IncrementalCount_MedicalTest_COVID_19_PCR',
'hospitalized_current':
'Count_MedicalConditionIncident_COVID_19_PatientHospitalized',
'hospitalized_new':
'IncrementalCount_MedicalConditionIncident_COVID_19_PatientHospitalized',
'hospitalized_cumulative':
'CumulativeCount_MedicalConditionIncident_COVID_19_PatientHospitalized',
'icu_current':
'Count_MedicalConditionIncident_COVID_19_PatientInICU',
'icu_cumulative':
'CumulativeCount_MedicalConditionIncident_COVID_19_PatientInICU',
'ventilator_current':
'Count_MedicalConditionIncident_COVID_19_PatientOnVentilator',
'international_travel_controls':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_InternationalTravelRestriction',
'debt_contract_relief':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_GovernmentBenefit_DebtOrContractRelief',
'testing_policy':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_TestingEligibility',
'contact_tracing':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_ContactTracing',
'emergency_investment_in_healthcare':
'Amount_Legislation_COVID19Pandemic_GovernmentOrganization_ShortTermSpending_HealthcareExpenditure',
'investment_in_vaccines':
'Amount_Legislation_COVID19Pandemic_GovernmentOrganization_ShortTermSpending_VaccineExpenditure',
'fiscal_measures':
'Amount_Legislation_COVID19Pandemic_GovernmentOrganization_ShortTermSpending_EconomicStimulusExpenditure',
'international_support':
'Amount_Legislation_COVID19Pandemic_GovernmentOrganization_ShortTermSpending_InternationalAidExpenditure',
'confirmed_cases':
'CumulativeCount_MedicalConditionIncident_COVID_19_ConfirmedCase',
'confirmed_deaths':
'CumulativeCount_MedicalConditionIncident_COVID_19_ConfirmedPatientDeceased',
'stringency_index':
'Covid19StringencyIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
'stringency_index_for_display':
'Covid19StringencyIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
'government_response_index':
'Covid19ResponseIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
'government_response_index_for_display':
'Covid19ResponseIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
'containment_health_index':
'Covid19ContainmentAndHealthIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
'containment_health_index_for_display':
'Covid19ContainmentAndHealthIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
'economic_support_index':
'Covid19EconomicSupportIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
'economic_support_index_for_display':
'Covid19EconomicSupportIndex_Legislation_COVID19Pandemic_GovernmentOrganization',
})
# Policy column names to StatVar DCID prefixes.
# Flag values need to be appended to the key prefixes and spatial coverage
# suffixes need to be appended to the value prefixes to produce
# the correct mappings.
POLICY_COL_PREFIX_TO_STATVAR_PREFIX = frozendict.frozendict({
'school_closing':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_SchoolClosure',
'workplace_closing':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_WorkplaceClosure',
'restrictions_on_gatherings':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_PrivateGatheringRestriction',
'close_public_transit':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_PublicTransitClosure',
'stay_at_home_requirements':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_StayAtHomeRequirement',
'restrictions_on_internal_movement':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_InternalMovementRestriction',
'income_support':
'PolicyExtent_Legislation_COVID19Pandemic_GovernmentOrganization_GovernmentBenefit_IncomeSupport',
'public_information_campaigns':
'CampaignExtent_PublicInformationCampaign_COVID19Pandemic_GovernmentOrganization'
})
# Policy column name prefixes to their units.
# Flag values need to be appended to the prefixes to produce
# the correct columns.
POLICY_COL_PREFIX_TO_UNIT = frozendict.frozendict({
'school_closing':
'dcs:ExtentOfPolicySchoolClosure',
'workplace_closing':
'dcs:ExtentOfPolicyWorkplaceClosure',
'restrictions_on_gatherings':
'dcs:ExtentOfPolicyPrivateGatheringRestriction',
'close_public_transit':
'dcs:ExtentOfPolicyPublicTransitClosure',
'stay_at_home_requirements':
'dcs:ExtentOfPolicyStayAtHomeRequirement',
'restrictions_on_internal_movement':
'dcs:ExtentOfPolicyInternalMovementRestriction',
'income_support':
'dcs:ExtentOfPolicyIncomeSupport',
'public_information_campaigns':
'dcs:ExtentOfPublicInformationCampaign',
})
# Incomplete column names to units mappings.
COL_TO_UNIT_PARTIAL = frozendict.frozendict({
'emergency_investment_in_healthcare':
'schema:USDollar',
'investment_in_vaccines':
'schema:USDollar',
'fiscal_measures':
'schema:USDollar',
'international_support':
'schema:USDollar',
'international_travel_controls':
'dcs:ExtentOfPolicyInternationalTravelRestriction',
'debt_contract_relief':
'dcs:ExtentOfPolicyDebtOrContractRelief',
'testing_policy':
'dcs:ExtentOfPolicyTestingEligibility',
'contact_tracing':
'dcs:ExtentOfPolicyContactTracing'
})
def generate_df(cols_to_keep: List[str]) -> pd.DataFrame:
    """Generates the cleaned dataframe.

    Downloads the aggregated CC-BY data and the locations mapping, joins
    them, resolves each row to a place reference, and splits the test and
    policy columns so each (column, unit/flag) combination gets its own
    column.

    Args:
        cols_to_keep: Names of the columns the returned dataframe keeps.

    Returns:
        The cleaned dataframe restricted to cols_to_keep.
    """
    df = pd.read_csv(CSV_URL)
    # Counts use nullable Int64 so missing values survive the cast.
    for col in INTEGRAL_COLS:
        df[col] = df[col].astype('Int64')
        if col.endswith('_flag'):
            assert get_unique_values(df[col]) == [0, 1, pd.NA]
    # Attach Data Commons IDs via the locations mapping.
    df = pd.merge(df,
                  pd.read_csv(LOCATIONS_URL),
                  left_on='open_covid_region_code',
                  right_on='region_code')
    df = df[df['region_code_type'] != 'other']
    df['observationAbout'] = df[['region_code',
                                 'datacommons_id']].apply(get_observation_about,
                                                          axis=1)
    assert not any(pd.isna(df['observationAbout']))
    # Guard against new/unexpected test units appearing upstream.
    assert get_unique_values(df['test_units']) == [
        'people tested', 'people tested (incl. non-PCR)', 'samples tested',
        'tests performed', 'units unclear', pd.NA
    ]
    # Split the test columns by units
    for col in ('tests_cumulative', 'tests_new'):
        split_col(df, col, 'test_units')
        validate_split(df, col, 'test_units')
    # Split the policy columns by flags
    for col in POLICY_COL_PREFIX_TO_STATVAR_PREFIX.keys():
        by = col + '_flag'
        split_col(df, col, by)
        validate_split(df, col, by)
    return df[cols_to_keep]
def get_observation_about(row: pd.Series) -> str:
    """Formats the observationAbout reference for a place.

    Args:
        row: A pandas series with two fields, 'region_code' and
            'datacommons_id'.

    Returns:
        'dcid:' followed by 'datacommons_id' when the row carries a DCID;
        otherwise 'l:' followed by 'region_code' so the place is resolved
        locally.
    """
    has_dcid = not pd.isna(row.datacommons_id)
    return f'dcid:{row.datacommons_id}' if has_dcid else f'l:{row.region_code}'
def clean_col_name(col: str) -> str:
    """Sanitize a column name by mapping every character that is not an
    ASCII letter or digit to an underscore."""
    non_alnum = re.compile(r'[^A-Za-z0-9]')
    return non_alnum.sub('_', col)
def get_unique_values(series: pd.Series) -> List:
    """Returns a list of unique values in a series, including NaNs.

    Args:
        series: Input series; may contain missing values.

    Returns:
        Sorted list of the unique non-null values, with a single ``pd.NA``
        appended at the end when the series has any missing entry.
    """
    # sorted() already returns a list; the extra list() wrapper was redundant.
    vals = sorted(series.dropna().unique())
    # Vectorized NA check instead of a Python-level any(pd.isna(...)) scan.
    if series.isna().any():
        vals.append(pd.NA)
    return vals
def get_col_value_name(col: str, value: str) -> str:
    """Build a derived column name: the base column name joined to the
    sanitized value by an underscore."""
    suffix = clean_col_name(value)
    return '{}_{}'.format(col, suffix)
def split_col(df: pd.DataFrame, col_to_split: str, col_by: str) -> List[str]:
    """Splits "col_to_split" into one new column per unique value of "col_by".

    For every unique value in "col_by" (including NaN), a new column named
    "{col_to_split}_{value}" (value stringified and sanitized, replacing
    characters other than letters and digits with underscores) is added to
    the dataframe. It copies "col_to_split" only on the rows where "col_by"
    equals that value; all other rows are NaN.

    Example:
        df = col1 | col2
              1   |  2
              1   |  3
              4   |  3
              5   | NaN
        split_col(df, 'col1', 'col2') modifies df to
             col1 | col2 | col1_2 | col1_3 | col1_NaN
              1   |  2   |   1    |  NaN   |   NaN
              1   |  3   |  NaN   |   1    |   NaN
              4   |  3   |  NaN   |   4    |   NaN
              5   | NaN  |  NaN   |  NaN   |    5

    Args:
        df: The dataframe to operate on (modified in place).
        col_to_split: Name of the column to split.
        col_by: Name of the column to split by.

    Returns:
        Names of the new columns added to the input dataframe.
    """
    new_cols = []
    for value in get_unique_values(df[col_by]):
        new_col = get_col_value_name(col_to_split, str(value))
        if pd.isna(value):
            mask = pd.isna(df[col_by])
        else:
            mask = df[col_by] == value
        df[new_col] = df[mask][col_to_split]
        new_cols.append(new_col)
    return new_cols
def validate_split(df: pd.DataFrame, col_splitted: str, col_by: str):
    """Sanity-checks the columns produced by split_col.

    For every row: if the original column is NA, all derived columns must be
    NA; otherwise exactly the derived column matching the row's "col_by"
    value carries the original value and the rest are NA. Raises
    AssertionError when the invariant is violated.
    """
    values = get_unique_values(df[col_by])
    cols = [get_col_value_name(col_splitted, str(value)) for value in values]
    for record in df[[col_splitted, col_by] + cols].to_dict(orient='records'):
        original = record[col_splitted]
        # NA in the source column means NA in every derived column.
        if pd.isna(original):
            assert all(pd.isna(record[c]) for c in cols)
            continue
        # Otherwise only the matching derived column holds the value.
        target = get_col_value_name(col_splitted, str(record[col_by]))
        for c in cols:
            if c == target:
                assert record[c] == original
            else:
                assert pd.isna(record[c])
def get_policy_col_to_statvar(col_to_prefix: Dict[str, str]) -> Dict[str, str]:
    """Returns mappings from flag-split policy columns to StatVar DCIDs.

    Args:
        col_to_prefix: Mappings from policy column names to StatVar DCID
            prefixes (e.g. POLICY_COL_PREFIX_TO_STATVAR_PREFIX).

    Returns:
        For each column, three entries keyed by the flag-suffixed column
        names produced by split_col: flag 0 -> selected areas only,
        flag 1 -> all areas, missing flag -> unknown spatial coverage.
    """
    col_to_statvar = {}
    # Bug fix: iterate over the passed-in mapping; previously the parameter
    # was ignored and the module-level constant was read instead.
    for col, prefix in col_to_prefix.items():
        col_to_statvar[f'{col}_0'] = f'{prefix}_SelectedAdministrativeAreas'
        col_to_statvar[f'{col}_1'] = f'{prefix}_AllAdministrativeAreas'
        col_to_statvar[f'{col}__NA_'] = f'{prefix}_SpatialCoverageUnknown'
    return col_to_statvar
def generate_tmcfs(col_to_statvar: Dict[str, str]) -> Iterable[str]:
    """Generates template MCFs, one per column.

    Each call to generate_tmcfs_helper starts its node numbering at
    len(col_to_tmcf) so the E<n> node ids stay unique across all groups.

    Args:
        col_to_statvar: Mappings from column names to StatVar DCIDs; must
            cover exactly the columns handled below.

    Returns:
        The template MCF strings (a dict values view), one per column.
    """
    col_to_tmcf = generate_tmcfs_helper(REGULAR_COLS, col_to_statvar, 0,
                                        'dcs:OpenCovid19Data')
    # Columns that also carry a unit (currency / policy-extent enums).
    col_to_unit = get_col_to_unit()
    col_to_tmcf = {
        **col_to_tmcf,
        **generate_tmcfs_helper(col_to_unit.keys(), col_to_statvar,
                                len(col_to_tmcf), 'dcs:OpenCovid19Data', col_to_unit)
    }
    # Index columns
    col_to_tmcf = {
        **col_to_tmcf,
        **generate_tmcfs_helper(INDEX_COLS, col_to_statvar, len(col_to_tmcf), 'dcs:OpenCovid19Data')
    }
    # Index for display columns
    col_to_tmcf = {
        **col_to_tmcf,
        **generate_tmcfs_helper(DISPLAY_COLS,
                                col_to_statvar,
                                len(col_to_tmcf),
                                'dcs:OpenCovid19Data',
                                mqual='dcs:SmoothedByRepeatingLatestPoint')
    }
    # Confirmed cases and deaths
    col_to_tmcf = {
        **col_to_tmcf,
        **generate_tmcfs_helper(CONFIRMED_COLS, col_to_statvar, len(col_to_tmcf), 'dcs:OxCGRTViaOpenCovid19Data')
    }
    # Every StatVar column must have exactly one template MCF.
    assert col_to_statvar.keys() == col_to_tmcf.keys()
    return col_to_tmcf.values()
def write_strs(strs: Iterable[str], dest: str) -> None:
    """Write each string to ``dest``, one per line (newline-terminated)."""
    with open(dest, 'w') as out:
        out.writelines(f'{elem}\n' for elem in strs)
def generate_geo_mcfs(observation_abouts: pd.Series) -> List[str]:
    """Generates node MCFs for geos that do not come with DCIDs and must be
    resolved locally (values prefixed 'l:' instead of 'dcid:')."""
    template = ('Node: {iso}\n'
                'typeOf: schema:Place\n'
                'isoCode: "{iso}"\n')
    return [
        template.format(iso=value[2:])
        for value in observation_abouts.unique()
        if value.startswith('l:')
    ]
def generate_tmcfs_helper(cols: Iterable[str],
                          col_to_statvar: Dict[str, str],
                          starting_index: int,
                          mmethod: str,
                          col_to_unit: Dict[str, str] = None,
                          mqual: str = None) -> Dict[str, str]:
    """Generates template MCFs for a group of columns.

    Args:
        cols: Column names to generate template MCFs for.
        col_to_statvar: Mappings from column names to StatVar DCIDs.
        starting_index: Starting node index; node ids must stay unique
            across the whole template MCF file, so callers pass the number
            of nodes already generated.
        mmethod: measurementMethod value for every column in this group.
        col_to_unit: Optional mappings from column names to units; when
            provided, a 'unit' line is appended to each template MCF.
        mqual: Optional measurementQualifier value for this group.

    Returns:
        Mappings from column names to template MCFs.
    """
    template = ('Node: E:open_covid_19_data->E{index}\n'
                'typeOf: dcs:StatVarObservation\n'
                'variableMeasured: dcs:{statvar}\n'
                f'measurementMethod: {mmethod}\n'
                'observationAbout: C:open_covid_19_data->observationAbout\n'
                'observationDate: C:open_covid_19_data->date\n'
                'value: C:open_covid_19_data->{column_name}\n'
                'statType: dcs:measuredValue\n')
    if mqual:
        # Bug fix: the property name was misspelled 'measurementQualifer',
        # which the Data Commons import would not recognize.
        template += f'measurementQualifier: {mqual}\n'
    col_to_tmcf = {}
    for col in cols:
        tmcf = template.format_map({
            'index': starting_index,
            'statvar': col_to_statvar[col],
            'column_name': col
        })
        if col_to_unit:
            tmcf += f'unit: {col_to_unit[col]}\n'
        col_to_tmcf[col] = tmcf
        starting_index += 1
    return col_to_tmcf
def get_col_to_unit() -> Dict[str, str]:
    """Returns column-name -> unit mappings for every column that needs a
    unit: the static partial mapping plus the flag-split variants of the
    policy columns (suffixes '_0', '_1', '__NA_' as produced by split_col)."""
    policy_units = {
        f'{col}{suffix}': unit
        for col, unit in POLICY_COL_PREFIX_TO_UNIT.items()
        for suffix in ('_0', '_1', '__NA_')
    }
    return {**COL_TO_UNIT_PARTIAL, **policy_units}
def main():
    """Runs the script: builds the CSV, template MCFs, and geo MCFs."""
    col_to_statvar = {
        **COL_TO_STATVAR_PARTIAL,
        **get_policy_col_to_statvar(POLICY_COL_PREFIX_TO_STATVAR_PREFIX)
    }
    columns = ['observationAbout', 'date'] + list(col_to_statvar)
    frame = generate_df(columns)
    frame.to_csv(CSV_OUT, index=False)
    write_strs(generate_tmcfs(col_to_statvar), TMCF_OUT)
    write_strs(generate_geo_mcfs(frame['observationAbout']), GEOS_OUT)


if __name__ == "__main__":
    main()
| 42.785714 | 130 | 0.698804 |
acf08fd8a71f4ed398d46ef3b5242640d0edfb8a | 4,701 | py | Python | configs/hand/hrnetv2/onehand10k/hrnetv2_w18_onehand10k_256x256.py | carolchenyx/mmpose | cd74bf1d0b13954188cc678415fd0ef98a74b46b | [
"Apache-2.0"
] | 1 | 2021-04-20T08:39:34.000Z | 2021-04-20T08:39:34.000Z | configs/hand/hrnetv2/onehand10k/hrnetv2_w18_onehand10k_256x256.py | carolchenyx/mmpose | cd74bf1d0b13954188cc678415fd0ef98a74b46b | [
"Apache-2.0"
] | null | null | null | configs/hand/hrnetv2/onehand10k/hrnetv2_w18_onehand10k_256x256.py | carolchenyx/mmpose | cd74bf1d0b13954188cc678415fd0ef98a74b46b | [
"Apache-2.0"
] | null | null | null | log_level = 'INFO'
# Runtime settings: no pretrained checkpoint to load, no run to resume.
load_from = None
resume_from = None
# Distributed training uses the NCCL backend.
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
# Save a checkpoint every 10 epochs.
checkpoint_config = dict(interval=10)
# Evaluate every 10 epochs; the best model is selected by AUC.
evaluation = dict(
    interval=10, metric=['PCK', 'AUC', 'EPE'], key_indicator='AUC')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)  # gradient clipping disabled
# learning policy
# Step decay at epochs 170 and 200, with 500 linear warmup iterations.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
# Text logging every 10 iterations (TensorBoard hook left disabled).
log_config = dict(
    interval=10,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# Keypoint channel layout: 21 hand keypoints, all used for inference.
channel_cfg = dict(
    num_output_channels=21,
    dataset_joints=21,
    dataset_channel=[
        [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
            19, 20
        ],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20
    ])
# model settings
# Top-down pose model: HRNetV2-W18 backbone (4 multi-resolution stages)
# with a simple head that resizes and concatenates all branch outputs.
model = dict(
    type='TopDown',
    pretrained='open-mmlab://msra/hrnetv2_w18',
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(18, 36)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(18, 36, 72)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(18, 36, 72, 144),
                # Keep all four resolution branches for the head.
                multiscale_output=True),
            upsample=dict(mode='bilinear', align_corners=False))),
    keypoint_head=dict(
        type='TopDownSimpleHead',
        in_channels=[18, 36, 72, 144],
        in_index=(0, 1, 2, 3),
        input_transform='resize_concat',
        out_channels=channel_cfg['num_output_channels'],
        num_deconv_layers=0,
        extra=dict(
            final_conv_kernel=1, num_conv_layers=1, num_conv_kernels=(1, )),
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    # Inference averages predictions over the flipped image as well.
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))
# 256x256 input crops regressed to 64x64 heatmaps.
data_cfg = dict(
    image_size=[256, 256],
    heatmap_size=[64, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'])
# Training augmentation: random flip, +/-90 deg rotation, 30% scale jitter,
# ImageNet normalization, Gaussian target heatmaps (sigma=2).
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=90, scale_factor=0.3),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'flip_pairs'
        ]),
]
# Validation: deterministic affine crop + normalization only.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=['image_file', 'center', 'scale', 'rotation', 'flip_pairs']),
]
test_pipeline = val_pipeline
data_root = 'data/onehand10k'
# OneHand10K splits: train on the train annotations, validate and test on
# the same test annotations.
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    train=dict(
        type='OneHand10KDataset',
        ann_file=f'{data_root}/annotations/onehand10k_train.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=train_pipeline),
    val=dict(
        type='OneHand10KDataset',
        ann_file=f'{data_root}/annotations/onehand10k_test.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
    test=dict(
        type='OneHand10KDataset',
        ann_file=f'{data_root}/annotations/onehand10k_test.json',
        img_prefix=f'{data_root}/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
)
| 28.840491 | 79 | 0.570304 |
acf092567488c81362bf1022ca9127f56bb8d3e4 | 5,557 | py | Python | PyEIS/PyEIS_Advanced_tools.py | deanna-abrams/PyEIS | ad8db40fe74dccba3ef2005064a7ad5219364814 | [
"Apache-2.0"
] | null | null | null | PyEIS/PyEIS_Advanced_tools.py | deanna-abrams/PyEIS | ad8db40fe74dccba3ef2005064a7ad5219364814 | [
"Apache-2.0"
] | null | null | null | PyEIS/PyEIS_Advanced_tools.py | deanna-abrams/PyEIS | ad8db40fe74dccba3ef2005064a7ad5219364814 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 10:18:41 2018
This script contains tools for advanced EIS analysis. These scripts are meant for post treatment of
resistances and capacitors
@author: Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
"""
import numpy as np
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
qe = codata.physical_constants['elementary charge'][0]
R = codata.physical_constants['molar gas constant'][0]
kB = codata.physical_constants['Boltzmann constant'][0]
kB_eV = codata.physical_constants['Boltzmann constant in eV/K'][0]
N_A = codata.physical_constants['Avogadro constant'][0]
__all__ = ['norm_Fara_Q_C', 'norm_nonFara_Q_C']
# Normalization of constant phase elements
def norm_nonFara_Q_C(Rs, Q, n, L='none'):
    """Normalization of a non-faradaic interfacial capacitance
    (blocking electrode), following Brug's relation for a CPE in
    series with Rs.

    Ref.:
        - G.J.Brug et al., J.Electroanal. Chem. Interfacial Electrochem.,
          176, 275 (1984)

    Kristian B. Knudsen (kknu@berkeley.edu || kristianbknudsen@gmail.com)

    Inputs
    ------------------
    Rs = Series Resistance [ohm]
    Q = Constant phase element [s^n/ohm]
    n = Exponent of CPE [-]

    Optional Inputs
    ------------------
    L = Thickness/length of electrode, used in Porous Electrode Theory [cm];
        'none' skips the porous-electrode scaling.

    Returns
    ------------------
    C_eff = normalized capacitance for a non-faradaic electrode [s/ohm = F]
    """
    # Porous-electrode theory scales the CPE by the electrode length.
    q_scaled = Q if L == 'none' else Q * L
    return (q_scaled * Rs ** (1 - n)) ** (1 / n)
def norm_Fara_Q_C(Rs, Rct, n, Q='none', fs='none', L='none'):
    """
    Normalization of a faradaic interfacial capacitance.
    Contains option to use summit frequency (fs) instead of CPE (Q) -
    valuable for outputs of fits.

    Following Brug and Hirschorn's normalization of distribution of
    relaxation times:
        C_eff = Q^(1/n) * [Rs*Rct/(Rs+Rct)]^((1-n)/n)

    Ref.:
        - G.J.Brug et al., J.Electroanal. Chem. Interfacial Electrochem.,
          176, 275 (1984)
        - B.Hirschorn, et al., Electrochimica Acta, 55, 6218 (2010)

    Kristian B. Knudsen (kknu@berkeley.edu || kristianbknudsen@gmail.com)

    Inputs
    ----------
    Rs = Series Resistance [ohm]
    Rct = Charge Transfer Resistance [ohm]
    n = Exponent of CPE [-]

    Optional Inputs
    ------------------
    Q = Constant phase element [s^n/ohm]; takes precedence when both Q
        and fs are supplied
    fs = summit frequency of fitted spectra [Hz]; used to reconstruct Q
        when Q is not given
    L = Thickness/length of electrode [cm]; only applied when Q is
        reconstructed from fs (Rct is normalized per length)

    Returns
    ----------
    C_eff = normalized capacitance for a faradaic electrode [s/ohm = F]

    Raises
    ----------
    ValueError if neither Q nor fs is supplied (the original code
    crashed with an UnboundLocalError in that case).
    """
    if Q == 'none' and fs == 'none':
        raise ValueError("Either Q or fs must be supplied.")
    if Q == 'none':
        # Reconstruct Q from the summit frequency: Q = 1/(Rct*(2*pi*fs)^n).
        if L == 'none':
            Q = 1 / (Rct * (2 * np.pi * fs) ** n)
            # Bug fix: the parallel combination is Rs*Rct/(Rs+Rct); the
            # original divided Rs*Rct by itself, which is identically 1.
            R_parallel = (Rs * Rct) / (Rs + Rct)
        else:
            Rct_norm = Rct / L
            Q = 1 / (Rct_norm * (2 * np.pi * fs) ** n)
            R_parallel = (Rs * Rct_norm) / (Rs + Rct_norm)
    else:
        # Q supplied directly (L is not applied in this path, matching
        # the original behavior).
        R_parallel = (Rs * Rct) / (Rs + Rct)
    return Q ** (1 / n) * R_parallel ** ((1 - n) / n)
# internal functions
def Theta(E, E0, n, T=298.15, F=F, R=R):
    """Nernstian exponential exp(nF(E-E0)/RT).

    See explanation in C_redox_Estep_semiinfinite()

    Kristian B. Knudsen (kknu@berkeley.edu || Kristianbknudsen@gmail.com)
    """
    exponent = (n * F / (R * T)) * (E - E0)
    return np.exp(exponent)
def Varsigma(D_ox, D_red):
    """Square root of the diffusion-coefficient ratio, (D_ox/D_red)^(1/2).

    See explanation in C_redox_Estep_semiinfinite()

    Kristian B. Knudsen (kknu@berkeley.edu || Kristianbknudsen@gmail.com)
    """
    return np.sqrt(D_ox / D_red)
def C_redox_Estep_semiinfinite(E, E0, n, C_ox, D_ox, D_red, T=298.15, R=R, F=F):
    """
    The concentration at the electrode surface (x=0) as a function of
    potential following Nernst eq. during semi-infinite linear diffusion
    (Macro disk electrode)

    O + ne- --> R

    Ref: Bard A.J., Faulkner L. R., ISBN: 0-471-04372-9 (2001)
    "Electrochemical methods: Fundamentals and applications". New York: Wiley.

    Author: Kristian B. Knudsen (kknu@berkeley.edu || Kristianbknudsen@gmail.com)

    returns
    ----------
    [0] = C_red at x=0
    [1] = C_ox at x=0
    """
    # Hoist the two pure helper calls; both surface concentrations share
    # the same 1 + varsigma*theta denominator.
    vs = Varsigma(D_ox=D_ox, D_red=D_red)
    th = Theta(E=E, E0=E0, n=n, T=T, F=F, R=R)
    denom = 1 + vs * th
    C_red0 = C_ox * vs / denom
    C_ox0 = C_ox * vs * th / denom
    return C_red0, C_ox0
def C_redox_Estep_semihemisperhical(E, E0, n, C_ox, D_ox, D_red, T=298.15, R=R, F=F):
    """
    The concentration at the electrode surface (x=0) as a function of
    potential following Nernst eq. during semi-infinite hemispherical
    diffusion (Micro disk electrode)

    O + ne- --> R

    Note: This equation applies only for a reversible system with rapid
    kinetics

    Ref: Bard A.J., Faulkner L. R., ISBN: 0-471-04372-9 (2001)
    "Electrochemical methods: Fundamentals and applications". New York: Wiley.

    Author: Kristian B. Knudsen (kknu@berkeley.edu || Kristianbknudsen@gmail.com)

    returns
    ----------
    [0] = C_red at x=0
    [1] = C_ox at x=0
    """
    # Hemispherical diffusion uses varsigma squared in both expressions.
    vs2 = Varsigma(D_ox, D_red) ** 2
    th = Theta(E, E0, n, T, F, R)
    denom = 1 + vs2 * th
    C_red0 = C_ox * vs2 / denom
    C_ox0 = C_ox * (1 - 1 / denom)
    return C_red0, C_ox0
| 34.515528 | 101 | 0.597445 |
acf092ba1f34bdc29a553faaae76bbb3f4ab79bf | 439 | py | Python | python/20200701/ex04.py | kogepanh/class-numeric | 4fd4cb56818339c6348f9f691c64fb33a09e1b69 | [
"MIT"
] | null | null | null | python/20200701/ex04.py | kogepanh/class-numeric | 4fd4cb56818339c6348f9f691c64fb33a09e1b69 | [
"MIT"
] | null | null | null | python/20200701/ex04.py | kogepanh/class-numeric | 4fd4cb56818339c6348f9f691c64fb33a09e1b69 | [
"MIT"
] | null | null | null | # ex04
class Counter:
    """A simple up/down counter that announces (in Japanese) each change."""

    def __init__(self):
        # Current counter value; starts at zero.
        self.count = 0

    def __str__(self):
        return f"現在のカウンタは {self.count} です。"

    def up(self):
        """Increment the counter by one and print the new value."""
        self.count = self.count + 1
        print("カウンタの値を1増やしました。現在のカウンタは" ,self.count, "です。")

    def down(self):
        """Decrement the counter by one and print the new value."""
        self.count = self.count - 1
        print("カウンタの値を1減らしました。現在のカウンタは" ,self.count, "です。")
# main
# Demonstrates Counter: two increments followed by one decrement,
# printing the counter state after each phase.
count1 = Counter()
count1.up()
count1.up()
print(count1)
count1.down()
print(count1)
| 17.56 | 59 | 0.596811 |
acf093072a6d741f1c15ce132e740c761af7a137 | 187 | py | Python | resqueue/utils.py | YSanchezAraujo/slurm_handler | a9bf967b00e74b0d2f14bd04d132f3728f4bf2d4 | [
"Apache-2.0"
] | 4 | 2018-02-10T16:40:09.000Z | 2022-03-31T17:55:26.000Z | resqueue/utils.py | YSanchezAraujo/resqueue | a9bf967b00e74b0d2f14bd04d132f3728f4bf2d4 | [
"Apache-2.0"
] | 1 | 2018-06-20T13:40:55.000Z | 2018-06-20T13:40:55.000Z | resqueue/utils.py | YSanchezAraujo/resqueue | a9bf967b00e74b0d2f14bd04d132f3728f4bf2d4 | [
"Apache-2.0"
] | 1 | 2021-05-21T13:33:06.000Z | 2021-05-21T13:33:06.000Z | import subprocess
def shell(cmd_split):
    """Run a command and return its stdout as a list of lines.

    Args:
        cmd_split: Command and arguments as an argv list,
            e.g. ["squeue", "-u", "me"] (no shell involved).

    Returns:
        The command's stdout decoded as UTF-8 and split on newlines;
        the final element is "" when the output ends with a newline.
    """
    # subprocess.run waits for completion, like Popen + communicate did;
    # stderr is still inherited from the parent, matching the original.
    result = subprocess.run(cmd_split, stdout=subprocess.PIPE, check=False)
    return result.stdout.decode("utf-8").split("\n")
| 26.714286 | 65 | 0.71123 |
acf095098a5afa41a68a36c81b9663668a78102b | 31,385 | py | Python | v6.0.5/ips/fortios_ips_sensor.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 14 | 2018-09-25T20:35:25.000Z | 2021-07-14T04:30:54.000Z | v6.0.5/ips/fortios_ips_sensor.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 32 | 2018-10-09T04:13:42.000Z | 2020-05-11T07:20:28.000Z | v6.0.5/ips/fortios_ips_sensor.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 11 | 2018-10-09T00:14:53.000Z | 2021-11-03T10:54:09.000Z | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_ips_sensor
short_description: Configure IPS sensor in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify ips feature and sensor category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
version_added: 2.9
ips_sensor:
description:
- Configure IPS sensor.
default: null
type: dict
suboptions:
block_malicious_url:
description:
- Enable/disable malicious URL blocking.
type: str
choices:
- disable
- enable
comment:
description:
- Comment.
type: str
entries:
description:
- IPS sensor filter.
type: list
suboptions:
action:
description:
- Action taken with traffic in which signatures are detected.
type: str
choices:
- pass
- block
- reset
- default
application:
description:
- Applications to be protected. set application ? lists available applications. all includes all applications. other includes all
unlisted applications.
type: str
exempt_ip:
description:
- Traffic from selected source or destination IP addresses is exempt from this signature.
type: list
suboptions:
dst_ip:
description:
- Destination IP address and netmask.
type: str
id:
description:
- Exempt IP ID.
required: true
type: int
src_ip:
description:
- Source IP address and netmask.
type: str
id:
description:
- Rule ID in IPS database (0 - 4294967295).
required: true
type: int
location:
description:
- Protect client or server traffic.
type: str
log:
description:
- Enable/disable logging of signatures included in filter.
type: str
choices:
- disable
- enable
log_attack_context:
description:
- "Enable/disable logging of attack context: URL buffer, header buffer, body buffer, packet buffer."
type: str
choices:
- disable
- enable
log_packet:
description:
- Enable/disable packet logging. Enable to save the packet that triggers the filter. You can download the packets in pcap format
for diagnostic use.
type: str
choices:
- disable
- enable
os:
description:
- Operating systems to be protected. all includes all operating systems. other includes all unlisted operating systems.
type: str
protocol:
description:
- Protocols to be examined. set protocol ? lists available protocols. all includes all protocols. other includes all unlisted
protocols.
type: str
quarantine:
description:
- Quarantine method.
type: str
choices:
- none
- attacker
quarantine_expiry:
description:
- Duration of quarantine. (Format ###d##h##m, minimum 1m, maximum 364d23h59m). Requires quarantine set to attacker.
type: str
quarantine_log:
description:
- Enable/disable quarantine logging.
type: str
choices:
- disable
- enable
rate_count:
description:
- Count of the rate.
type: int
rate_duration:
description:
- Duration (sec) of the rate.
type: int
rate_mode:
description:
- Rate limit mode.
type: str
choices:
- periodical
- continuous
rate_track:
description:
- Track the packet protocol field.
type: str
choices:
- none
- src-ip
- dest-ip
- dhcp-client-mac
- dns-domain
rule:
description:
- Identifies the predefined or custom IPS signatures to add to the sensor.
type: list
suboptions:
id:
description:
- Rule IPS.
required: true
type: int
severity:
description:
- Relative severity of the signature, from info to critical. Log messages generated by the signature include the severity.
type: str
status:
description:
- Status of the signatures included in filter. default enables the filter and only use filters with default status of enable.
Filters with default status of disable will not be used.
type: str
choices:
- disable
- enable
- default
extended_log:
description:
- Enable/disable extended logging.
type: str
choices:
- enable
- disable
filter:
description:
- IPS sensor filter.
type: list
suboptions:
action:
description:
- Action of selected rules.
type: str
choices:
- pass
- block
- reset
- default
application:
description:
- Vulnerable application filter.
type: str
location:
description:
- Vulnerability location filter.
type: str
log:
description:
- Enable/disable logging of selected rules.
type: str
choices:
- disable
- enable
log_packet:
description:
- Enable/disable packet logging of selected rules.
type: str
choices:
- disable
- enable
name:
description:
- Filter name.
required: true
type: str
os:
description:
- Vulnerable OS filter.
type: str
protocol:
description:
- Vulnerable protocol filter.
type: str
quarantine:
description:
- Quarantine IP or interface.
type: str
choices:
- none
- attacker
quarantine_expiry:
description:
- Duration of quarantine in minute.
type: int
quarantine_log:
description:
- Enable/disable logging of selected quarantine.
type: str
choices:
- disable
- enable
severity:
description:
- Vulnerability severity filter.
type: str
status:
description:
- Selected rules status.
type: str
choices:
- disable
- enable
- default
name:
description:
- Sensor name.
required: true
type: str
override:
description:
- IPS override rule.
type: list
suboptions:
action:
description:
- Action of override rule.
type: str
choices:
- pass
- block
- reset
exempt_ip:
description:
- Exempted IP.
type: list
suboptions:
dst_ip:
description:
- Destination IP address and netmask.
type: str
id:
description:
- Exempt IP ID.
required: true
type: int
src_ip:
description:
- Source IP address and netmask.
type: str
log:
description:
- Enable/disable logging.
type: str
choices:
- disable
- enable
log_packet:
description:
- Enable/disable packet logging.
type: str
choices:
- disable
- enable
quarantine:
description:
- Quarantine IP or interface.
type: str
choices:
- none
- attacker
quarantine_expiry:
description:
- Duration of quarantine in minute.
type: int
quarantine_log:
description:
- Enable/disable logging of selected quarantine.
type: str
choices:
- disable
- enable
rule_id:
description:
- Override rule ID.
type: int
status:
description:
- Enable/disable status of override rule.
type: str
choices:
- disable
- enable
replacemsg_group:
description:
- Replacement message group. Source system.replacemsg-group.name.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPS sensor.
fortios_ips_sensor:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
ips_sensor:
block_malicious_url: "disable"
comment: "Comment."
entries:
-
action: "pass"
application: "<your_own_value>"
exempt_ip:
-
dst_ip: "<your_own_value>"
id: "10"
src_ip: "<your_own_value>"
id: "12"
location: "<your_own_value>"
log: "disable"
log_attack_context: "disable"
log_packet: "disable"
os: "<your_own_value>"
protocol: "<your_own_value>"
quarantine: "none"
quarantine_expiry: "<your_own_value>"
quarantine_log: "disable"
rate_count: "22"
rate_duration: "23"
rate_mode: "periodical"
rate_track: "none"
rule:
-
id: "27"
severity: "<your_own_value>"
status: "disable"
extended_log: "enable"
filter:
-
action: "pass"
application: "<your_own_value>"
location: "<your_own_value>"
log: "disable"
log_packet: "disable"
name: "default_name_37"
os: "<your_own_value>"
protocol: "<your_own_value>"
quarantine: "none"
quarantine_expiry: "41"
quarantine_log: "disable"
severity: "<your_own_value>"
status: "disable"
name: "default_name_45"
override:
-
action: "pass"
exempt_ip:
-
dst_ip: "<your_own_value>"
id: "50"
src_ip: "<your_own_value>"
log: "disable"
log_packet: "disable"
quarantine: "none"
quarantine_expiry: "55"
quarantine_log: "disable"
rule_id: "57"
status: "disable"
replacemsg_group: "<your_own_value> (source system.replacemsg-group.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a session on the FortiGate/FortiOS device described by *data*.

    Extracts host and credentials from the module parameters, enables
    API debugging, toggles HTTPS (on unless data['https'] is present and
    falsy), and logs in with the given certificate-verification setting.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(host, username, password, verify=ssl_verify)
def filter_ips_sensor_data(json):
    """Project *json* onto the keys the ips/sensor endpoint accepts.

    Unknown keys and keys whose value is None are dropped; everything
    else is passed through unchanged.
    """
    option_list = ['block_malicious_url', 'comment', 'entries',
                   'extended_log', 'filter', 'name',
                   'override', 'replacemsg_group']
    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    FortiOS REST payloads use hyphenated keys while Ansible arguments
    use underscores; this converts keys at every nesting level. Values
    and non-container leaves are returned unchanged.

    Bug fix: list elements are now converted and collected — the
    original only rebound the loop variable, so dicts nested inside
    lists silently kept their underscore keys.
    """
    if isinstance(data, list):
        data = [underscore_to_hyphen(elem) for elem in data]
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def ips_sensor(data, fos):
    """Create/update or delete one ips/sensor object on the device.

    state == "present" pushes the (filtered, hyphen-keyed) configuration;
    state == "absent" deletes the object by its 'name' key. Any other
    state falls through and returns None.
    """
    vdom = data['vdom']
    payload = underscore_to_hyphen(filter_ips_sensor_data(data['ips_sensor']))
    state = data['state']
    if state == "present":
        return fos.set('ips', 'sensor', data=payload, vdom=vdom)
    if state == "absent":
        return fos.delete('ips', 'sensor', mkey=payload['name'], vdom=vdom)
def is_successful_status(status):
    """Treat a FortiOS response as success.

    Success is either an explicit "success" status, or a DELETE that
    returned 404 (object already gone — idempotent delete).
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_ips(data, fos):
    """Dispatch the requested ips configuration change.

    Returns:
        (is_error, changed, response) for Ansible's exit handling.

    Raises:
        ValueError: when no 'ips_sensor' configuration was supplied —
            the original fell through with 'resp' unbound and crashed
            with an UnboundLocalError instead.
    """
    if not data['ips_sensor']:
        raise ValueError("missing required configuration: ips_sensor")
    resp = ips_sensor(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"ips_sensor": {
"required": False, "type": "dict", "default": None,
"options": {
"block_malicious_url": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"comment": {"required": False, "type": "str"},
"entries": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["pass", "block", "reset",
"default"]},
"application": {"required": False, "type": "str"},
"exempt_ip": {"required": False, "type": "list",
"options": {
"dst_ip": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"src_ip": {"required": False, "type": "str"}
}},
"id": {"required": True, "type": "int"},
"location": {"required": False, "type": "str"},
"log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_attack_context": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_packet": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"os": {"required": False, "type": "str"},
"protocol": {"required": False, "type": "str"},
"quarantine": {"required": False, "type": "str",
"choices": ["none", "attacker"]},
"quarantine_expiry": {"required": False, "type": "str"},
"quarantine_log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"rate_count": {"required": False, "type": "int"},
"rate_duration": {"required": False, "type": "int"},
"rate_mode": {"required": False, "type": "str",
"choices": ["periodical", "continuous"]},
"rate_track": {"required": False, "type": "str",
"choices": ["none", "src-ip", "dest-ip",
"dhcp-client-mac", "dns-domain"]},
"rule": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"severity": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable", "default"]}
}},
"extended_log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"filter": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["pass", "block", "reset",
"default"]},
"application": {"required": False, "type": "str"},
"location": {"required": False, "type": "str"},
"log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_packet": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"name": {"required": True, "type": "str"},
"os": {"required": False, "type": "str"},
"protocol": {"required": False, "type": "str"},
"quarantine": {"required": False, "type": "str",
"choices": ["none", "attacker"]},
"quarantine_expiry": {"required": False, "type": "int"},
"quarantine_log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"severity": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable", "default"]}
}},
"name": {"required": True, "type": "str"},
"override": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["pass", "block", "reset"]},
"exempt_ip": {"required": False, "type": "list",
"options": {
"dst_ip": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"src_ip": {"required": False, "type": "str"}
}},
"log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"log_packet": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"quarantine": {"required": False, "type": "str",
"choices": ["none", "attacker"]},
"quarantine_expiry": {"required": False, "type": "int"},
"quarantine_log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"rule_id": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}},
"replacemsg_group": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_ips(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_ips(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 39.82868 | 157 | 0.41026 |
acf09588383f0465b47c7c38d944d952e9dcdc57 | 2,028 | py | Python | tensorflow/vision/TFRecord_dataset/CreateImagesTFRecord.py | Metwalli/cs230-code-examples | 725c77331b7d2112d20de277d26a66b76277914f | [
"MIT"
] | null | null | null | tensorflow/vision/TFRecord_dataset/CreateImagesTFRecord.py | Metwalli/cs230-code-examples | 725c77331b7d2112d20de277d26a66b76277914f | [
"MIT"
] | null | null | null | tensorflow/vision/TFRecord_dataset/CreateImagesTFRecord.py | Metwalli/cs230-code-examples | 725c77331b7d2112d20de277d26a66b76277914f | [
"MIT"
] | null | null | null | import tensorflow as tf
import os
import matplotlib.image as mpimg
class GenerateTFRecord:
    """Serializes a folder of labeled images into a TFRecord file.

    The class id of each image is inferred from its file name: the part
    before the first '_' (and before the extension) is looked up in the
    label map supplied at construction time.
    """

    def __init__(self, labels):
        # Mapping from file-name prefix (e.g. 'cat') to integer class id.
        self.labels = labels

    def convert_image_folder(self, img_folder, tfrecord_file_name):
        """Write every image found in *img_folder* into one TFRecord."""
        # Get all file names of images present in folder
        names = os.listdir(img_folder)
        full_paths = [os.path.abspath(os.path.join(img_folder, name))
                      for name in names]
        with tf.python_io.TFRecordWriter(tfrecord_file_name) as writer:
            for path in full_paths:
                writer.write(self._convert_image(path).SerializeToString())

    def _convert_image(self, img_path):
        """Build a tf.train.Example holding one image plus its metadata."""
        label = self._get_label_with_filename(img_path)
        pixels = mpimg.imread(img_path)
        # Raw pixel bytes; shape is stored alongside for reconstruction.
        raw = pixels.tostring()
        shape = pixels.shape
        name = os.path.basename(img_path)

        def _bytes(value):
            return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

        def _int64(value):
            return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

        return tf.train.Example(features=tf.train.Features(feature={
            'filename': _bytes(name.encode('utf-8')),
            'rows': _int64(shape[0]),
            'cols': _int64(shape[1]),
            'channels': _int64(shape[2]),
            'image': _bytes(raw),
            'label': _int64(label),
        }))

    def _get_label_with_filename(self, filename):
        """Map e.g. '/path/cat_01.jpg' -> self.labels['cat']."""
        stem = os.path.basename(filename).split('.')[0]
        prefix = stem.split('_')[0]
        return self.labels[prefix]
if __name__ == '__main__':
labels = {'cat': 0, 'dog': 1}
t = GenerateTFRecord(labels)
t.convert_image_folder('Images', 'images.tfrecord') | 41.387755 | 106 | 0.662229 |
acf095ba205138219e022f27b19cea5849f76413 | 4,078 | py | Python | e2db/settings.py | ron-wolf/e2db | 768732f8660d6012161754659947d3c794710d36 | [
"MIT"
] | 1 | 2019-01-12T17:08:16.000Z | 2019-01-12T17:08:16.000Z | e2db/settings.py | ron-wolf/e2db | 768732f8660d6012161754659947d3c794710d36 | [
"MIT"
] | null | null | null | e2db/settings.py | ron-wolf/e2db | 768732f8660d6012161754659947d3c794710d36 | [
"MIT"
] | null | null | null | """
Django settings for e2db project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "jz+(s_k(fb3n5qjt#0a(!6i$xsd4u+8!v!^c9=5z664$lmovn9"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'e2db.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'e2db.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| 30.207407 | 91 | 0.716037 |
acf09686b2cde6b589a0a0668c7fc251be906cfe | 5,912 | py | Python | notebooks/demo_video_dect.py | soarflighting/objection_detection_video | 08b9b5131e42b65645b24bf5534df20d5e9b81e6 | [
"Apache-2.0"
] | 2 | 2020-09-21T05:54:23.000Z | 2020-09-27T08:03:01.000Z | notebooks/demo_video_dect.py | soarflighting/objection_detection_video | 08b9b5131e42b65645b24bf5534df20d5e9b81e6 | [
"Apache-2.0"
] | null | null | null | notebooks/demo_video_dect.py | soarflighting/objection_detection_video | 08b9b5131e42b65645b24bf5534df20d5e9b81e6 | [
"Apache-2.0"
] | 1 | 2021-04-06T09:17:24.000Z | 2021-04-06T09:17:24.000Z | '''
对视频中的帧进行检测,并将检测后的帧进行保存
'''
'''
视频检测,视频目标追踪
'''
import os
import cv2
import random
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.cm as mpcm
import matplotlib.image as mpimg
from notebooks import visualization
from nets import ssd_vgg_300,ssd_common,np_methods
from preprocessing import ssd_vgg_preprocessing
import sys
# 当引用模块和运行的脚本不在同一个目录下,需在脚本开头添加如下代码:
sys.path.append('./SSD-Tensorflow/')
slim = tf.contrib.slim
# TensorFlow session
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
isess = tf.InteractiveSession(config=config)
l_VOC_CLASS = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningTable', 'dog', 'horse', 'motorbike', 'person',
'pottedPlant', 'sheep', 'sofa', 'train', 'TV']
# 定义数据格式,设置占位符
net_shape = (300, 300)
# 预处理,以Tensorflow backend, 将输入图片大小改成 300x300,作为下一步输入
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
# 输入图像的通道排列形式,'NHWC'表示 [batch_size,height,width,channel]
data_format = 'NHWC'
# 数据预处理,将img_input输入的图像resize为300大小,labels_pre,bboxes_pre,bbox_img待解析
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
img_input, None, None, net_shape, data_format,
resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
# 拓展为4维变量用于输入
image_4d = tf.expand_dims(image_pre, 0)
# 定义SSD模型
# 是否复用,目前我们没有在训练所以为None
reuse = True if 'ssd_net' in locals() else None
# 调出基于VGG神经网络的SSD模型对象,注意这是一个自定义类对象
ssd_net = ssd_vgg_300.SSDNet()
# 得到预测类和预测坐标的Tensor对象,这两个就是神经网络模型的计算流程
with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
# 导入官方给出的 SSD 模型参数
ckpt_filename = '../checkpoints/ssd_300_vgg.ckpt'
# ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)
print("模型加载完成...")
# 在网络模型结构中,提取搜索网格的位置
# 根据模型超参数,得到每个特征层(这里用了6个特征层,分别是4,7,8,9,10,11)的anchors_boxes
ssd_anchors = ssd_net.anchors(net_shape)
"""
每层的anchors_boxes包含4个arrayList,前两个List分别是该特征层下x,y坐标轴对于原图(300x300)大小的映射
第三,四个List为anchor_box的长度和宽度,同样是经过归一化映射的,根据每个特征层box数量的不同,这两个List元素
个数会变化。其中,长宽的值根据超参数anchor_sizes和anchor_ratios制定。
"""
# 加载辅助作图函数
def colors_subselect(colors, num_classes=21):
dt = len(colors) // num_classes
sub_colors = []
for i in range(num_classes):
color = colors[i * dt]
if isinstance(color[0], float):
sub_colors.append([int(c * 255) for c in color])
else:
sub_colors.append([c for c in color])
return sub_colors
def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=1):
shape = img.shape
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
color = colors[classes[i]]
# Draw bounding box...
p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
# Draw text...
s = '%s/%.3f' % (l_VOC_CLASS[int(classes[i]) - 1], scores[i])
p1 = (p1[0] - 5, p1[1])
cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 1, color, 1)
colors_plasma = colors_subselect(mpcm.plasma.colors, num_classes=21)
# 主流程函数
def process_image(video_path,outPutDirName,select_threshold=0.2, nms_threshold=.1, net_shape=(300, 300)):
print("开始识别...")
# select_threshold:box阈值——每个像素的box分类预测数据的得分会与box阈值比较,高于一个box阈值则认为这个box成功框到了一个对象
# nms_threshold:重合度阈值——同一对象的两个框的重合度高于该阈值,则运行下面去重函数
times = 0
#提取视频的频率,每2帧提取一个
frameFrequency=4
cap = cv2.VideoCapture(video_path)
while True:
times += 1
ret, image_np = cap.read()
if not ret:
print('not res , not image')
break
# 执行SSD模型,得到4维输入变量,分类预测,坐标预测,rbbox_img参数为最大检测范围,本文固定为[0,0,1,1]即全图
rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
feed_dict={img_input: image_np})
# ssd_bboxes_select函数根据每个特征层的分类预测分数,归一化后的映射坐标,
# ancohor_box的大小,通过设定一个阈值计算得到每个特征层检测到的对象以及其分类和坐标
rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(rpredictions, rlocalisations, ssd_anchors,
select_threshold=select_threshold,
img_shape=net_shape,
num_classes=21, decode=True)
# 检测有没有超出检测边缘
rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
# 去重,将重复检测到的目标去掉
rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
# 将box的坐标重新映射到原图上(上文所有的坐标都进行了归一化,所以要逆操作一次)
rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)
bboxes_draw_on_img(image_np, rclasses, rscores, rbboxes, colors_plasma, thickness=8)
if times % frameFrequency == 0:
print("正在保存第{}张图片...".format(times))
cv2.imwrite(outPutDirName + str(times)+'.jpg', image_np)
print("图片识别完毕,,,")
cap.release()
return times
if __name__ == '__main__':
input_video_path = "d:/input_data/posture_video/000099/P01_s10_09_0._color.avi"
output_video_path = "../output/picture/"
times = process_image(input_video_path,output_video_path)
print("本视频一共识别{}张图片...".format(times))
| 37.656051 | 116 | 0.659506 |
acf09751b8d3534c66bdc51b11a5b965124c6f65 | 3,100 | py | Python | scrim/__main__.py | danbradham/scrim | 982a5db1db6e4ef40267f15642af2c7ea0e803ae | [
"MIT"
] | 4 | 2018-06-21T20:14:11.000Z | 2021-04-28T20:34:43.000Z | scrim/__main__.py | danbradham/scrim | 982a5db1db6e4ef40267f15642af2c7ea0e803ae | [
"MIT"
] | null | null | null | scrim/__main__.py | danbradham/scrim | 982a5db1db6e4ef40267f15642af2c7ea0e803ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from pprint import pprint
import click
from scrim.utils import copy_templates, parse_setup, get_console_scripts
@click.group()
def cli():
'''Provides transparent scripts to wrap python cli tools.'''
@cli.command()
@click.option('--entry_point', default=None)
@click.option('--all_entry_points', is_flag=True, default=False)
@click.option('--auto_write', is_flag=True, default=True)
@click.option('--scripts_path', default='bin')
def add(entry_point, all_entry_points, auto_write, scripts_path):
'''Add Scrim scripts for a python project'''
click.echo()
if not entry_point and not all_entry_points:
raise click.UsageError(
'Missing required option: --entry_point or --all_entry_points'
)
if not os.path.exists('setup.py'):
raise click.UsageError('No setup.py found.')
setup_data = parse_setup('setup.py')
console_scripts = get_console_scripts(setup_data)
scripts = []
if all_entry_points and console_scripts:
# Make sure our entry points start with py
for entry in console_scripts:
if not entry.startswith('py'):
click.echo('Your python entry_points must start with py.')
click.echo('Found: ' + entry)
raise click.Abort()
for entry in console_scripts:
click.echo('Found entry_point: ' + entry)
py_entry_point = entry
entry_point = entry[2:]
more_scripts = copy_templates(
entry_point,
py_entry_point,
auto_write,
scripts_path
)
for script in more_scripts:
click.echo(' Created ' + script)
scripts.extend(more_scripts)
elif entry_point:
if not entry_point.startswith('py'):
click.echo('Your python entry_points must start with py.')
raise click.Abort()
if entry_point not in console_scripts:
click.echo(entry_point + ' not found in your setups entry_points')
click.echo('You will need to add it afterward if you continue...')
click.echo('')
click.confirm('Do you want to continue?', abort=True)
click.echo('\nCreating scripts for: ' + entry_point)
py_entry_point = entry_point
entry_point = entry_point[2:]
more_scripts = copy_templates(
entry_point,
py_entry_point,
auto_write,
scripts_path
)
for script in more_scripts:
click.echo(' Created ' + script)
scripts.extend(more_scripts)
click.echo('\n\nAdd the following section to your package setup:\n')
click.echo('scripts=[')
for script in scripts:
click.echo(" '{}',".format(script))
click.echo('],')
@cli.command()
def print_setup():
'''Print setup.py setup kwargs'''
setup_data = parse_setup('setup.py')
pprint(setup_data)
if __name__ == '__main__':
cli()
| 30.693069 | 78 | 0.616774 |
acf0985d9ba5d5fd30572957ab58c52162b9ea4e | 906 | py | Python | kubernetes/test/test_v1_pod_spec.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:44:11.000Z | 2021-06-10T23:44:11.000Z | kubernetes/test/test_v1_pod_spec.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_pod_spec.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2018-11-06T16:33:43.000Z | 2018-11-06T16:33:43.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_pod_spec import V1PodSpec
class TestV1PodSpec(unittest.TestCase):
""" V1PodSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1PodSpec(self):
"""
Test V1PodSpec
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_pod_spec.V1PodSpec()
pass
if __name__ == '__main__':
unittest.main()
| 20.133333 | 105 | 0.690949 |
acf09868fc3ab10bb269c060b61b2db2a9f62e5b | 1,553 | py | Python | app/lib/result.py | lambda47/LightBlog | 948212e1a9b73c91287cbe888245b9aa53dd31fe | [
"Apache-2.0"
] | 1 | 2017-09-18T23:52:32.000Z | 2017-09-18T23:52:32.000Z | app/lib/result.py | lambda47/LightBlog | 948212e1a9b73c91287cbe888245b9aa53dd31fe | [
"Apache-2.0"
] | 6 | 2021-06-08T19:01:59.000Z | 2022-03-11T23:16:17.000Z | app/lib/result.py | lambda47/LightBlog | 948212e1a9b73c91287cbe888245b9aa53dd31fe | [
"Apache-2.0"
] | null | null | null | from pymongo.cursor import Cursor
from functools import wraps
def result(result, t):
""" 包装查找结果
:param result: 查找结果
:param t: 数据类型
:return: 包装后结果
"""
if result is None:
return None
# 查找结果为单个文档
elif isinstance(result, dict):
return t(result)
# 查找结果为多个文档集合
elif isinstance(result, Cursor):
return ResultList(t, result)
else:
raise Exception('参数类型错误')
class ResultList:
""" 查询结果的可迭代对象,成员为指定类型的实例"""
def __init__(self, t, results_list):
self.results = results_list
self.type = t
self.iter = iter(self.results)
def __getattr__(self, item):
"""映射 pymongo Cursor;方法和属性"""
attr = getattr(self.results, item)
if callable(attr):
@wraps(attr)
def func(*args, **kwargs):
result = attr(*args, **kwargs)
if isinstance(result, Cursor):
self.results = result
return self
else:
return result
return func
else:
return attr
def __iter__(self):
return self
def __next__(self):
return self.type(next(self.iter))
def filter(self, *args, **kwargs):
"""
过滤返回结果(调用Model filter函数)
:result 返回array类型
"""
result = []
try:
while True:
t = self.__next__()
result.append(t.filter(*args, **kwargs))
except StopIteration:
return result
| 23.530303 | 56 | 0.526722 |
acf0992688af3e95e246e4361d81c235d518dd53 | 2,397 | py | Python | netnir/plugins/netconf.py | jtdub/netnir | 9d2c3467cf558895af16cd2450198d51f8c4a3d4 | [
"MIT"
] | null | null | null | netnir/plugins/netconf.py | jtdub/netnir | 9d2c3467cf558895af16cd2450198d51f8c4a3d4 | [
"MIT"
] | null | null | null | netnir/plugins/netconf.py | jtdub/netnir | 9d2c3467cf558895af16cd2450198d51f8c4a3d4 | [
"MIT"
] | 1 | 2021-04-09T18:06:08.000Z | 2021-04-09T18:06:08.000Z | from nornir.core.task import Task, Result
from typing import Any
def netconf_capabilities(task: Task, **kwargs: Any) -> Result:
"""nornir get netconf capabilities
:params task: type object
:returns: nornir result object
"""
conn = task.host.get_connection(
connection="netconf", configuration=task.nornir.config
)
results = [capability for capability in conn.server_capabilities]
return Result(host=task.host, result=results)
def netconf_get_config(
task: Task,
source: str = "running",
nc_filter: str = None,
nc_filter_type: str = None,
**kwargs: Any
) -> Result:
"""nornir netconf get config task
:params task: type object
:params source: type str - configuration source
:params nc_filter: type str - netconf filter
:params nc_filter_type: type str
:returns: nornir result object
"""
conn = task.host.get_connection(
connection="netconf", configuration=task.nornir.config
)
if nc_filter and nc_filter_type:
with open(nc_filter) as xml:
nc_filter = xml.read()
result = conn.get_config(source=source, filter=(nc_filter_type, nc_filter))
else:
result = conn.get_config(source=source)
return Result(result=result, host=task.host)
def netconf_edit_config(
task: Task, target: str = "running", nc_config: str = None
) -> Result:
"""nornir netconf edit config task
:params task: type object
:params target: type str - configuration target
:params nc_config: type str - yang config model
:returns: nornir result object
"""
conn = task.host.get_connection(
connection="netconf", configuration=task.nornir.config
)
with open(nc_config) as xml:
nc_config = xml.read()
config_response = conn.edit_config(target=target, config=nc_config)
config_validate = conn.validate(source=target)
if config_response.ok and config_validate.ok:
result = {
"config_response": config_response.ok,
"config_validate": config_validate.ok,
}
failed = False
conn.commit()
else:
result = {
"config_response": config_response.error,
"config_validate": config_validate.error,
}
failed = True
conn.discard_changes()
return Result(result=result, host=task.host, failed=failed)
| 28.2 | 83 | 0.660409 |
acf0996ff1266d915aeabb9ef3a8ff306d1e9121 | 43,346 | py | Python | cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py | ethanga12/cookbooktbd | bc310546f4b05d29a24eff79242c252a086d7260 | [
"Apache-2.0"
] | 1 | 2021-01-15T18:00:01.000Z | 2021-01-15T18:00:01.000Z | cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py | ethanga12/cookbooktbd | bc310546f4b05d29a24eff79242c252a086d7260 | [
"Apache-2.0"
] | null | null | null | cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py | ethanga12/cookbooktbd | bc310546f4b05d29a24eff79242c252a086d7260 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_admin_v1/proto/operation.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.firestore_admin_v1.proto import (
index_pb2 as google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2,
)
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/firestore_admin_v1/proto/operation.proto",
package="google.firestore.admin.v1",
syntax="proto3",
serialized_options=b"\n\035com.google.firestore.admin.v1B\016OperationProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1\352\002#Google::Cloud::Firestore::Admin::V1",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n5google/cloud/firestore_admin_v1/proto/operation.proto\x12\x19google.firestore.admin.v1\x1a\x31google/cloud/firestore_admin_v1/proto/index.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xbd\x02\n\x16IndexOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05index\x18\x03 \x01(\t\x12\x38\n\x05state\x18\x04 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress"\x88\x05\n\x16\x46ieldOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05\x66ield\x18\x03 \x01(\t\x12_\n\x13index_config_deltas\x18\x04 \x03(\x0b\x32\x42.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta\x12\x38\n\x05state\x18\x05 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x07 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x1a\xe7\x01\n\x10IndexConfigDelta\x12\x62\n\x0b\x63hange_type\x18\x01 \x01(\x0e\x32M.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType\x12/\n\x05index\x18\x02 \x01(\x0b\x32 .google.firestore.admin.v1.Index">\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02"\xec\x02\n\x17\x45xportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 
\x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x19\n\x11output_uri_prefix\x18\x07 \x01(\t"\xeb\x02\n\x17ImportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x18\n\x10input_uri_prefix\x18\x07 \x01(\t"4\n\x17\x45xportDocumentsResponse\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t":\n\x08Progress\x12\x16\n\x0e\x65stimated_work\x18\x01 \x01(\x03\x12\x16\n\x0e\x63ompleted_work\x18\x02 \x01(\x03*\x9e\x01\n\x0eOperationState\x12\x1f\n\x1bOPERATION_STATE_UNSPECIFIED\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x0e\n\nPROCESSING\x10\x02\x12\x0e\n\nCANCELLING\x10\x03\x12\x0e\n\nFINALIZING\x10\x04\x12\x0e\n\nSUCCESSFUL\x10\x05\x12\n\n\x06\x46\x41ILED\x10\x06\x12\r\n\tCANCELLED\x10\x07\x42\xe2\x01\n\x1d\x63om.google.firestore.admin.v1B\x0eOperationProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1\xea\x02#Google::Cloud::Firestore::Admin::V1b\x06proto3',
dependencies=[
google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_OPERATIONSTATE = _descriptor.EnumDescriptor(
name="OperationState",
full_name="google.firestore.admin.v1.OperationState",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="OPERATION_STATE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="INITIALIZING",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="PROCESSING",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CANCELLING",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FINALIZING",
index=4,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="SUCCESSFUL",
index=5,
number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FAILED",
index=6,
number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CANCELLED",
index=7,
number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2017,
serialized_end=2175,
)
_sym_db.RegisterEnumDescriptor(_OPERATIONSTATE)
OperationState = enum_type_wrapper.EnumTypeWrapper(_OPERATIONSTATE)
OPERATION_STATE_UNSPECIFIED = 0
INITIALIZING = 1
PROCESSING = 2
CANCELLING = 3
FINALIZING = 4
SUCCESSFUL = 5
FAILED = 6
CANCELLED = 7
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE = _descriptor.EnumDescriptor(
name="ChangeType",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="CHANGE_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="ADD",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="REMOVE",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1105,
serialized_end=1167,
)
_sym_db.RegisterEnumDescriptor(_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE)
_INDEXOPERATIONMETADATA = _descriptor.Descriptor(
name="IndexOperationMetadata",
full_name="google.firestore.admin.v1.IndexOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.IndexOperationMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.IndexOperationMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.firestore.admin.v1.IndexOperationMetadata.index",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.firestore.admin.v1.IndexOperationMetadata.state",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_documents",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_bytes",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=199,
serialized_end=516,
)
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA = _descriptor.Descriptor(
name="IndexConfigDelta",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="change_type",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.change_type",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.index",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=936,
serialized_end=1167,
)
_FIELDOPERATIONMETADATA = _descriptor.Descriptor(
name="FieldOperationMetadata",
full_name="google.firestore.admin.v1.FieldOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.FieldOperationMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.FieldOperationMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="field",
full_name="google.firestore.admin.v1.FieldOperationMetadata.field",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index_config_deltas",
full_name="google.firestore.admin.v1.FieldOperationMetadata.index_config_deltas",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.firestore.admin.v1.FieldOperationMetadata.state",
index=4,
number=5,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_documents",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_bytes",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=519,
serialized_end=1167,
)
_EXPORTDOCUMENTSMETADATA = _descriptor.Descriptor(
name="ExportDocumentsMetadata",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="operation_state",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.operation_state",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_documents",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_bytes",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="collection_ids",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.collection_ids",
index=5,
number=6,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output_uri_prefix",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.output_uri_prefix",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1170,
serialized_end=1534,
)
_IMPORTDOCUMENTSMETADATA = _descriptor.Descriptor(
name="ImportDocumentsMetadata",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="operation_state",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.operation_state",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_documents",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_bytes",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="collection_ids",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.collection_ids",
index=5,
number=6,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="input_uri_prefix",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.input_uri_prefix",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1537,
serialized_end=1900,
)
_EXPORTDOCUMENTSRESPONSE = _descriptor.Descriptor(
name="ExportDocumentsResponse",
full_name="google.firestore.admin.v1.ExportDocumentsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="output_uri_prefix",
full_name="google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1902,
serialized_end=1954,
)
_PROGRESS = _descriptor.Descriptor(
name="Progress",
full_name="google.firestore.admin.v1.Progress",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="estimated_work",
full_name="google.firestore.admin.v1.Progress.estimated_work",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="completed_work",
full_name="google.firestore.admin.v1.Progress.completed_work",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1956,
serialized_end=2014,
)
_INDEXOPERATIONMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INDEXOPERATIONMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INDEXOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE
_INDEXOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_INDEXOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[
"change_type"
].enum_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[
"index"
].message_type = (
google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2._INDEX
)
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.containing_type = _FIELDOPERATIONMETADATA
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE.containing_type = (
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA
)
_FIELDOPERATIONMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_FIELDOPERATIONMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_FIELDOPERATIONMETADATA.fields_by_name[
"index_config_deltas"
].message_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA
_FIELDOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE
_FIELDOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_FIELDOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_EXPORTDOCUMENTSMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EXPORTDOCUMENTSMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EXPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE
_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_IMPORTDOCUMENTSMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_IMPORTDOCUMENTSMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_IMPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE
_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
DESCRIPTOR.message_types_by_name["IndexOperationMetadata"] = _INDEXOPERATIONMETADATA
DESCRIPTOR.message_types_by_name["FieldOperationMetadata"] = _FIELDOPERATIONMETADATA
DESCRIPTOR.message_types_by_name["ExportDocumentsMetadata"] = _EXPORTDOCUMENTSMETADATA
DESCRIPTOR.message_types_by_name["ImportDocumentsMetadata"] = _IMPORTDOCUMENTSMETADATA
DESCRIPTOR.message_types_by_name["ExportDocumentsResponse"] = _EXPORTDOCUMENTSRESPONSE
DESCRIPTOR.message_types_by_name["Progress"] = _PROGRESS
DESCRIPTOR.enum_types_by_name["OperationState"] = _OPERATIONSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IndexOperationMetadata = _reflection.GeneratedProtocolMessageType(
"IndexOperationMetadata",
(_message.Message,),
{
"DESCRIPTOR": _INDEXOPERATIONMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreA
dmin.CreateIndex].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
index:
The index resource that this operation is acting on. For
example: ``projects/{project_id}/databases/{database_id}/colle
ctionGroups/{collection_id}/indexes/{index_id}``
state:
The state of the operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.IndexOperationMetadata)
},
)
_sym_db.RegisterMessage(IndexOperationMetadata)
FieldOperationMetadata = _reflection.GeneratedProtocolMessageType(
"FieldOperationMetadata",
(_message.Message,),
{
"IndexConfigDelta": _reflection.GeneratedProtocolMessageType(
"IndexConfigDelta",
(_message.Message,),
{
"DESCRIPTOR": _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Information about an index configuration change.
Attributes:
change_type:
Specifies how the index is changing.
index:
The index being changed.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta)
},
),
"DESCRIPTOR": _FIELDOPERATIONMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreA
dmin.UpdateField].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
field:
The field resource that this operation is acting on. For
example: ``projects/{project_id}/databases/{database_id}/colle
ctionGroups/{collection_id}/fields/{field_path}``
index_config_deltas:
A list of [IndexConfigDelta][google.firestore.admin.v1.FieldOp
erationMetadata.IndexConfigDelta], which describe the intent
of this operation.
state:
The state of the operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata)
},
)
_sym_db.RegisterMessage(FieldOperationMetadata)
_sym_db.RegisterMessage(FieldOperationMetadata.IndexConfigDelta)
ExportDocumentsMetadata = _reflection.GeneratedProtocolMessageType(
"ExportDocumentsMetadata",
(_message.Message,),
{
"DESCRIPTOR": _EXPORTDOCUMENTSMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.Firest
oreAdmin.ExportDocuments].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
operation_state:
The state of the export operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
collection_ids:
Which collection ids are being exported.
output_uri_prefix:
Where the entities are being exported to.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsMetadata)
},
)
_sym_db.RegisterMessage(ExportDocumentsMetadata)
ImportDocumentsMetadata = _reflection.GeneratedProtocolMessageType(
"ImportDocumentsMetadata",
(_message.Message,),
{
"DESCRIPTOR": _IMPORTDOCUMENTSMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.Firest
oreAdmin.ImportDocuments].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
operation_state:
The state of the import operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
collection_ids:
Which collection ids are being imported.
input_uri_prefix:
The location of the documents being imported.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ImportDocumentsMetadata)
},
)
_sym_db.RegisterMessage(ImportDocumentsMetadata)
ExportDocumentsResponse = _reflection.GeneratedProtocolMessageType(
"ExportDocumentsResponse",
(_message.Message,),
{
"DESCRIPTOR": _EXPORTDOCUMENTSRESPONSE,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Returned in the
[google.longrunning.Operation][google.longrunning.Operation] response
field.
Attributes:
output_uri_prefix:
Location of the output files. This can be used to begin an
import into Cloud Firestore (this project or another project)
after the operation completes successfully.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsResponse)
},
)
_sym_db.RegisterMessage(ExportDocumentsResponse)
Progress = _reflection.GeneratedProtocolMessageType(
"Progress",
(_message.Message,),
{
"DESCRIPTOR": _PROGRESS,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Describes the progress of the operation. Unit of work is generic and
must be interpreted based on where
[Progress][google.firestore.admin.v1.Progress] is used.
Attributes:
estimated_work:
The amount of work estimated.
completed_work:
The amount of work completed.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Progress)
},
)
_sym_db.RegisterMessage(Progress)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.548061 | 3,531 | 0.643289 |
acf0999088449d6a1ac622babea01d457b0e31c8 | 118 | py | Python | util/tripFlow/MySQLOpts.py | hijiangtao/M3C | 2c3221e0503fd948cd5be904120bb4886b9bd106 | [
"Apache-2.0"
] | null | null | null | util/tripFlow/MySQLOpts.py | hijiangtao/M3C | 2c3221e0503fd948cd5be904120bb4886b9bd106 | [
"Apache-2.0"
] | null | null | null | util/tripFlow/MySQLOpts.py | hijiangtao/M3C | 2c3221e0503fd948cd5be904120bb4886b9bd106 | [
"Apache-2.0"
] | 1 | 2020-04-02T13:16:19.000Z | 2020-04-02T13:16:19.000Z |
def getTopNRecords(tableName='midTripFlow', N=100):
	"""Fetch the top-N records from a MySQL table.

	Stub implementation: the database connection, query, and result
	handling are not yet implemented, so this currently always returns
	an empty list.

	:param tableName: name of the table to query (default 'midTripFlow')
	:param N: maximum number of records to return (default 100)
	:return: list of records (currently always empty)
	"""
	# TODO: connect to the database
	# TODO: fetch the record list
	# TODO: return the fetched data
	res = []
	return res
acf099918e7468f51be332cebc7960efb11448dd | 16,300 | py | Python | numpyro/distributions/constraints.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/constraints.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/constraints.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# The implementation follows the design in PyTorch: torch.distributions.constraints.py
#
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Public API of this module: constraint singletons/factories plus the
# Constraint base class re-exported for downstream use.
__all__ = [
    "boolean",
    "circular",
    "corr_cholesky",
    "corr_matrix",
    "dependent",
    "greater_than",
    "integer_interval",
    "integer_greater_than",
    "interval",
    "is_dependent",
    "l1_ball",
    "less_than",
    "lower_cholesky",
    "multinomial",
    "nonnegative_integer",
    "positive",
    "positive_definite",
    "positive_integer",
    "real",
    "real_vector",
    "scaled_unit_lower_cholesky",
    "simplex",
    "sphere",
    "softplus_lower_cholesky",
    "softplus_positive",
    "unit_interval",
    "Constraint",
]
import math
import numpy as np
import jax.numpy
class Constraint(object):
    """Base class for all constraints.

    A constraint describes the region of space over which a variable is
    valid, e.g. the region within which it may be optimized.
    """

    # Discrete-valued constraints (e.g. integers) override this with True.
    is_discrete = False
    # Number of rightmost dimensions that together constitute one event.
    event_dim = 0

    def __call__(self, x):
        # Subclasses implement the actual membership test.
        raise NotImplementedError

    def check(self, value):
        """Return a boolean array of shape ``sample_shape + batch_shape``
        indicating whether each event in ``value`` satisfies this constraint.
        """
        return self.__call__(value)

    def feasible_like(self, prototype):
        """Return a feasible value with the same shape and dtype as ``prototype``."""
        raise NotImplementedError
class _Boolean(Constraint):
    """Constrain to the two-element set ``{0, 1}``."""

    is_discrete = True

    def __call__(self, x):
        is_zero = x == 0
        is_one = x == 1
        return is_zero | is_one

    def feasible_like(self, prototype):
        # 0 is always a member of {0, 1}.
        return jax.numpy.zeros_like(prototype)
class _CorrCholesky(Constraint):
    """Cholesky factors of correlation matrices: lower-triangular matrices
    with positive diagonal whose rows have (approximately) unit norm."""

    event_dim = 2

    def __call__(self, x):
        # Use eager numpy for concrete ndarrays, jax.numpy otherwise.
        jnp = np if isinstance(x, (np.ndarray, np.generic)) else jax.numpy
        batch_shape = x.shape[:-2]
        # (i) zero above the diagonal
        is_lower = jnp.all(
            jnp.reshape(jnp.tril(x) == x, batch_shape + (-1,)), axis=-1
        )
        # (ii) strictly positive diagonal
        diag_positive = jnp.all(jnp.diagonal(x, axis1=-2, axis2=-1) > 0, axis=-1)
        # (iii) every row has Euclidean norm in (1 - 1e-6, 1]
        row_norm = jnp.linalg.norm(x, axis=-1)
        norm_ok = jnp.all((row_norm <= 1) & (row_norm > 1 - 1e-6), axis=-1)
        return is_lower & diag_positive & norm_ok

    def feasible_like(self, prototype):
        # The identity matrix is a valid correlation Cholesky factor.
        eye = jax.numpy.eye(prototype.shape[-1])
        return jax.numpy.broadcast_to(eye, prototype.shape)
class _CorrMatrix(Constraint):
    """Correlation matrices: symmetric, positive definite, unit diagonal."""

    event_dim = 2

    def __call__(self, x):
        jnp = np if isinstance(x, (np.ndarray, np.generic)) else jax.numpy
        # symmetry: x must equal its transpose
        is_symmetric = jnp.all(jnp.all(x == jnp.swapaxes(x, -2, -1), axis=-1), axis=-1)
        # positive definiteness: smallest eigenvalue strictly positive
        is_pd = jnp.linalg.eigh(x)[0][..., 0] > 0
        # diagonal equal to 1 up to a small tolerance
        diag = jnp.diagonal(x, axis1=-2, axis2=-1)
        has_unit_diag = jnp.all(jnp.abs(diag - 1) < 1e-6, axis=-1)
        return is_symmetric & is_pd & has_unit_diag

    def feasible_like(self, prototype):
        # The identity matrix is a valid correlation matrix.
        eye = jax.numpy.eye(prototype.shape[-1])
        return jax.numpy.broadcast_to(eye, prototype.shape)
class _Dependent(Constraint):
    """
    Placeholder for variables whose support depends on other variables.
    These variables obey no simple coordinate-wise constraints.

    :param bool is_discrete: Optional value of ``.is_discrete`` in case this
        can be computed statically. If not provided, access to the
        ``.is_discrete`` attribute will raise a NotImplementedError.
    :param int event_dim: Optional value of ``.event_dim`` in case this can be
        computed statically. If not provided, access to the ``.event_dim``
        attribute will raise a NotImplementedError.
    """

    def __init__(self, *, is_discrete=NotImplemented, event_dim=NotImplemented):
        # NotImplemented is the sentinel for "statically unknown".
        self._is_discrete = is_discrete
        self._event_dim = event_dim
        super().__init__()

    @property
    def is_discrete(self):
        if self._is_discrete is not NotImplemented:
            return self._is_discrete
        raise NotImplementedError(".is_discrete cannot be determined statically")

    @property
    def event_dim(self):
        if self._event_dim is not NotImplemented:
            return self._event_dim
        raise NotImplementedError(".event_dim cannot be determined statically")

    def __call__(self, x=None, *, is_discrete=NotImplemented, event_dim=NotImplemented):
        if x is not None:
            raise ValueError("Cannot determine validity of dependent constraint")

        # Keyword-only call creates a copy with customized static attributes::
        #     constraints.dependent(is_discrete=True, event_dim=1)
        new_is_discrete = (
            self._is_discrete if is_discrete is NotImplemented else is_discrete
        )
        new_event_dim = self._event_dim if event_dim is NotImplemented else event_dim
        return _Dependent(is_discrete=new_is_discrete, event_dim=new_event_dim)
class dependent_property(property, _Dependent):
    """A ``property`` that is simultaneously a dependent constraint.

    Supports both plain decorator usage and parameterized usage::

        @constraints.dependent_property(is_discrete=True, event_dim=1)
        def support(self):
            ...
    """

    def __init__(
        self, fn=None, *, is_discrete=NotImplemented, event_dim=NotImplemented
    ):
        # property.__init__ receives the getter; static attributes are stored
        # on self exactly as in _Dependent.
        super().__init__(fn)
        self._is_discrete = is_discrete
        self._event_dim = event_dim

    def __call__(self, x):
        if not callable(x):
            # Resolves through the MRO past ``property`` (which defines no
            # __call__) to _Dependent.__call__ — presumably raising
            # ValueError for non-None x; confirm against _Dependent.
            return super().__call__(x)

        # Support for syntax to customize static attributes::
        #     @constraints.dependent_property(is_discrete=True, event_dim=1)
        #     def support(self):
        #         ...
        return dependent_property(
            x, is_discrete=self._is_discrete, event_dim=self._event_dim
        )
def is_dependent(constraint):
    """Return True if ``constraint`` is a :class:`_Dependent` placeholder constraint."""
    return isinstance(constraint, _Dependent)
class _GreaterThan(Constraint):
    """Constrain to the open half-line ``(lower_bound, inf)``."""

    def __init__(self, lower_bound):
        # Bound is stored as given; may be a scalar or an array.
        self.lower_bound = lower_bound

    def __call__(self, x):
        # Strict inequality: the bound itself is excluded.
        return x > self.lower_bound

    def feasible_like(self, prototype):
        # ``lower_bound + 1`` always lies strictly inside the support.
        return jax.numpy.broadcast_to(self.lower_bound + 1, jax.numpy.shape(prototype))
class _IndependentConstraint(Constraint):
    """
    Wraps a constraint by aggregating over ``reinterpreted_batch_ndims``-many
    dims in :meth:`check`, so that an event is valid only if all its
    independent entries are valid.
    """

    def __init__(self, base_constraint, reinterpreted_batch_ndims):
        assert isinstance(base_constraint, Constraint)
        assert isinstance(reinterpreted_batch_ndims, int)
        assert reinterpreted_batch_ndims >= 0
        # Flatten nested wrappers: wrapping an _IndependentConstraint simply
        # adds its reinterpreted dims to ours and unwraps to its base.
        if isinstance(base_constraint, _IndependentConstraint):
            reinterpreted_batch_ndims = (
                reinterpreted_batch_ndims + base_constraint.reinterpreted_batch_ndims
            )
            base_constraint = base_constraint.base_constraint
        self.base_constraint = base_constraint
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        super().__init__()

    @property
    def event_dim(self):
        # Total event dims = base constraint's event dims plus the batch dims
        # reinterpreted as event dims.
        return self.base_constraint.event_dim + self.reinterpreted_batch_ndims

    def __call__(self, value):
        result = self.base_constraint(value)
        if self.reinterpreted_batch_ndims == 0:
            return result
        elif jax.numpy.ndim(result) < self.reinterpreted_batch_ndims:
            # Not enough dims in the base result to aggregate over.
            expected = self.event_dim
            raise ValueError(
                f"Expected value.dim() >= {expected} but got {jax.numpy.ndim(value)}"
            )
        # Collapse the trailing reinterpreted dims into one axis ...
        result = result.reshape(
            jax.numpy.shape(result)[
                : jax.numpy.ndim(result) - self.reinterpreted_batch_ndims
            ]
            + (-1,)
        )
        # ... and require every entry along that axis to be valid.
        result = result.all(-1)
        return result

    def feasible_like(self, prototype):
        # A feasible value for the base constraint is feasible here too.
        return self.base_constraint.feasible_like(prototype)
class _LessThan(Constraint):
    """Constrain to the open half-line ``(-inf, upper_bound)``."""

    def __init__(self, upper_bound):
        # Bound is stored as given; may be a scalar or an array.
        self.upper_bound = upper_bound

    def __call__(self, x):
        # Strict inequality: the bound itself is excluded.
        return x < self.upper_bound

    def feasible_like(self, prototype):
        # ``upper_bound - 1`` always lies strictly inside the support.
        return jax.numpy.broadcast_to(self.upper_bound - 1, jax.numpy.shape(prototype))
class _IntegerInterval(Constraint):
    """Constrain to integers in the closed range ``[lower_bound, upper_bound]``."""

    is_discrete = True

    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def __call__(self, x):
        in_range = (x >= self.lower_bound) & (x <= self.upper_bound)
        # Integrality is tested via the remainder mod 1.
        is_integral = x % 1 == 0
        return in_range & is_integral

    def feasible_like(self, prototype):
        # The lower bound itself is always inside the support.
        return jax.numpy.broadcast_to(self.lower_bound, jax.numpy.shape(prototype))
class _IntegerGreaterThan(Constraint):
    """Constrain to integers greater than or equal to ``lower_bound``."""

    is_discrete = True

    def __init__(self, lower_bound):
        self.lower_bound = lower_bound

    def __call__(self, x):
        # Integrality is tested via the remainder mod 1.
        is_integral = x % 1 == 0
        bounded_below = x >= self.lower_bound
        return is_integral & bounded_below

    def feasible_like(self, prototype):
        # The lower bound itself is always inside the support.
        return jax.numpy.broadcast_to(self.lower_bound, jax.numpy.shape(prototype))
class _Interval(Constraint):
    """Constrain to the closed interval ``[lower_bound, upper_bound]``."""

    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def __call__(self, x):
        above = x >= self.lower_bound
        below = x <= self.upper_bound
        return above & below

    def feasible_like(self, prototype):
        # The midpoint of the interval is always a member.
        midpoint = (self.lower_bound + self.upper_bound) / 2
        return jax.numpy.broadcast_to(midpoint, jax.numpy.shape(prototype))
class _LowerCholesky(Constraint):
    """Lower-triangular square matrices with a strictly positive diagonal."""

    event_dim = 2

    def __call__(self, x):
        # Use eager numpy for concrete ndarrays, jax.numpy otherwise.
        jnp = np if isinstance(x, (np.ndarray, np.generic)) else jax.numpy
        batch_shape = x.shape[:-2]
        # Entries above the diagonal must all be zero.
        is_lower = jnp.all(
            jnp.reshape(jnp.tril(x) == x, batch_shape + (-1,)), axis=-1
        )
        # The diagonal must be strictly positive.
        diag_positive = jnp.all(jnp.diagonal(x, axis1=-2, axis2=-1) > 0, axis=-1)
        return is_lower & diag_positive

    def feasible_like(self, prototype):
        # The identity matrix is a valid lower-Cholesky factor.
        eye = jax.numpy.eye(prototype.shape[-1])
        return jax.numpy.broadcast_to(eye, prototype.shape)
class _Multinomial(Constraint):
    """Nonnegative integer count vectors summing exactly to ``upper_bound``."""

    is_discrete = True
    event_dim = 1

    def __init__(self, upper_bound):
        # Total count each event vector must sum to; may be batched.
        self.upper_bound = upper_bound

    def __call__(self, x):
        # Every count is nonnegative and the counts sum to the total.
        return (x >= 0).all(axis=-1) & (x.sum(axis=-1) == self.upper_bound)

    def feasible_like(self, prototype):
        # Build [upper_bound, 0, ..., 0] along the event axis: pad the
        # expanded bound with (event_size - 1) trailing zeros, padding no
        # other (batch) axes.
        pad_width = ((0, 0),) * jax.numpy.ndim(self.upper_bound) + (
            (0, prototype.shape[-1] - 1),
        )
        value = jax.numpy.pad(jax.numpy.expand_dims(self.upper_bound, -1), pad_width)
        return jax.numpy.broadcast_to(value, prototype.shape)
class _L1Ball(Constraint):
    """
    Constrain to the L1 ball of any dimension.
    """

    event_dim = 1
    reltol = 10.0  # Relative to finfo.eps.

    def __call__(self, x):
        jnp = np if isinstance(x, (np.ndarray, np.generic)) else jax.numpy
        # Allow a small dtype-dependent slack past 1 for rounding error.
        threshold = 1 + self.reltol * jnp.finfo(x.dtype).eps
        return jnp.abs(x).sum(axis=-1) < threshold

    def feasible_like(self, prototype):
        # The origin lies strictly inside the ball.
        return jax.numpy.zeros_like(prototype)
class _OrderedVector(Constraint):
    """Constrain to vectors with strictly increasing entries."""

    event_dim = 1

    def __call__(self, x):
        # Every consecutive pair must be strictly increasing.
        increasing_pairs = x[..., 1:] > x[..., :-1]
        return increasing_pairs.all(axis=-1)

    def feasible_like(self, prototype):
        # 0.0, 1.0, 2.0, ... is strictly increasing by construction.
        return jax.numpy.broadcast_to(
            jax.numpy.arange(float(prototype.shape[-1])), prototype.shape
        )
class _PositiveDefinite(Constraint):
    """Constrain to symmetric positive-definite matrices."""

    event_dim = 2

    def __call__(self, x):
        jnp = np if isinstance(x, (np.ndarray, np.generic)) else jax.numpy
        # Symmetry: the matrix equals its transpose over the last two axes.
        transposed = jnp.swapaxes(x, -2, -1)
        symmetric = jnp.all(jnp.all(x == transposed, axis=-1), axis=-1)
        # Positive definiteness: eigh returns eigenvalues in ascending order,
        # so checking the first one suffices.
        smallest_eigenvalue = jnp.linalg.eigh(x)[0][..., 0]
        return symmetric & (smallest_eigenvalue > 0)

    def feasible_like(self, prototype):
        # The identity matrix is symmetric positive definite.
        return jax.numpy.broadcast_to(
            jax.numpy.eye(prototype.shape[-1]), prototype.shape
        )
class _PositiveOrderedVector(Constraint):
    """
    Constrains to a positive real-valued tensor where the elements are monotonically
    increasing along the `event_shape` dimension.
    """

    event_dim = 1

    def __call__(self, x):
        # Compose the two module-level singleton constraints.
        is_ordered = ordered_vector.check(x)
        is_positive = independent(positive, 1).check(x)
        return is_ordered & is_positive

    def feasible_like(self, prototype):
        # exp(0), exp(1), ... is positive and strictly increasing.
        return jax.numpy.broadcast_to(
            jax.numpy.exp(jax.numpy.arange(float(prototype.shape[-1]))), prototype.shape
        )
class _Real(Constraint):
    """Constrain to finite real values (rejects NaN and +/-inf)."""

    def __call__(self, x):
        # XXX: consider to relax this condition to [-inf, inf] interval
        # x == x is False only for NaN; the remaining tests exclude infinities.
        not_nan = x == x
        finite = (x != float("inf")) & (x != float("-inf"))
        return not_nan & finite

    def feasible_like(self, prototype):
        return jax.numpy.zeros_like(prototype)
class _Simplex(Constraint):
    """Constrain to the probability simplex: nonnegative entries summing to 1."""

    event_dim = 1

    def __call__(self, x):
        total = x.sum(axis=-1)
        nonnegative = (x >= 0).all(axis=-1)
        # Tolerate 1e-6 of absolute floating-point error around the unit sum.
        sums_to_one = (total < 1 + 1e-6) & (total > 1 - 1e-6)
        return nonnegative & sums_to_one

    def feasible_like(self, prototype):
        # The uniform distribution over the last axis lies on the simplex.
        return jax.numpy.full_like(prototype, 1 / prototype.shape[-1])
class _SoftplusPositive(_GreaterThan):
    """Positive reals; feasible point is log(2), i.e. softplus evaluated at 0."""

    def __init__(self):
        super().__init__(lower_bound=0.0)

    def feasible_like(self, prototype):
        feasible_value = np.log(2)
        return jax.numpy.full(jax.numpy.shape(prototype), feasible_value)
class _SoftplusLowerCholesky(_LowerCholesky):
    """Lower-Cholesky support whose feasible point carries log(2) on the diagonal."""

    def feasible_like(self, prototype):
        # log(2) * I: lower-triangular with a positive diagonal.
        diagonal_value = np.log(2)
        return jax.numpy.broadcast_to(
            jax.numpy.eye(prototype.shape[-1]) * diagonal_value, prototype.shape
        )
class _ScaledUnitLowerCholesky(_LowerCholesky):
    """Same support as `_LowerCholesky`.

    NOTE(review): the body is empty, so this appears to exist only as a
    distinct type (e.g. for registering a different transform/bijector
    against it) — confirm against the transform registry.
    """

    pass
class _Sphere(Constraint):
    """
    Constrain to the Euclidean sphere of any dimension.
    """

    event_dim = 1
    reltol = 10.0  # Relative to finfo.eps.

    def __call__(self, x):
        jnp = np if isinstance(x, (np.ndarray, np.generic)) else jax.numpy
        eps = jnp.finfo(x.dtype).eps
        # How far the vector's norm deviates from 1; the tolerance scales with
        # sqrt(dimension) to absorb accumulated rounding in the norm.
        deviation = jnp.abs(jnp.linalg.norm(x, axis=-1) - 1)
        return deviation < self.reltol * eps * x.shape[-1] ** 0.5

    def feasible_like(self, prototype):
        # A constant vector with entries d**-0.5 has unit Euclidean norm.
        return jax.numpy.full_like(prototype, prototype.shape[-1] ** (-0.5))
# TODO: Make types consistent
# See https://github.com/pytorch/pytorch/issues/50616

# Public constraint registry.
# Parameter-free constraints are exported as singleton instances;
# parameterized constraints (greater_than, interval, multinomial, ...) are
# exported as the bare class, to be instantiated by the caller.
boolean = _Boolean()
circular = _Interval(-math.pi, math.pi)
corr_cholesky = _CorrCholesky()
corr_matrix = _CorrMatrix()
dependent = _Dependent()
# Classes (require constructor arguments):
greater_than = _GreaterThan
less_than = _LessThan
independent = _IndependentConstraint
integer_interval = _IntegerInterval
integer_greater_than = _IntegerGreaterThan
interval = _Interval
l1_ball = _L1Ball()
lower_cholesky = _LowerCholesky()
scaled_unit_lower_cholesky = _ScaledUnitLowerCholesky()
multinomial = _Multinomial
# Common integer-bound specializations:
nonnegative_integer = _IntegerGreaterThan(0)
ordered_vector = _OrderedVector()
positive = _GreaterThan(0.0)
positive_definite = _PositiveDefinite()
positive_integer = _IntegerGreaterThan(1)
positive_ordered_vector = _PositiveOrderedVector()
real = _Real()
# Vector of finite reals (event_dim=1 wrapper around `real`):
real_vector = independent(real, 1)
simplex = _Simplex()
softplus_lower_cholesky = _SoftplusLowerCholesky()
softplus_positive = _SoftplusPositive()
sphere = _Sphere()
unit_interval = _Interval(0.0, 1.0)
| 31.960784 | 108 | 0.662209 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.