id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
5187107 | from django.http import JsonResponse
from django.views.generic import UpdateView
from .models import Organization
class AvatarUploadView(UpdateView):
    """Accept an avatar image upload for an Organization.

    Uses Django's UpdateView form machinery but returns JSON instead of the
    usual redirect, so it can be driven by an AJAX upload widget.
    """

    model = Organization
    # Only the avatar field is editable through this endpoint.
    fields = ["avatar_image"]

    def form_valid(self, form):
        """
        override this to simply return a status 200 instead of a redirect to success url
        """
        # TODO check permission
        # NOTE(review): no permission/ownership check is performed — any user
        # reaching this view can replace the avatar. Confirm an auth mixin is
        # applied in the URLconf.
        self.object = form.save()
        # todo thumbnail this image right here?
        # Return the stored file's URL so the client can refresh the preview.
        return JsonResponse(data={"url": self.object.avatar_image.url}, status=200)
| StarcoderdataPython |
3523543 | <filename>src/serialbox-python/sdb/sdbgui/popuphalodescriptorwidget.py<gh_stars>1-10
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import (QLabel, QVBoxLayout, QHBoxLayout, QHeaderView, QSizePolicy, QTableView,
QPushButton)
from sdbcore.halos import Halos
from sdbcore.logger import Logger
from sdbgui.icon import Icon
from sdbgui.popupwidget import PopupWidget
class PopupHaloDescriptorWidget(PopupWidget):
    """Popup dialog for entering per-dimension halo extents.

    Shows a two-column (Minus/Plus) table with one row per dimension, plus
    buttons to add/remove dimensions. Use :meth:`get_halos` to read the
    result back as a :class:`Halos` object.
    """

    def __init__(self, parent):
        super().__init__(parent, 0.25, 0.5)
        self.setWindowTitle("Halo descriptor")

        self.__widget_label_title = QLabel("Set the halos for each dimension.", parent=self)

        self.__halo_model = QStandardItemModel(0, 2)
        self.__halo_model.setHorizontalHeaderLabels(["Minus", "Plus"])
        self.__halo_model.horizontalHeaderItem(0).setToolTip("Negative halo extent")
        self.__halo_model.horizontalHeaderItem(1).setToolTip("Positive halo extent")

        self.__widget_table = QTableView(self)
        self.__widget_table.setModel(self.__halo_model)
        # Fixed: the widget is a QTableView, so the previous "QTableWidget"
        # selector never matched and the rule was silently ignored.
        self.__widget_table.setStyleSheet(
            '''
            QTableView::item:selected:active {
                background: transparent;
                border-width: 0px;
            }
            ''')
        self.__widget_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)

        # Assume 3 dimension by default
        for i in range(3):
            self.add_row()

        self.__widget_button_add = QPushButton(self)
        self.__widget_button_add.setIcon(Icon("edit_add.png"))
        self.__widget_button_add.clicked.connect(self.add_row)
        self.__widget_button_add.setToolTip("Add a dimension")

        self.__widget_button_remove = QPushButton(self)
        self.__widget_button_remove.setIcon(Icon("edit_remove.png"))
        self.__widget_button_remove.clicked.connect(self.remove_row)
        self.__widget_button_remove.setToolTip("Remove last dimension")

        self.__widget_button_ok = QPushButton("Done", parent=self)
        self.__widget_button_ok.clicked.connect(self.done)

        hbox_bottom = QHBoxLayout()
        hbox_bottom.addWidget(self.__widget_button_add)
        hbox_bottom.addWidget(self.__widget_button_remove)
        hbox_bottom.addStretch(1)
        hbox_bottom.addWidget(self.__widget_button_ok)

        vbox = QVBoxLayout()
        vbox.addWidget(self.__widget_label_title)
        vbox.addWidget(self.__widget_table)
        vbox.addLayout(hbox_bottom)

        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.setLayout(vbox)

    def add_row(self):
        """Append one dimension row with both halo extents initialized to 0."""
        Logger.info("Adding row")
        minus = QStandardItem()
        minus.setTextAlignment(Qt.AlignCenter)
        minus.setText("0")
        plus = QStandardItem()
        plus.setTextAlignment(Qt.AlignCenter)
        plus.setText("0")
        self.__halo_model.appendRow([minus, plus])

    def remove_row(self):
        """Remove the last dimension row (removeRow(-1) is a no-op when empty)."""
        Logger.info("Removing row")
        self.__halo_model.removeRow(self.__halo_model.rowCount() - 1)

    def done(self):
        # NOTE(review): if PopupWidget ultimately derives from QDialog, this
        # shadows QDialog.done(int) with a different signature — confirm intended.
        self.close()

    def get_halos(self):
        """Return the table contents as a Halos object.

        A Plus value of 0 is stored as None.

        Raises:
            RuntimeError: if any cell is not an integer or is negative.
        """
        halos = []
        try:
            for i in range(self.__halo_model.rowCount()):
                minus = int(self.__halo_model.item(i, 0).text())
                plus = int(self.__halo_model.item(i, 1).text())
                if minus < 0 or plus < 0:
                    raise RuntimeError(
                        "Invalid halo boundary (%i, %i) in dimension %s: halo boundaries must be positive numbers." % (
                            minus, plus, i))
                # Fixed: value comparison with == instead of 'is'. Identity on
                # int literals depends on CPython's small-int cache and raises
                # a SyntaxWarning on Python 3.8+.
                halos += [[minus, None if plus == 0 else plus]]
        except ValueError as e:
            # Chain the exception so the original int() parse error is preserved.
            raise RuntimeError(str(e)) from e
        return Halos(halos)
| StarcoderdataPython |
162686 | <filename>ndflow/tools/match.py
import argparse
import multiprocessing
import os
import pickle
from ndflow import api
def match_single(source_gmm_path: str, target_gmm_path: str, output_path: str):
    """Align and match one source GMM to a target GMM and pickle the result.

    The output pickle contains the alignment, the aligned GMM and the
    matched GMM under the keys 'alignment', 'aligned_gmm' and 'matched_gmm'.
    """
    def _load_gmm(path):
        # GMM pickles store a dict; the model itself lives under 'gmm'.
        with open(path, 'rb') as fp:
            return pickle.load(fp)['gmm']

    source_gmm = _load_gmm(source_gmm_path)
    target_gmm = _load_gmm(target_gmm_path)

    print(f"Aligning GMM {source_gmm_path} to {target_gmm_path}...")
    aligned_gmm, alignment = api.align(source_gmm, target_gmm)
    matched_gmm = api.match(aligned_gmm, target_gmm)

    result = {'alignment': alignment,
              'aligned_gmm': aligned_gmm,
              'matched_gmm': matched_gmm}
    with open(output_path, 'wb') as fp:
        pickle.dump(result, fp, pickle.HIGHEST_PROTOCOL)
    print(f"Matched GMM saved to {output_path}")
def _gmm_and_match_paths(gmm_filename, gmms_dir, matches_dir):
    """Return the (gmm_path, match_path) pair for one GMM file name.

    The match file name is derived by swapping the GMM filename suffix for
    the match filename suffix.
    """
    match_filename = gmm_filename.replace(api.GMM_FILENAME_SUFFIX,
                                          api.MATCH_FILENAME_SUFFIX)
    return (os.path.join(gmms_dir, gmm_filename),
            os.path.join(matches_dir, match_filename))
def _match_single(args):
    # Adapter for multiprocessing.Pool.imap_unordered, which passes a single
    # (source, target, output) tuple; unpack it into match_single's signature.
    return match_single(*args)
def match_group(source_gmms_dir: str, target_gmm_path: str, output_dir: str):
    """Match every GMM file in *source_gmms_dir* against the target GMM.

    Work is fanned out over a multiprocessing pool; results are written next
    to each other in *output_dir* (created if missing).
    """
    os.makedirs(output_dir, exist_ok=True)

    tasks = []
    for gmm_filename in os.listdir(source_gmms_dir):
        if not gmm_filename.endswith(api.GMM_FILENAME_SUFFIX):
            continue
        source_gmm_path, output_path = \
            _gmm_and_match_paths(gmm_filename, source_gmms_dir, output_dir)
        tasks.append((source_gmm_path, target_gmm_path, output_path))

    with multiprocessing.Pool() as pool:
        # Drain the iterator so all tasks actually run; order is irrelevant.
        for _ in pool.imap_unordered(_match_single, tasks):
            pass
def main():
    """Command-line entry point: match one GMM file or a directory of them."""
    arg_parser = argparse.ArgumentParser(description="NDFlow - density matching")
    arg_parser.add_argument('source',
                            help="source GMM file or directory. If a directory is given, "
                                 "will process all GMM files inside it.")
    arg_parser.add_argument('target',
                            help="target GMM file")
    arg_parser.add_argument('-o', '--output',
                            help="output directory. Defaults to source directory.")
    opts = arg_parser.parse_args()

    target_gmm_path = opts.target
    if os.path.isfile(opts.source):
        # Single-file mode: match exactly one GMM.
        gmms_dir, gmm_filename = os.path.split(opts.source)
        output_dir = opts.output if opts.output is not None else gmms_dir
        gmm_path, match_path = _gmm_and_match_paths(gmm_filename, gmms_dir, output_dir)
        match_single(gmm_path, target_gmm_path, match_path)
    else:
        # Directory mode: process every GMM file found inside.
        gmms_dir = opts.source
        output_dir = opts.output if opts.output is not None else gmms_dir
        match_group(gmms_dir, target_gmm_path, output_dir)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
221982 | from typing import NamedTuple, List, Dict, Any
import tensorflow as tf
import logging
import argparse
import datetime
import numpy as np
import json
import numbers
from kite.asserts.asserts import FieldValidator
from kite.model.model import TrainInputs, Config as BaseConfig, AdamTrainer, Model as BaseModel
from kite.utils.aggregator import SummaryInfo
from kite.data.line_feeder import LineFeeder
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
class Feed(NamedTuple):
    """A batch of calibration examples: logits, labels and temperature types.

    All three fields are parallel lists indexed by example; logits and
    temperature_types rows are parallel over the vocabulary dimension.
    """
    logits: List[List[int]]
    labels: List[int]
    temperature_types: List[List[int]]

    @classmethod
    def from_json(cls, d: dict) -> 'Feed':
        """Build a single-example Feed from one decoded JSON record."""
        validator = FieldValidator(cls, d)
        return Feed(
            logits=[validator.get_list('logits', numbers.Real)],
            labels=[validator.get('label', int)],
            temperature_types=[validator.get_list('temperature_types', int)],
        )

    def append(self, other: 'Feed'):
        """Merge *other*'s rows into this Feed, in place."""
        field_pairs = ((self.logits, other.logits),
                       (self.labels, other.labels),
                       (self.temperature_types, other.temperature_types))
        for mine, theirs in field_pairs:
            mine.extend(theirs)

    def validate(self):
        """Assert that batch sizes and per-row vocab sizes are consistent."""
        assert len(self.labels) == len(self.logits) == len(self.temperature_types), f'{len(self.labels)}, {len(self.logits)}, {len(self.temperature_types)}'
        for i, l in enumerate(self.logits):
            assert len(l) == len(self.logits[0]) == len(self.temperature_types[i]), f'{len(l)}, {len(self.logits[0])}, {len(self.temperature_types[i])}'
class Feeder(LineFeeder):
    """LineFeeder that decodes batches of JSON lines into Feed objects."""

    @staticmethod
    def _from_lines(lines: List[str]) -> Feed:
        # Start from an empty Feed and merge every decoded line into it.
        merged = Feed(logits=[], labels=[], temperature_types=[])
        for raw_line in lines:
            merged.append(Feed.from_json(json.loads(raw_line)))
        return merged

    def next(self) -> Feed:
        """Return the next batch of lines as a single Feed."""
        return self._from_lines(super().next())

    def all(self) -> Feed:
        """Return all remaining lines as one large Feed."""
        return self._from_lines(super().all())
class Placeholders(object):
    """TF1 input placeholders for the temperature-scaling graph."""

    def __init__(self):
        with tf.name_scope('placeholders'):
            # shape [batch, vocab]
            self.logits = tf.placeholder(tf.float32, [None, None], name='logits')
            # shape [batch]
            self.labels = tf.placeholder(tf.int64, [None], name='labels')
            # shape [batch, vocab], elements are either 0 or 1
            # (indices into the per-type temperature vector)
            self.temperature_types = tf.placeholder(tf.int64, [None, None], name='temperature_types')

    def feed_dict(self, feed: Feed) -> Dict[tf.Tensor, Any]:
        """Map one Feed batch onto the graph placeholders."""
        return {
            self.temperature_types: feed.temperature_types,
            self.labels: feed.labels,
            self.logits: feed.logits,
        }
class Model(BaseModel):
    """Temperature-scaling calibration model.

    Learns one scalar temperature per token type and minimizes NLL of the
    temperature-scaled logits; accuracy is tracked only as a sanity check
    (scaling by a positive temperature cannot change the argmax).
    """

    def __init__(self, num_temperature_types: int):
        self._placeholders = Placeholders()
        self._build_model(num_temperature_types)
        self._build_loss()
        self._build_scalars()
        # Copy so later mutation of _scalars cannot affect the summary set.
        self._summaries = {k: v for k, v in self._scalars.items()}

    def _build_model(self, num_temperature_types: int):
        with tf.name_scope('model'):
            # shape [num_temperature_types]
            # models are typically over confident so initialize all temps to 1.5
            self._temperatures = tf.get_variable(
                name='temperatures', shape=[num_temperature_types], dtype=tf.float32,
                initializer=tf.constant_initializer(1.5 * np.ones(shape=[num_temperature_types], dtype=np.float32)),
            )

    def _build_loss(self):
        with tf.name_scope('loss'):
            # scale logits by inverse temperature
            # [batch, vocab]: look up each position's temperature by its type index
            temperatures = tf.gather(self._temperatures, self._placeholders.temperature_types, name='temperatures')
            # [batch, vocab] / [batch, vocab]
            self._logits = tf.identity(
                self._placeholders.logits / temperatures, name='scaled_logits',
            )
            # NLL loss == cross entropy loss with one hot encoding for labels
            ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self._placeholders.labels, logits=self._logits, name='cross_entropy'
            )
            self._loss = tf.reduce_mean(ce, name='loss')

    def _build_scalars(self):
        with tf.name_scope('scalars'):
            # accuracy should not change, so we just add it as a sanity check
            # [batch]
            predicted = tf.argmax(self._logits, axis=-1, name='predicted')
            equal_first = tf.cast(tf.equal(predicted, self._placeholders.labels), tf.float32, name='acc_equal')
            acc = tf.reduce_mean(equal_first, name='acc')
            self._scalars = dict(acc=acc)

    def temperatures(self) -> tf.Tensor:
        """Return the learned per-type temperature variable."""
        return self._temperatures

    def feed_dict(self, feed: Feed, train: bool) -> Dict[tf.Tensor, Any]:
        # 'train' is part of the BaseModel interface but unused here:
        # calibration feeds the same tensors either way.
        return self._placeholders.feed_dict(feed)

    def loss(self) -> tf.Tensor:
        """Return the mean NLL loss tensor."""
        return self._loss

    def summary_infos(self) -> List[SummaryInfo]:
        return [SummaryInfo(k) for k in self._scalars]

    def summaries_to_fetch(self) -> Dict[str, tf.Tensor]:
        return self._summaries
def train_gradient_descent(train: str, validate: str, tensorboard: str, steps: int, num_temp_types: int) -> np.ndarray:
    """Fit the temperature weights with Adam over mini-batches.

    Args:
        train: directory of training sample files (JSON lines).
        validate: directory of validation sample files.
        tensorboard: directory for TensorBoard summaries.
        steps: number of optimizer steps.
        num_temp_types: number of distinct temperature types to learn.

    Returns:
        The fitted temperature weights as a numpy array.
    """
    model = Model(num_temperature_types=num_temp_types)
    trainer = AdamTrainer(model, BaseConfig(steps=steps, learning_rate=0.001, skip_grad_summaries=True))
    sw = tf.summary.FileWriter(tensorboard)
    train_feeder = Feeder(in_dir=train, cycle=False, batch_size=100)
    validate_feeder = Feeder(in_dir=validate, cycle=False, batch_size=100)
    with tf.Session() as sess:
        try:
            start = datetime.datetime.now()
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            starting_step = 0
            ti = TrainInputs(session=sess, train_feeder=train_feeder, val_feeder=validate_feeder,
                             summary_writer=sw, summary_interval=10, validation_interval=30,
                             starting_step=starting_step, checkpoint_interval=int(500))
            trainer.train(ti)
            end = datetime.datetime.now()
            logging.info(f'Done training, took {end-start}')
            # Read the fitted temperatures while the session is still open.
            weights = sess.run(model.temperatures())
        finally:
            # Always stop the feeders so their background threads exit.
            train_feeder.stop()
            validate_feeder.stop()
    return weights
def train_lbfgs(train: str, steps: int, num_temp_types: int) -> np.ndarray:
    """Fit the temperature weights with SciPy's L-BFGS on one full batch.

    based on:
    https://github.com/markdtw/temperature-scaling-tensorflow/blob/master/temp_scaling.py
    https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py

    Args:
        train: directory of training sample files (JSON lines).
        steps: maximum number of optimizer iterations.
        num_temp_types: number of distinct temperature types to learn.

    Returns:
        The fitted temperature weights as a numpy array.
    """
    train_feeder = Feeder(in_dir=train)
    # build one large batch for the optimizer
    batch = train_feeder.all()
    batch.validate()
    model = Model(num_temperature_types=num_temp_types)
    with tf.Session() as sess:
        try:
            start = datetime.datetime.now()
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            fd = model.feed_dict(batch, True)
            nll = sess.run([model.loss()], feed_dict=fd)
            logging.info(f'nll before {nll}')
            optimizer = tf.contrib.opt.ScipyOptimizerInterface(
                model.loss(), options={'maxiter': steps},
            )
            optimizer.minimize(sess, feed_dict=fd)
            nll = sess.run([model.loss()], feed_dict=fd)
            # Fixed log-message typo ('afer' -> 'after').
            logging.info(f'nll after {nll}')
            end = datetime.datetime.now()
            logging.info(f'Done training, took {end-start}')
            # Read the fitted temperatures while the session is still open.
            weights = sess.run(model.temperatures())
        finally:
            # Always stop the feeder so its background thread exits.
            train_feeder.stop()
    return weights
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--insearch', type=str, required=True)
parser.add_argument('--outsearch', type=str, required=True)
parser.add_argument('--tensorboard', type=str, default='tensorboard')
parser.add_argument('--steps', type=int, default=1000)
parser.add_argument('--train_samples', type=str, default='../traindata/train_samples/')
parser.add_argument('--validate_samples', type=str, default='../traindata/validate_samples/')
parser.add_argument('--gradient_descent', type=bool, default=False)
parser.add_argument('--num_temperature_types', type=int, default=2)
args = parser.parse_args()
if args.gradient_descent:
weights = train_gradient_descent(
args.train_samples, args.validate_samples, args.tensorboard, args.steps, args.num_temperature_types)
else:
weights = train_lbfgs(args.train_samples, args.steps, args.num_temperature_types)
logging.info(f'Done! Got weights: {weights}')
with open(args.insearch, 'r') as f:
old = json.load(f)
old['IdentTemperature'] = float(weights[0])
old['LexicalTemperature'] = float(weights[1])
old['UseTemperatureScaling'] = True
with open(args.outsearch, 'w') as f:
json.dump(old, f)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1971189 | # Copyright 2021 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import string
import sys
import typing
from html.parser import HTMLParser
from pathlib import Path
import numpy as np
import resampy
import tokenizer
import torch
from flask import current_app
from proto.tiro.tts import voice_pb2
from src import ffmpeg
from src.frontend.grapheme_to_phoneme import GraphemeToPhonemeTranslatorBase
from src.frontend.lexicon import LangID, SimpleInMemoryLexicon
from src.frontend.normalization import (
BasicNormalizer,
GrammatekNormalizer,
NormalizerBase,
)
from src.frontend.phonemes import IPA_XSAMPA_MAP, XSAMPA_IPA_MAP, align_ipa_from_xsampa
from src.frontend.words import WORD_SENTENCE_SEPARATOR, Word
from .voice_base import OutputFormat, VoiceBase, VoiceProperties
if True: # noqa: E402
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../lib/fastspeech"))
from src.lib.fastspeech import hparams as hp
from src.lib.fastspeech import utils
from src.lib.fastspeech.align_phonemes import Aligner
from src.lib.fastspeech.g2p_is import translate as g2p
from src.lib.fastspeech.synthesize import get_FastSpeech2
from src.lib.fastspeech.text import text_to_sequence
class SSMLParser(HTMLParser):
    """Minimal SSML parser supporting only <speak> and <phoneme> tags.

    Collects plain text and x-sampa phoneme strings (converted to IPA and
    wrapped in {}) into a single string for the FastSpeech2 frontend.
    """

    _ALLOWED_TAGS = ["speak", "phoneme"]
    _first_tag_seen: bool
    _tags_queue: typing.List[str]
    _prepared_fastspeech_strings: typing.List[str]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._first_tag_seen = False
        self._tags_queue = []
        self._prepared_fastspeech_strings = []

    def _check_first_tag(self, tag):
        # The document must start with <speak>.
        if not self._first_tag_seen:
            if tag != "speak":
                raise ValueError("Start tag is not <speak>")
            self._first_tag_seen = True

    def handle_starttag(self, tag, attrs):
        self._check_first_tag(tag)
        if tag not in SSMLParser._ALLOWED_TAGS:
            raise ValueError("Unsupported tag encountered: '{}'".format(tag))
        if tag == "phoneme":
            attrs_map = dict(attrs)
            if attrs_map.get("alphabet") != "x-sampa" or "ph" not in attrs_map:
                raise ValueError(
                    "'phoneme' tag has to have 'alphabet' and 'ph' attributes"
                )
            self._prepared_fastspeech_strings.append(
                "{%s}" % align_ipa_from_xsampa(attrs_map["ph"])
            )
        self._tags_queue.append(tag)

    def handle_endtag(self, tag):
        # Fixed: a stray closing tag used to raise IndexError from pop() on
        # an empty stack; report it as a ValueError like other SSML errors.
        if not self._tags_queue:
            raise ValueError("Closing tag '{}' has no matching open tag".format(tag))
        open_tag = self._tags_queue.pop()
        if open_tag != tag:
            raise ValueError("Invalid closing tag '{}' for '{}'".format(tag, open_tag))

    def handle_data(self, data):
        # Raise a ValueError if we haven't seen the initial <speak> tag
        self._check_first_tag("")
        # Fixed: data after </speak> (commonly a trailing newline) used to
        # raise IndexError on self._tags_queue[-1]; ignore pure whitespace
        # there and reject real content.
        if not self._tags_queue:
            if data.strip():
                raise ValueError("Text outside of <speak> is not allowed")
            return
        # Text inside <phoneme> is ignored; the 'ph' attribute is used instead.
        if self._tags_queue[-1] != "phoneme":
            self._prepared_fastspeech_strings.append(data.strip())

    def get_fastspeech_string(self):
        """Return the collected text/phoneme chunks joined by single spaces."""
        return " ".join(self._prepared_fastspeech_strings)
class FastSpeech2Synthesizer:
    """A synthesizer wrapper around Fastspeech2 using MelGAN as a vocoder."""

    _device: torch.device
    _melgan_model: torch.jit.RecursiveScriptModule
    _fs_model: torch.jit.RecursiveScriptModule
    _phonetizer: GraphemeToPhonemeTranslatorBase
    _normalizer: NormalizerBase

    def __init__(
        self,
        melgan_vocoder_path: os.PathLike,
        fastspeech_model_path: os.PathLike,
        phonetizer: GraphemeToPhonemeTranslatorBase,
        normalizer: NormalizerBase,
    ):
        """Initialize a FastSpeech2Synthesizer.

        Args:
          melgan_vocoder_path: Path to the TorchScript MelGAN vocoder for this voice.
            See https://github.com/seungwonpark/melgan and the script
            melgan_convert.py.
          fastspeech_model_path: Path to the TorchScript fastspeech model for this.
            See https://github.com/cadia-lvl/FastSpeech2 and the script
            fastspeech_convert.py.
          phonetizer: A GraphemeToPhonemeTranslator to use for the input text.
          normalizer: A Normalizer used to normalize the input text prior to synthesis
        """
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._melgan_model = torch.jit.load(
            melgan_vocoder_path,
            map_location=self._device
        )
        self._fs_model = torch.jit.load(
            fastspeech_model_path,
            map_location=self._device
        )
        self._phonetizer = phonetizer
        self._normalizer = normalizer
        # Upper bound on words per synthesis segment (latency/memory control).
        self._max_words_per_segment = 30

    def _add_phonemes(self, words: typing.Iterable[Word]) -> typing.Iterable[Word]:
        # Attach a phone sequence to every word; sentence separators pass through.
        for word in words:
            if not word == WORD_SENTENCE_SEPARATOR:
                # TODO(rkjaran): Cover more punctuation (Unicode)
                punctuation = re.sub(r"[{}\[\]]", "", string.punctuation)
                # Space-pad punctuation so the G2P sees it as separate tokens.
                g2p_word = re.sub(r"([{}])".format(punctuation), r" \1 ", word.symbol)
                # TODO(rkjaran): The language code shouldn't be hardcoded here. Should
                # it be here at all?
                word.phone_sequence = self._phonetizer.translate(
                    g2p_word, LangID("is-IS")
                )
            yield word

    def _do_vocoder_pass(self, mel: torch.Tensor) -> torch.Tensor:
        """Perform a vocoder pass, returning int16 samples at 22050 Hz."""
        return self._melgan_model.inference(mel).to(torch.int16)

    @staticmethod
    def _wavarray_to_pcm(
        array: np.ndarray, src_sample_rate=22050, dst_sample_rate=22050
    ) -> bytes:
        """Convert a NDArray (int16) to a PCM byte chunk, resampling if necessary."""
        def to_pcm_bytes(array1d):
            return array1d.view("b").data.tobytes()

        if sys.byteorder == "big":
            # Fixed: ndarray.byteswap() returns a *new* array unless
            # inplace=True — the previous bare call discarded the result and
            # was a no-op on big-endian hosts. Keep the swapped copy instead.
            array = array.byteswap()
        orig_samples = array.ravel()
        if src_sample_rate == dst_sample_rate:
            return to_pcm_bytes(orig_samples)
        return to_pcm_bytes(
            resampy.resample(orig_samples, src_sample_rate, dst_sample_rate)
        )

    def synthesize(
        self,
        text_string: str,
        emit_speech_marks=False,
        sample_rate=22050,
        # TODO(rkjaran): remove once we support normalization with SSML in a generic
        # way. Also remove it from FastSpeech2Voice
        handle_embedded_phonemes=False,
    ) -> typing.Iterable[bytes]:
        """Synthesize 16 bit PCM samples or a stream of JSON speech marks.

        Args:
            text_string: Text to be synthesized, can contain embedded phoneme
                strings in {}
            emit_speech_marks: Whether to generate speech marks or PCM samples
            sample_rate: Sample rate of the returned PCM chunks

        Yields:
            bytes: PCM chunk of synthesized audio, or JSON encoded speech marks
        """
        duration_control = 1.0
        pitch_control = 1.0
        energy_control = 1.0

        # TODO(rkjaran): remove conditional once we remove the
        # `handle_embedded_phonemes` argument
        normalize_fn = (
            self._normalizer.normalize
            if not handle_embedded_phonemes
            else BasicNormalizer().normalize
        )
        words = list(self._add_phonemes(normalize_fn(text_string)))

        # Split the word stream into sentences at the separator markers.
        sentences: typing.List[typing.List[Word]] = [[]]
        for idx, word in enumerate(words):
            if word == WORD_SENTENCE_SEPARATOR:
                if idx != len(words) - 1:
                    sentences.append([])
            else:
                sentences[-1].append(word)

        # Segment to decrease latency and memory usage
        duration_time_offset = 0
        for sentence in sentences:
            for idx in range(0, len(sentence), self._max_words_per_segment):
                segment_words = sentence[idx : idx + self._max_words_per_segment]
                phone_counts: typing.List[int] = []
                phone_seq = []
                for word in segment_words:
                    phone_counts.append(len(word.phone_sequence))
                    phone_seq.extend(word.phone_sequence)

                if not phone_seq:
                    # If none of the words in this segment got a phone sequence we skip the
                    # rest
                    continue

                text_seq = torch.tensor(
                    [text_to_sequence("{%s}" % " ".join(phone_seq), hp.text_cleaners)],
                    dtype=torch.int64,
                    device=self._device,
                )

                (
                    mel_postnet,
                    # Duration of each phoneme in log(millisec)
                    log_duration_output
                ) = self._fs_model.inference(
                    text_seq,
                    d_control=duration_control,
                    p_control=pitch_control,
                    e_control=energy_control,
                )

                if emit_speech_marks:
                    # The model uses 10 ms as the unit (or, technically, log(dur*10ms))
                    phone_durations = (
                        10
                        * torch.exp(log_duration_output.detach()[0].to(torch.float32))
                    ).tolist()

                    # Sum per-phone durations back into per-word durations.
                    word_durations = []
                    offset = 0
                    for count in phone_counts:
                        word_durations.append(
                            # type: ignore
                            sum(phone_durations[offset : offset + count])
                        )
                        offset += count

                    segment_duration_time_offset: int = duration_time_offset
                    for idx, dur in enumerate(word_durations):
                        segment_words[
                            idx
                        ].start_time_milli = segment_duration_time_offset
                        segment_duration_time_offset += dur

                    for word in segment_words:
                        if word.is_spoken():
                            yield word.to_json().encode("utf-8") + b"\n"

                    # NOTE(review): this adds the *absolute* segment end offset
                    # onto the running offset (old + (old + durations)), so
                    # timestamps drift after the first segment; it looks like
                    # it should be
                    # `duration_time_offset = segment_duration_time_offset`.
                    # Left unchanged pending confirmation on real output.
                    duration_time_offset += segment_duration_time_offset
                else:
                    # 22050 Hz 16 bit linear PCM chunks
                    wav = self._do_vocoder_pass(mel_postnet).numpy()
                    yield FastSpeech2Synthesizer._wavarray_to_pcm(
                        wav, src_sample_rate=22050, dst_sample_rate=sample_rate
                    )
class FastSpeech2Voice(VoiceBase):
    """Voice implementation backed by a FastSpeech2Synthesizer."""

    _backend: FastSpeech2Synthesizer
    _properties: VoiceProperties

    def __init__(self, properties: VoiceProperties, backend):
        """Initialize a fixed voice with a FastSpeech2 backend."""
        self._backend = backend
        self._properties = properties

    def _is_valid(self, **kwargs) -> bool:
        """Return True if the request kwargs look valid for this voice."""
        # Some sanity checks
        try:
            return (
                kwargs["OutputFormat"] in ("pcm", "ogg_vorbis", "mp3", "json")
                and kwargs["VoiceId"] == self._properties.voice_id
                and "Text" in kwargs
            )
        except KeyError:
            return False

    def _synthesize(
        self, text: str, handle_embedded_phonemes=False, **kwargs
    ) -> typing.Iterable[bytes]:
        """Yield encoded audio chunks (or speech marks) for *text*.

        Raises:
            ValueError: if the request kwargs fail validation.
        """
        if not self._is_valid(**kwargs):
            raise ValueError("Synthesize request not valid")
        for chunk in self._backend.synthesize(
            text,
            # "json" output means speech marks rather than audio.
            emit_speech_marks=kwargs["OutputFormat"] == "json",
            sample_rate=int(kwargs["SampleRate"]),
            handle_embedded_phonemes=handle_embedded_phonemes,
        ):
            if current_app.config["USE_FFMPEG"]:
                if kwargs["OutputFormat"] == "ogg_vorbis":
                    yield ffmpeg.to_ogg_vorbis(
                        chunk,
                        src_sample_rate=kwargs["SampleRate"],
                        sample_rate=kwargs["SampleRate"],
                    )
                elif kwargs["OutputFormat"] == "mp3":
                    yield ffmpeg.to_mp3(
                        chunk,
                        src_sample_rate=kwargs["SampleRate"],
                        sample_rate=kwargs["SampleRate"],
                    )
            # NOTE(review): when USE_FFMPEG is false, ogg_vorbis/mp3 requests
            # yield nothing at all (silent empty stream) — confirm requests
            # for those formats are rejected upstream in that configuration.
            if kwargs["OutputFormat"] in ("pcm", "json"):
                yield chunk

    def synthesize(self, text: str, **kwargs) -> typing.Iterable[bytes]:
        """Synthesize audio from a string of characters."""
        return self._synthesize(text, **kwargs)

    def synthesize_from_ssml(self, ssml: str, **kwargs) -> typing.Iterable[bytes]:
        """Synthesize audio from SSML markup."""
        # TODO(rkjaran): Move SSML parser out of here and make it more general
        parser = SSMLParser()
        parser.feed(ssml)
        text = parser.get_fastspeech_string()
        parser.close()
        # SSML <phoneme> content is already embedded as {...} phoneme strings.
        return self._synthesize(text=text, handle_embedded_phonemes=True, **kwargs)

    @property
    def properties(self) -> VoiceProperties:
        """Static properties (voice id, etc.) of this voice."""
        return self._properties
# Sample rates (Hz, as strings) each container/codec accepts.
_OGG_VORBIS_SAMPLE_RATES = ["8000", "16000", "22050", "24000"]
_MP3_SAMPLE_RATES = ["8000", "16000", "22050", "24000"]
_PCM_SAMPLE_RATES = ["8000", "16000", "22050"]

# Output formats advertised by this voice module. "json" carries speech
# marks rather than audio, hence no sample rates.
SUPPORTED_OUTPUT_FORMATS = [
    OutputFormat(output_format="pcm", supported_sample_rates=_PCM_SAMPLE_RATES),
    OutputFormat(
        output_format="ogg_vorbis", supported_sample_rates=_OGG_VORBIS_SAMPLE_RATES
    ),
    OutputFormat(output_format="mp3", supported_sample_rates=_MP3_SAMPLE_RATES),
    OutputFormat(output_format="json", supported_sample_rates=[]),
]
| StarcoderdataPython |
11338342 | import subprocess
import click
from catwatch.lib.db_seed import seed_database
from catwatch.app import create_app
from catwatch.extensions import db
# Create a context for the database connection.
app = create_app()
# Bind the SQLAlchemy instance to this app so it works outside a request.
db.app = app
# Connection string used by the Postgres shell helpers below.
# NOTE(review): may be None if the config key is unset — _parse_database_uri
# would then fail; confirm the config always provides it.
SQLALCHEMY_DATABASE_URI = app.config.get('SQLALCHEMY_DATABASE_URI', None)
class PostgresDatabase(object):
    """Run psql/createdb/dropdb commands inside the running postgres docker
    container found on this host."""

    def __init__(self, databases):
        self._config = _parse_database_uri(SQLALCHEMY_DATABASE_URI)
        # Base shell commands, always executed as the postgres superuser.
        self._psql = 'psql -U postgres'
        self._createdb = 'createdb -U postgres'
        self._dropdb = 'dropdb -U postgres'
        self._container = _get_postgres_container()
        self._docker_exec = 'docker exec {0}'.format(self._container)
        self.databases = databases
        if databases == ():
            # No names given: default to the database from the connection URI.
            self.databases = [self._config.get('database')]

    @property
    def config(self):
        """
        Return the parsed database URI.

        :return: str
        """
        return self._config

    @property
    def container(self):
        """
        Return the docker container ID of the running postgres instance.

        :return: str
        """
        return self._container

    def psql(self, command):
        """
        Run a psql command and return its results.

        :return: str
        """
        # NOTE(review): 'command' is interpolated into a shell=True string —
        # this is a local dev CLI, but any untrusted input here would be a
        # shell-injection vector.
        pg = '{0} -c "{1}"'.format(self._psql, command)
        shell_command = '{0} {1}'.format(self._docker_exec, pg)
        return subprocess.call(shell_command, shell=True)

    def _user(self):
        """
        Create a database role (user).

        :return: None
        """
        create_role = "CREATE USER {0} WITH PASSWORD '{1}'".format(
            self.config.get('username'), self.config.get('password'))
        return self.psql(create_role)

    def list(self):
        """
        List all databases.

        :return: psql result
        """
        return self.psql('\l')

    def create(self):
        """
        Create each database.

        :return: None
        """
        # Ensure the application role exists before creating its databases.
        self._user()
        for database in self.databases:
            pg = '{0} "{1}"'.format(self._createdb, database)
            command = '{0} {1}'.format(self._docker_exec, pg)
            subprocess.call(command, shell=True)
        return None

    def drop(self):
        """
        Drop each database.

        :return: None
        """
        for database in self.databases:
            pg = '{0} "{1}"'.format(self._dropdb, database)
            command = '{0} {1}'.format(self._docker_exec, pg)
            subprocess.call(command, shell=True)
        return None
def _parse_database_uri(uri):
"""
Parse a SQLAlchemy database URI.
:param uri: Postgres URI
:type uri: str
:return: Dict filled with the URI information
"""
db_engine = {}
uri_parts = uri.split('://')
db_engine['protocol'] = uri_parts[0]
uri_without_protocol_parts = uri_parts[1].split('@')
user_and_pass = uri_without_protocol_parts[0].split(':')
domain_and_db = uri_without_protocol_parts[1].split('/')
db_engine['username'] = user_and_pass[0]
db_engine['password'] = <PASSWORD>]
db_engine['host'] = domain_and_db[0].split(':')[0]
db_engine['port'] = domain_and_db[0].split(':')[1]
db_engine['database'] = domain_and_db[1]
return db_engine
def _get_postgres_container():
    """
    Find the ID of the running postgres docker container.

    :return: str
    """
    find_container = '''
    for i in $(docker ps | grep "postgres" | cut -f1 -d" ");
    do echo $i;
    done
    '''
    # Fixed: check_output returns *bytes* on Python 3; returning it directly
    # made 'docker exec {0}'.format(...) produce "docker exec b'abc123'".
    # Decode and strip the trailing newline instead of slicing with [:-1].
    output = subprocess.check_output(find_container, shell=True)
    return output.decode('utf-8').strip()
# Root click group; subcommands are registered at the bottom of this module.
@click.group()
def cli():
    """ Run PostgreSQL related tasks. """
    pass
@click.command()
def list():
    """
    List all databases.

    :return: psql result
    """
    # Intentionally shadows the builtin 'list': click derives the CLI command
    # name from the function name, so renaming would change the CLI.
    database = PostgresDatabase(())
    database.list()
@click.command()
@click.argument('command')
def psql(command):
    """
    Exec a psql command against the database.

    Example, to list all users:
    run db psql "\du"

    Delete a specific user:
    run db psql "DROP USER foobar"

    :return: psql result
    """
    # The command string is passed verbatim to psql inside the container.
    database = PostgresDatabase(())
    database.psql(command)
@click.command()
@click.argument('databases', nargs=-1)
def create(databases):
    """
    Create 1 or more databases.

    :return: db session create_all result
    """
    # Creates the application role and the physical database(s) first.
    database = PostgresDatabase(databases)
    database.create()

    # We also do a create all to load the initial schema from our models.
    return db.create_all()
@click.command()
@click.argument('databases', nargs=-1)
def drop(databases):
    """
    Drop 1 or more databases.

    :return: None
    """
    # With no names given, drops the database from the connection URI.
    database = PostgresDatabase(databases)
    database.drop()

    return None
@click.command()
@click.argument('databases', nargs=-1)
@click.pass_context
def reset(ctx, databases):
    """
    Run drop, create and seed automatically.

    :return: None
    """
    # Chain the sibling commands via the click context.
    ctx.invoke(drop, databases=databases)
    ctx.invoke(create, databases=databases)
    ctx.invoke(seed)

    return None
@click.command()
def init():
    """
    Initialize the database.

    :return: db session create_all result
    """
    # Recreates the schema in place (destructive: drops all tables first).
    db.drop_all()

    return db.create_all()
@click.command()
def seed():
    """
    Seed the database with your own data.
    """
    # Delegates to the project's seed routine (catwatch.lib.db_seed).
    return seed_database()
# Register all subcommands on the `cli` group.
cli.add_command(list)
cli.add_command(psql)
cli.add_command(create)
cli.add_command(drop)
cli.add_command(reset)
cli.add_command(init)
cli.add_command(seed)
| StarcoderdataPython |
385495 | # -*- coding: utf-8 -*-
"""
Created on Tue May 20 15:17:37 2014
@author: timothyh
"""
import argparse
from tempfile import NamedTemporaryFile
from ped_parser import family, individual, parser
def main():
    """Build a demo trio pedigree, parse it, and print the resulting family.

    NOTE(review): the argparse arguments below are defined but parse_args()
    is never called, so the denovogear/vcf/ped inputs are currently unused —
    calling parse_args() would make the positional arguments mandatory and
    change behavior, so it is left as a TODO.
    """
    argparser = argparse.ArgumentParser(description="Call denovo variants on a VCF file containing a trio")
    argparser.add_argument('--denovogear',
                           type=str, nargs=1,
                           required=True,
                           help='Path to the denovogear binary for example: /Users/timothyh/home/binHTS/denovogear 0.5.4/bin/denovogear'
                           )
    argparser.add_argument('vcf_file',
                           type=str, nargs=1,
                           help='A variant file in VCF format containing the genotypes of the trio'
                           )
    argparser.add_argument('ped_file',
                           type=str, nargs=1,
                           help='A pedigree file in .ped format containing the samples to be extracted from the VCF file'
                           )
    print("Hello")

    # Check that the PED file only contains a trio and identify sex of child as this is required for calling denovogear correctly
    # Check that the VCF file contains the same samples as thePED file
    # Run denovogear

    trio_lines = ['#Standard trio\n',
                  '#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n',
                  'healthyParentsAffectedSon\tproband\tfather\tmother\t1\t2\n',
                  'healthyParentsAffectedSon\tmother\t0\t0\t2\t1\n',
                  'healthyParentsAffectedSon\tfather\t0\t0\t1\t1\n',
                  'healthyParentsAffectedSon\tdaughter\tfather\tmother\t2\t1\n',
                  ]
    # Fixed: the content is pedigree data, so use a '.ped' suffix (was '.vcf').
    # delete=False so the file survives close() for FamilyParser to read;
    # it is intentionally left on disk afterwards.
    trio_file = NamedTemporaryFile(mode='w+t', delete=False, suffix='.ped')
    trio_file.writelines(trio_lines)
    trio_file.seek(0)
    trio_file.close()
    family_parser = parser.FamilyParser(trio_file.name)
    trio_family = family_parser.families['healthyParentsAffectedSon']
    # Fixed syntax error: was `print(trio_family.)`.
    print(trio_family)
if __name__ == '__main__':
main() | StarcoderdataPython |
77631 | <reponame>cash/chepstow<gh_stars>0
import chepstow
import random
import time
import unittest
class BigTest(unittest.TestCase):
    """Timing test for chepstow.Agent.run."""

    def test_something(self):
        # randomly fails for testing purposes
        agent = chepstow.Agent()
        # Random delay in [0, 10] seconds; with the 7-second limit below this
        # test fails nondeterministically *by design* (flaky-test demo) —
        # do not "fix" the randomness.
        delay = random.randint(0, 10)
        start = time.time()
        agent.run(delay)
        stop = time.time()
        total = stop - start
        self.assertLess(total, 7, "The something test now takes {} seconds".format(total))
| StarcoderdataPython |
5072742 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Zope-specific Python Expression Handler
Handler for Python expressions that uses the RestrictedPython package.
"""
from AccessControl import safe_builtins
from AccessControl.ZopeGuards import guarded_getattr, get_safe_globals
from RestrictedPython import compile_restricted_eval
from zope.tales.tales import CompilerError
from zope.tales.pythonexpr import PythonExpr
class PythonExpr(PythonExpr):
    # Deliberately shadows zope.tales.pythonexpr.PythonExpr: same name, but
    # expressions are compiled and evaluated with RestrictedPython guards.
    # NOTE: the `unicode` builtin below means this module targets Python 2.
    _globals = get_safe_globals()
    _globals['_getattr_'] = guarded_getattr
    _globals['__debug__' ] = __debug__
    def __init__(self, name, expr, engine):
        """Compile *expr* with RestrictedPython; raise the engine's
        CompilerError on any compilation error."""
        # Collapse newlines so multi-line TALES expressions compile as one line.
        self.text = self.expr = text = expr.strip().replace('\n', ' ')
        # Unicode expression are not handled properly by RestrictedPython
        # We convert the expression to UTF-8 (ajung)
        if isinstance(text, unicode):
            text = text.encode('utf-8')
        code, err, warn, use = compile_restricted_eval(text,
                                                       self.__class__.__name__)
        if err:
            raise engine.getCompilerError()('Python expression error:\n%s' %
                                            '\n'.join(err))
        # Names the expression reads; bound from the TALES context per call.
        self._varnames = use.keys()
        self._code = code
    def __call__(self, econtext):
        """Evaluate the compiled expression against the TALES context."""
        __traceback_info__ = self.text
        vars = self._bind_used_names(econtext, {})
        # The guarded globals win over context bindings of the same name.
        vars.update(self._globals)
        return eval(self._code, vars, {})
class _SecureModuleImporter:
    """Dotted-name module lookup routed through the guarded ``__import__``."""

    __allow_access_to_unprotected_subobjects__ = True

    def __getitem__(self, module):
        # Import the root package via the safe builtin, then resolve the
        # remaining dotted components one attribute at a time.
        resolved = safe_builtins['__import__'](module)
        for component in module.split('.')[1:]:
            resolved = getattr(resolved, component)
        return resolved
from DocumentTemplate.DT_Util import TemplateDict, InstanceDict
from DocumentTemplate.security import RestrictedDTML
class Rtd(RestrictedDTML, TemplateDict):
    """Restricted TemplateDict used as the DTML namespace by call_with_ns."""
    # Current context object; assigned per call by call_with_ns().
    this = None
def call_with_ns(f, ns, arg=1):
    """Call DTML callable *f* with a namespace stack built from mapping *ns*.

    The stack is: request, instance dict of the context, then *ns* itself
    (pushed in that order, so *ns* has highest lookup priority). With
    ``arg == 2`` the callable is invoked in the two-argument DTML form
    ``f(client, namespace)`` with a None client. The three pushed frames are
    always popped, even if *f* raises.
    """
    td = Rtd()
    # prefer 'context' to 'here'; fall back to 'None'
    this = ns.get('context', ns.get('here'))
    td.this = this
    request = ns.get('request', {})
    # Taint-wrap the request (if it supports it) so string taint checks apply.
    if hasattr(request, 'taintWrapper'):
        request = request.taintWrapper()
    td._push(request)
    td._push(InstanceDict(td.this, td))
    td._push(ns)
    try:
        if arg==2:
            return f(None, td)
        else:
            return f(td)
    finally:
        td._pop(3)
| StarcoderdataPython |
# Dependencies
from tkinter import *
from modules.jeu import *
# Global variables
win = Tk() # Tkinter window
# Configure the Tkinter window
win.title("FlickColor")
win.resizable(False, False)
win.configure(background = "white")
# NOTE(review): Pil_imageTk is presumably re-exported by modules.jeu
# (PIL.ImageTk); it is not imported here directly -- confirm.
win.iconphoto(False, Pil_imageTk.PhotoImage(file = "./img/icon.png"))
# Launch the menu
menu(win)
win.mainloop()
def ask_ok(prompt, retries=4, reminder='Please try again!'):
    """Ask a yes/no question on stdin.

    Returns True for a yes-like reply and False for a no-like one; after
    `retries` additional invalid replies, raises ValueError.
    """
    yes_replies = ('y', 'yes', 'yep')
    no_replies = ('n', 'no', 'nop', 'nope')
    while True:
        reply = input(prompt)
        if reply in yes_replies:
            return True
        if reply in no_replies:
            return False
        retries -= 1
        if retries < 0:
            raise ValueError('Invalid user response')
        print(reminder)
ask_ok('Do you really want to quit ?')
i = 5
# Default values are evaluated once, at function definition time, so `arg`
# captures i == 5 here...
def f(arg=i):
    print(arg)
i = 6
# ...and rebinding i afterwards does not change the default: this prints 5.
print('Default value is other valuable scope:')
f()
# The default list is created once and shared between calls, so each call
# appends to the same object -- the classic mutable-default gotcha this demo
# illustrates (use `L=None` plus `L = [] if L is None else L` to avoid it).
def f(a, L=[]):
    L.append(a)
    return L
print('Default value is assigned only one time:', f(1))
print('Default value is assigned only one time:', f(2))
print('Default value is assigned only one time:', f(3))
| StarcoderdataPython |
1976869 | import sys
import collections
import logging
from dynamo.fileop.base import FileQuery
from dynamo.fileop.transfer import FileTransferOperation, FileTransferQuery
from dynamo.fileop.deletion import FileDeletionOperation, FileDeletionQuery
from dynamo.utils.interface.mysql import MySQL
from dynamo.dataformat import File
LOG = logging.getLogger(__name__)
class StandaloneFileOperation(FileTransferOperation, FileTransferQuery, FileDeletionOperation, FileDeletionQuery):
    """
    Interface to in-house transfer & deletion daemon using MySQL for bookkeeping.

    Tasks are mirrored into `standalone_*` MySQL tables that the daemon
    polls; status is read back from the same tables.
    """
    def __init__(self, config):
        FileTransferOperation.__init__(self, config)
        FileTransferQuery.__init__(self, config)
        FileDeletionOperation.__init__(self, config)
        FileDeletionQuery.__init__(self, config)
        self.db = MySQL(config.db_params)
    def num_pending_transfers(self): #override
        # FOD can throttle itself.
        return 0
    def num_pending_deletions(self): #override
        # FOD can throttle itself.
        return 0
    def form_batches(self, tasks): #override
        """Group tasks into batches that share the same endpoint(s)."""
        if len(tasks) == 0:
            return []
        # Transfer tasks carry a `source` attribute; deletion tasks do not.
        if hasattr(tasks[0], 'source'):
            # These are transfer tasks
            by_endpoints = collections.defaultdict(list)
            for task in tasks:
                endpoints = (task.source, task.subscription.destination)
                by_endpoints[endpoints].append(task)
            return by_endpoints.values()
        else:
            # Deletion tasks: batch by the site the file is removed from.
            by_endpoint = collections.defaultdict(list)
            for task in tasks:
                by_endpoint[task.desubscription.site].append(task)
            return by_endpoint.values()
    def start_transfers(self, batch_id, batch_tasks): #override
        """Register transfer tasks with the daemon; returns {task: queued?}."""
        if len(batch_tasks) == 0:
            return {}
        result = {}
        # tasks should all have the same source and destination
        source = batch_tasks[0].source
        destination = batch_tasks[0].subscription.destination
        fields = ('id', 'source', 'destination', 'checksum_algo', 'checksum')
        def yield_task_entry():
            # Yields one DB row per task; also fills `result` as a side effect.
            for task in batch_tasks:
                lfile = task.subscription.file
                lfn = lfile.lfn
                source_pfn = source.to_pfn(lfn, 'gfal2')
                dest_pfn = destination.to_pfn(lfn, 'gfal2')
                if source_pfn is None or dest_pfn is None:
                    # either gfal2 is not supported or lfn could not be mapped
                    result[task] = False
                    continue
                if self.checksum_algorithm:
                    checksum = (self.checksum_algorithm, str(lfile.checksum[self.checksum_index]))
                else:
                    checksum = (None, None)
                result[task] = True
                yield (task.id, source_pfn, dest_pfn) + checksum
        # NOTE(review): in read-only mode the generator never runs, so
        # `result` is returned empty -- confirm that is intended.
        if not self._read_only:
            sql = 'INSERT INTO `standalone_transfer_batches` (`batch_id`, `source_site`, `destination_site`) VALUES (%s, %s, %s)'
            self.db.query(sql, batch_id, source.name, destination.name)
            self.db.insert_many('standalone_transfer_tasks', fields, None, yield_task_entry())
        LOG.debug('Inserted %d entries to standalone_transfer_tasks for batch %d.', len(batch_tasks), batch_id)
        return result
    def start_deletions(self, batch_id, batch_tasks): #override
        """Register deletion tasks with the daemon; returns {task: queued?}."""
        if len(batch_tasks) == 0:
            return {}
        result = {}
        # tasks should all have the same target site
        site = batch_tasks[0].desubscription.site
        fields = ('id', 'file')
        def yield_task_entry():
            # Yields one DB row per task; also fills `result` as a side effect.
            for task in batch_tasks:
                lfn = task.desubscription.file.lfn
                pfn = site.to_pfn(lfn, 'gfal2')
                if pfn is None:
                    # either gfal2 is not supported or lfn could not be mapped
                    result[task] = False
                    continue
                result[task] = True
                yield (task.id, pfn)
        if not self._read_only:
            sql = 'INSERT INTO `standalone_deletion_batches` (`batch_id`, `site`) VALUES (%s, %s)'
            self.db.query(sql, batch_id, site.name)
            self.db.insert_many('standalone_deletion_tasks', fields, None, yield_task_entry())
        LOG.debug('Inserted %d entries to standalone_deletion_tasks for batch %d.', len(batch_tasks), batch_id)
        return result
    def cancel_transfers(self, task_ids): #override
        return self._cancel(task_ids, 'transfer')
    def cancel_deletions(self, task_ids): #override
        return self._cancel(task_ids, 'deletion')
    def cleanup(self): #override
        """Drop rows orphaned on either side between the generic and
        standalone task/batch tables."""
        sql = 'DELETE FROM f USING `standalone_transfer_tasks` AS f LEFT JOIN `transfer_tasks` AS t ON t.`id` = f.`id` WHERE t.`id` IS NULL'
        self.db.query(sql)
        sql = 'DELETE FROM f USING `standalone_deletion_tasks` AS f LEFT JOIN `deletion_tasks` AS t ON t.`id` = f.`id` WHERE t.`id` IS NULL'
        self.db.query(sql)
        sql = 'DELETE FROM f USING `standalone_transfer_batches` AS f LEFT JOIN `transfer_batches` AS t ON t.`id` = f.`batch_id` WHERE t.`id` IS NULL'
        self.db.query(sql)
        sql = 'DELETE FROM f USING `standalone_deletion_batches` AS f LEFT JOIN `deletion_batches` AS t ON t.`id` = f.`batch_id` WHERE t.`id` IS NULL'
        self.db.query(sql)
        # Delete the source tasks - caution: wipes out all tasks when switching the operation backend
        sql = 'DELETE FROM t USING `transfer_tasks` AS t'
        sql += ' LEFT JOIN `standalone_transfer_tasks` AS f ON f.`id` = t.`id`'
        sql += ' WHERE f.`id` IS NULL'
        self.db.query(sql)
        sql = 'DELETE FROM t USING `deletion_tasks` AS t'
        sql += ' LEFT JOIN `standalone_deletion_tasks` AS f ON f.`id` = t.`id`'
        sql += ' WHERE f.`id` IS NULL'
        self.db.query(sql)
    def get_transfer_status(self, batch_id): #override
        return self._get_status(batch_id, 'transfer')
    def get_deletion_status(self, batch_id): #override
        return self._get_status(batch_id, 'deletion')
    def write_transfer_history(self, history_db, task_id, history_id): #override
        # No per-task history is recorded for the standalone backend.
        pass
    def write_deletion_history(self, history_db, task_id, history_id): #override
        # No per-task history is recorded for the standalone backend.
        pass
    def forget_transfer_status(self, task_id): #override
        return self._forget_status(task_id, 'transfer')
    def forget_deletion_status(self, task_id): #override
        return self._forget_status(task_id, 'deletion')
    def forget_transfer_batch(self, batch_id): #override
        return self._forget_batch(batch_id, 'transfer')
    def forget_deletion_batch(self, batch_id): #override
        return self._forget_batch(batch_id, 'deletion')
    def _cancel(self, task_ids, optype):
        # Only tasks the daemon has not started yet can be cancelled.
        sql = 'UPDATE `standalone_{op}_tasks` SET `status` = \'cancelled\''.format(op = optype)
        self.db.execute_many(sql, 'id', task_ids, ['`status` IN (\'new\', \'queued\')'])
    def _get_status(self, batch_id, optype):
        # Returns a list of (task_id, status, exitcode, message, start, finish).
        sql = 'SELECT q.`id`, a.`status`, a.`exitcode`, a.`message`, UNIX_TIMESTAMP(a.`start_time`), UNIX_TIMESTAMP(a.`finish_time`) FROM `standalone_{op}_tasks` AS a'
        sql += ' INNER JOIN `{op}_tasks` AS q ON q.`id` = a.`id`'
        sql += ' WHERE q.`batch_id` = %s'
        sql = sql.format(op = optype)
        return [(i, FileQuery.status_val(s), c, m, t, f) for (i, s, c, m, t, f) in self.db.xquery(sql, batch_id)]
    def _forget_status(self, task_id, optype):
        if self._read_only:
            return
        sql = 'DELETE FROM `standalone_{op}_tasks` WHERE `id` = %s'.format(op = optype)
        self.db.query(sql, task_id)
    def _forget_batch(self, batch_id, optype):
        if self._read_only:
            return
        sql = 'DELETE FROM `standalone_{op}_batches` WHERE `batch_id` = %s'
        self.db.query(sql.format(op = optype), batch_id)
| StarcoderdataPython |
1700551 | <filename>solarpv/training/s2/train_S2_unet.py<gh_stars>10-100
# Train S2-UNET
# built-in
import pickle, copy, logging, os, sys
# packages
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
# ML
import tensorflow as tf
from tensorflow.python import keras
from keras.layers import Input, Dense, Activation, Cropping2D, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, Concatenate
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D, UpSampling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model, to_categorical, Sequence
from keras.optimizers import Adam
from keras.metrics import categorical_accuracy
from keras.models import model_from_json
from random import shuffle
from keras.callbacks import CSVLogger, Callback, ModelCheckpoint
import keras.backend as K
# Configuration: Keras tensor layout + TensorFlow GPU memory behaviour.
K.set_image_data_format('channels_last')
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
# Grow GPU memory on demand instead of reserving it all up front.
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# Report whether TensorFlow can see a GPU.
print ('gpu',tf.test.is_gpu_available())
class HistPlot(Callback):
    """Keras callback that saves a loss/accuracy history plot each epoch.

    NOTE(review): the X/Y arrays and Y_pred collected at the top of
    on_epoch_end are never used by the plotting code below -- this looks
    like leftover validation-inspection code and costs a full
    predict_generator pass per epoch; confirm before removing.
    NOTE(review): the 200/10/20 sizes are hard-coded for batch_size=2 and
    200x200x14 inputs -- confirm against the DataGenerator configuration.
    """
    def __init__(self, validation_generator, outfile='metrics_hist'):
        self.validation_generator = validation_generator
        self.batch_size = validation_generator.batch_size
        # Basename (without extension) of the .png written each epoch.
        self.outfile = outfile
        #x,y = self.validation_generator.__getitem__(0)
        #print ('nit shapes', x.shape, y.shape)
    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): mutable default `logs={}` -- harmless here (only
        # printed), but Keras convention is `logs=None`.
        #print (dir(model))
        print (logs)
        Y = np.zeros((200,200,200,2))
        X = np.zeros((200,200,200,14))
        for ii in range(10):
            #print (ii)
            x,y = self.validation_generator.__getitem__(ii)
            X[ii*self.batch_size:(ii+1)*self.batch_size,:,:,:] = x
            Y[ii*self.batch_size:(ii+1)*self.batch_size,:,:,:] = y
            #print ('xy shape', x.shape, y.shape)
        #print (X.shape, Y.shape)
        Y_pred = self.model.predict_generator(self.validation_generator, steps=20)
        ### gen hist plot
        # History only exists after the first completed epoch.
        if epoch >0:
            print (self.model.history.history)
            epochs = np.arange(len(self.model.history.history['val_loss']))
            fig = plt.figure()
            ax1 = fig.add_subplot(111)
            ax1.plot(epochs, self.model.history.history['loss'], label='loss', color='red')
            ax1.plot(epochs, self.model.history.history['val_loss'], label='val_loss', color='orange')
            ax1.set_ylabel('loss')
            box = ax1.get_position()
            ax1.set_position([box.x0, box.y0, box.width, box.height*0.9])
            # Accuracy curves share the x axis on a twin y axis.
            ax2 = ax1.twinx()
            ax2.plot(epochs, self.model.history.history['categorical_accuracy'], label='cat_accuracy', color='blue')
            ax2.plot(epochs, self.model.history.history['val_categorical_accuracy'], label='val_cat_accuracy', color='green')
            ax2.set_position([box.x0, box.y0, box.width, box.height*0.9])
            h1,l1 = ax1.get_legend_handles_labels()
            h2,l2 = ax2.get_legend_handles_labels()
            ax1.legend(handles = h1+h2, labels=l1+l2, loc='upper center', bbox_to_anchor=(0.5, -0.07), ncol=4)
            plt.savefig(self.outfile+'.png')
            plt.close()
class DataGenerator(Sequence):
    """Keras Sequence feeding (image, one-hot mask) batches from .npz files.

    Each record in `list_IDs` is a dict whose 'data' entry is the path to a
    .npz archive holding a 'data' image array and an 'annotation' mask with
    values 0/255.
    """

    def __init__(self, list_IDs, batch_size=32, dim=(32,32,32), n_channels=1,
                 n_classes=10, shuffle=True, augment=False):
        """Initialisation; `augment` enables random rot90/flip augmentation."""
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()
        self.augment = augment

    def __len__(self):
        """Denotes the number of batches per epoch (last partial included)."""
        return int(np.ceil(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch of data."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        # Generate data
        X, y = self.__data_generation(list_IDs_temp)
        return X, y

    def on_epoch_end(self):
        """Updates (and optionally reshuffles) the sample order."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        """
        Generates data containing batch_size samples of shape:
        (n_samples, *dim, n_channels)
        """
        # Initialization
        X = np.zeros((self.batch_size, *self.dim))
        y = np.zeros((self.batch_size, self.dim[0], self.dim[1]))
        # Generate data (load each archive once instead of three times)
        for i, ID in enumerate(list_IDs_temp):
            sample = np.load(ID['data'])
            image = sample['data']
            mask = sample['annotation'].astype('int') / 255
            if self.augment:
                k_rot = np.random.choice([0, 1, 2, 3])
                k_flip = np.random.choice([0, 1])
                # BUG FIX: apply the SAME random rotation/flip to both the
                # image and its mask. Previously an unconditional assignment
                # after this if/else overwrote the augmented mask with the
                # unaugmented copy, misaligning X and y whenever augment=True.
                X[i, :, :, :] = np.flip(np.rot90(image, k_rot, axes=(0, 1)), k_flip)
                y[i, :, :] = np.flip(np.rot90(mask, k_rot, axes=(0, 1)), k_flip)
            else:
                X[i, :, :, :] = image
                y[i, :, :] = mask
        return X, to_categorical(y, num_classes=self.n_classes)
class TrainS2Unet:
    """Trains the Sentinel-2 UNet on .npz chip records.

    Records are lists of {'data': <path>} dicts loaded from pickles; when no
    validation pickle is supplied, roughly 30% of the training records are
    held out for validation.
    """

    def __init__(self, data_dir, outp_fname, trn_records_pickle, val_records_pickle=None):
        """
        data_dir: directory containing the .npz training samples.
        outp_fname: path the trained Keras model is written to.
        trn_records_pickle: pickle with the list of training records.
        val_records_pickle: optional pickle with validation records.
        """
        # i/o
        self.data_dir = data_dir
        self.outp_fname = outp_fname
        # training parameters
        self.BATCH_SIZE = 2
        self.N_CLASSES = 2
        self.EPOCHS = 40
        self.LEARNING_RATE = 1e-7
        self.INPUT_SHAPE = (200, 200, 14)
        # data records
        with open(trn_records_pickle, 'rb') as fh:
            self.trn_records = pickle.load(fh)
        self.crossval_split = 0.7
        if not val_records_pickle:
            # Hold out ~30% for validation. NOTE(review): np.random.choice
            # samples WITH replacement here, so the holdout can come out
            # slightly smaller than 30% -- confirm this is intended.
            val_indexes = np.random.choice(len(self.trn_records), int((1 - self.crossval_split) * len(self.trn_records)))
            self.val_records = [rr for ii, rr in enumerate(self.trn_records) if ii in val_indexes]
            self.trn_records = [rr for ii, rr in enumerate(self.trn_records) if ii not in val_indexes]
        else:
            # BUG FIX: previously read the undefined name `val_records`,
            # raising NameError whenever a validation pickle was supplied.
            with open(val_records_pickle, 'rb') as fh:
                self.val_records = pickle.load(fh)

    def train(self, model_def_file):
        """Compile the UNet from its JSON definition, fit it on the
        training/validation generators and save it to outp_fname."""
        # Load the model architecture (weights are trained from scratch)
        with open(model_def_file) as f:
            model = model_from_json(f.read())
        # compile the model
        optimizer = Adam(lr=self.LEARNING_RATE)
        model.compile(
            optimizer=optimizer,
            loss='binary_crossentropy',
            metrics=[categorical_accuracy])
        logging.info(model.summary())
        # instantiate generators; only the training stream is augmented
        trn_generator = DataGenerator(self.trn_records,
                                      batch_size=self.BATCH_SIZE,
                                      dim=self.INPUT_SHAPE,
                                      n_channels=1,
                                      n_classes=self.N_CLASSES,
                                      shuffle=True,
                                      augment=True)
        val_generator = DataGenerator(self.val_records,
                                      batch_size=self.BATCH_SIZE,
                                      dim=self.INPUT_SHAPE,
                                      n_channels=1,
                                      n_classes=self.N_CLASSES,
                                      shuffle=True,
                                      augment=False)
        # instantiate callbacks: CSV log, metric plots, periodic checkpoints
        csv_cb = CSVLogger('log.csv', append=True, separator=';')
        hist_plot_cb = HistPlot(validation_generator=val_generator, outfile='trn_bootstrap_metrics')
        chkpt_cb = ModelCheckpoint('model_progress.h5', period=5)
        history = model.fit_generator(
            generator=trn_generator,
            validation_data=val_generator,
            verbose=1,
            epochs=self.EPOCHS,
            callbacks=[csv_cb, hist_plot_cb, chkpt_cb])
        model.save(self.outp_fname)
if __name__ == "__main__":
trn = TrainS2Unet(
data_dir=os.path.join(os.getcwd(),'data','S2_unet'),
outp_fname='s2_unet.h5',
trn_records_pickle=os.path.join(os.getcwd(),'data','S2_unet','records.pickle'))
trn.train(os.path.join(os.getcwd(),'solarpv','training','model_resunet.json'))
| StarcoderdataPython |
11396289 | import os
import scrapy
import pandas as pd
import numpy as np
from scrapy.http import Request
class DocdownloaderSpider(scrapy.Spider):
    """Downloads every vehicle document listed in scrapy_equippo.xlsx.

    NOTE(review): the Excel sheet is read in the class body, i.e. at import
    time; kept as-is for compatibility, but merely importing this module
    requires the spreadsheet and ./documents/ to exist.
    """
    name = 'docdownloader'
    print("Doc Downloader Constructor Called !!!")
    final_df = pd.read_excel('./scrapy_equippo.xlsx')
    docs_df = final_df[final_df['Documents for this vehicle 1'].notna()].reset_index(drop=True)
    print("Done reading")
    DOC_SAVE_DIR = './documents/'
    all_documents = os.listdir((DOC_SAVE_DIR))

    def start_requests(self):
        """Seed the crawl with a dummy request; the real work is driven by
        the spreadsheet, not by the fetched page."""
        yield scrapy.Request(url='https://www.google.com/', callback=self.parse_main_page,
                             dont_filter=True)

    def save_pdf(self, response):
        """Persist a downloaded document under DOC_SAVE_DIR."""
        name = response.meta['name']
        self.logger.info('Saving PDF %s', name)
        # Context manager guarantees the handle is closed even on write errors.
        with open(name, 'wb') as file:
            file.write(response.body)

    def parse_main_page(self, response):
        """Yield one download request per not-yet-downloaded document."""
        total_length = len(self.docs_df)
        for i in range(0, total_length):
            # Up to 6 document columns per vehicle row.
            for k in range(1, 7):
                doc_name = self.docs_df['Documents for this vehicle ' + str(k)].iloc[i]
                doc_link = self.docs_df['Documents Link ' + str(k)].iloc[i]
                if doc_name is not np.nan and doc_name != '':
                    try:
                        # len() doubles as a type guard: non-string cells
                        # (e.g. numeric values) raise TypeError and are skipped.
                        length = len(doc_name)
                        if doc_name not in self.all_documents:
                            yield Request(
                                url=doc_link, callback=self.save_pdf, meta={'name': self.DOC_SAVE_DIR + doc_name}
                            )
                    except Exception:
                        # Narrowed from a bare `except:` so KeyboardInterrupt
                        # and SystemExit are no longer swallowed.
                        print("Exception: ", doc_name, " ", i)
| StarcoderdataPython |
5026088 | <filename>sync_tester/tests/test_receive_sync_core.py
"""
This pytest module include e2e test of second part of core's synchronization -> receive tiles and create layer:
* From core A
* GW
"""
import logging
import os
import time
from datetime import datetime
from conftest import *
from sync_tester.configuration import config
from sync_tester.functions import executors
is_logger_init = False
_log = logging.getLogger('sync_tester.tests.test_receive_sync_core')
s3_credential = None
storage_provider = None
_log.info('Loading tests suite for receive sync services')
def test_receive_from_gw(receive_product_id=config.PRODUCT_ID_B, receive_product_version=config.PRODUCT_VERSION_B):
    """E2E check of core B's receive flow: a sync-target job is created and
    followed to completion, then the tile count, PyCSW record and Mapproxy
    layer are validated against the job's TOC/metadata."""
    # receive_product_id = config.PRODUCT_ID_B
    # receive_product_version = config.PRODUCT_VERSION_B
    assert (
        receive_product_id and receive_product_version), f"Test: [{test_receive_from_gw.__name__}] Failed: Validation layer " \
                                                         f"details\n" \
                                                         f"related errors:\n" \
                                                         f"at least on of layer params missing: product_id: [{receive_product_id}], " \
                                                         f"product_version: [{receive_product_version}]"
    # ======================================= Sync-received job task creation ==========================================
    try:
        criteria_params = {
            'timeout': config.RECEIVE_JOB_FIND_TIMEOUT_B,
            'product_id': receive_product_id,
            'product_version': receive_product_version,
            'job_type': config.JobTaskTypes.SYNC_TARGET.value,
            'job_manager_url': config.JOB_MANAGER_ROUTE_CORE_B
        }
        resp = executors.creation_job_loop_follower(criteria_params)
        # resp = executors.validate_sync_job_creation(receive_product_id,
        #                                             receive_product_version,
        #                                             config.JobTaskTypes.SYNC_TARGET.value,
        #                                             job_manager_url=config.JOB_MANAGER_ROUTE_CORE_B)
        msg = resp['message']
        sync_receive_job_state = resp['state']
        sync_receive_job = resp['record']
    except Exception as e:
        sync_receive_job_state = False
        msg = str(e)
    assert sync_receive_job_state, f'Test: [{test_receive_from_gw.__name__}] Failed: Query for new receive sync job\n' \
                                   f'related errors:\n' \
                                   f'{msg}'
    # ======================================= Sync receive job task follower ===========================================
    # The query returns a list; the first record is the job of interest.
    sync_receive_job = sync_receive_job[0]
    sync_receive_job_id = sync_receive_job['id']
    sync_receive_job_metadata = sync_receive_job['parameters']
    # cleanup_data['sync_job_id'] = sync_job_id # todo -> implement cleanup
    try:
        resp = executors.follow_sync_job(product_id=receive_product_id,
                                         product_version=receive_product_version,
                                         product_type=config.JobTaskTypes.SYNC_TARGET.value,
                                         job_manager_url=config.JOB_MANAGER_ROUTE_CORE_B,
                                         running_timeout=config.SYNC_TIMEOUT_B,
                                         internal_timeout=config.BUFFER_TIMEOUT_CORE_B)
        sync_receive_follow_state = resp['status']
        msg = resp['message']
    except Exception as e:
        sync_receive_follow_state = False
        msg = str(e)
    assert sync_receive_follow_state, f'Test: [{test_receive_from_gw.__name__}]\n' \
                                      f'Failed: Follow for sync receive job complete\n' \
                                      f'related errors:\n' \
                                      f'{msg}'
    # ======================================== Sync receive tiles validator ============================================
    # Compare the tile count promised by the job's TOC with what landed in storage.
    tile_count_on_toc = sync_receive_job['tasks'][0]['parameters']['expectedTilesCount']
    try:
        tile_count_on_storage = executors.count_tiles_amount(receive_product_id, receive_product_version, core="B")
        tile_count_state = tile_count_on_toc == tile_count_on_storage
        msg = f'Tile count on toc: [{tile_count_on_toc}] | Tile count on Storage: [{tile_count_on_storage}]'
        _log.info(f'Tile count on toc: [{tile_count_on_toc}] | Tile count on Storage: [{tile_count_on_storage}]')
    except Exception as e:
        tile_count_state = False
        msg = str(e)
    assert tile_count_state, f'Test: [{test_receive_from_gw.__name__}] Failed: tile count validation\n' \
                             f'related errors:\n' \
                             f'{msg}'
    # =========================================== validate pycsw record ================================================
    # Give the catalog time to ingest before querying it.
    time.sleep(config.BUFFER_TIMEOUT_CORE_B)
    try:
        validation_dict, pycsw_records, links = executors.validate_metadata_pycsw(sync_receive_job_metadata,
                                                                                  receive_product_id,
                                                                                  receive_product_version,
                                                                                  config.PYCSW_URL_B,
                                                                                  config.PYCSW_GET_RASTER_RECORD_PARAMS_B)
        pycsw_validation_state = validation_dict['validation']
        msg = validation_dict['reason']
    except Exception as e:
        pycsw_validation_state = False
        msg = str(e)
    assert pycsw_validation_state, f'Test: [{test_receive_from_gw.__name__}] Failed: Validation of toc with pycsw\n' \
                                   f'related errors:\n' \
                                   f'{msg}'
    # ========================================== validate mapproxy layer================================================
    time.sleep(config.BUFFER_TIMEOUT_CORE_B)
    try:
        params = {'mapproxy_endpoint_url': config.MAPPROXY_ROUTE_CORE_B,
                  'tiles_storage_provide': config.TILES_PROVIDER_B,
                  'grid_origin': config.MAPPROXY_GRID_ORIGIN_B,
                  'nfs_tiles_url': config.NFS_TILES_DIR_B}
        # S3 credentials are only needed when tiles live in object storage.
        if config.TILES_PROVIDER_B.lower() == "s3":
            params['endpoint_url'] = config.S3_ENDPOINT_URL_CORE_B
            params['access_key'] = config.S3_ACCESS_KEY_CORE_B
            params['secret_key'] = config.S3_SECRET_KEY_CORE_B
            params['bucket_name'] = config.S3_BUCKET_NAME_CORE_B
        result = executors.validate_mapproxy_layer(pycsw_records, receive_product_id, receive_product_version, params)
        mapproxy_validation_state = result['validation']
        msg = result['reason']
    except Exception as e:
        mapproxy_validation_state = False
        msg = str(e)
    assert mapproxy_validation_state, f'Test: [{test_receive_from_gw.__name__}] Failed: Validation of mapproxy urls\n' \
                                      f'related errors:\n' \
                                      f'{msg}'
def setup_module(module):
    """
    Pytest module-level setup: configure logging once before any test runs.
    """
    init_logger()
def teardown_module(module):  # pylint: disable=unused-argument
    """
    Pytest module-level teardown: clean environment artifacts created by the
    tests (discrete layers accumulated in ValueStorage).
    """
    # todo - future to be implemented after integration with danny
    # NOTE(review): `res` is captured but never inspected -- confirm whether
    # the cleanup result should be asserted/logged.
    res = executors.clean_env(ValueStorage.discrete_list)
def init_logger():
    """Configure the root logger once: stdout handler (INFO or DEBUG per
    config.DEBUG_LOG) and an optional DEBUG-level file handler."""
    global is_logger_init
    # Idempotent: subsequent calls are no-ops.
    if is_logger_init:
        return
    else:
        is_logger_init = True
    log_mode = config.DEBUG_LOG  # True -> debug+ logs; default is info+
    file_log = config.LOG_TO_FILE  # True -> also write logs to a file
    log_output_path = config.LOG_OUTPUT_PATH  # directory for log output
    # Root logger captures everything; handlers filter per destination.
    logger = logging.getLogger()
    if logger.hasHandlers():
        logger.handlers.clear()
    logger.setLevel(logging.DEBUG)
    # default handler to stdout
    ch = logging.StreamHandler()
    # choose the console verbosity
    if not log_mode:
        ch.setLevel(logging.INFO)
    else:
        ch.setLevel(logging.DEBUG)
    # shared formatter for both handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # optional file handler, always at DEBUG level
    if file_log:
        # NOTE(review): str(datetime.utcnow()) contains ':' and spaces, which
        # is not a valid filename on Windows -- confirm the target platform.
        log_file_name = ".".join([str(datetime.utcnow()), 'log'])  # pylint: disable=invalid-name
        fh = logging.FileHandler(os.path.join(log_output_path, log_file_name))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    ch.setFormatter(formatter)
    # add the console handler to the logger
    logger.addHandler(ch)
# TODO: in production this should run via the pytest setup hooks; kept here
# so the test can be executed directly while debugging locally.
if config.DEBUG:
    init_logger()
    test_receive_from_gw()
| StarcoderdataPython |
261428 | <filename>lib/surface/compute/vpn_tunnels/list.py<gh_stars>0
# -*- coding: utf-8 -*- #
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing VPN tunnels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import filter_rewrite
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute.vpn_tunnels import vpn_tunnels_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.vpn_tunnels import flags
from googlecloudsdk.core import properties
class List(base.ListCommand):
  """List VPN tunnels."""

  # Populated below from GetRegionalListerHelp so the text stays in sync
  # with the other regional lister commands.
  detailed_help = None

  @staticmethod
  def Args(parser):
    """Registers the output format, the regions argument and the completer."""
    parser.display_info.AddFormat(flags.DEFAULT_LIST_FORMAT)
    lister.AddRegionsArg(parser)
    parser.display_info.AddCacheUpdater(flags.VpnTunnelsCompleter)

  def Run(self, args):
    """Issues the list request and returns the matching VPN tunnels."""
    api_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    tunnel_helper = vpn_tunnels_utils.VpnTunnelHelper(api_holder)
    project_id = properties.VALUES.core.project.GetOrFail()
    # Rewrite() splits the filter into a client-side part (stored back on
    # args.filter) and a server-side expression passed to the API.
    args.filter, server_filter = filter_rewrite.Rewriter().Rewrite(args.filter)
    return tunnel_helper.List(project=project_id, filter_expr=server_filter)


List.detailed_help = base_classes.GetRegionalListerHelp('VPN tunnels')
| StarcoderdataPython |
12805706 | from ._core import make_figure
from ._doc import make_docstring
import plotly.graph_objs as go
# Extra sentences appended (via make_docstring) to the x/y parameter docs of
# every 2D Cartesian chart constructor below; the strings are user-visible
# documentation text and must not be reworded casually.
_wide_mode_xy_append = [
    "Either `x` or `y` can optionally be a list of column references or array_likes, ",
    "in which case the data will be treated as if it were 'wide' rather than 'long'.",
]
_cartesian_append_dict = dict(x=_wide_mode_xy_append, y=_wide_mode_xy_append)
def scatter(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    symbol=None,
    size=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    symbol_sequence=None,
    symbol_map=None,
    opacity=None,
    size_max=None,
    marginal_x=None,
    marginal_y=None,
    trendline=None,
    trendline_color_override=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    render_mode="auto",
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a scatter plot, each row of `data_frame` is represented by a symbol
    mark in 2D space.
    """
    # NOTE: locals() forwards every parameter above to make_figure, so no
    # temporary local variables may be introduced in this function body.
    return make_figure(args=locals(), constructor=go.Scatter)
scatter.__doc__ = make_docstring(scatter, append_dict=_cartesian_append_dict)
def density_contour(
    data_frame=None,
    x=None,
    y=None,
    z=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    marginal_x=None,
    marginal_y=None,
    trendline=None,
    trendline_color_override=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    histfunc=None,
    histnorm=None,
    nbinsx=None,
    nbinsy=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a density contour plot, rows of `data_frame` are grouped together
    into contour marks to visualize the 2D distribution of an aggregate
    function `histfunc` (e.g. the count or sum) of the value `z`.
    """
    # NOTE: locals() forwards every parameter above to make_figure, so no
    # temporary local variables may be introduced in this function body.
    return make_figure(
        args=locals(),
        constructor=go.Histogram2dContour,
        trace_patch=dict(
            # coloring="none" renders contour lines only (no fill).
            contours=dict(coloring="none"),
            histfunc=histfunc,
            histnorm=histnorm,
            nbinsx=nbinsx,
            nbinsy=nbinsy,
            xbingroup="x",
            ybingroup="y",
        ),
    )
density_contour.__doc__ = make_docstring(
    density_contour,
    append_dict=dict(
        x=_wide_mode_xy_append,
        y=_wide_mode_xy_append,
        z=[
            "For `density_heatmap` and `density_contour` these values are used as the inputs to `histfunc`.",
        ],
        histfunc=["The arguments to this function are the values of `z`."],
    ),
)
def density_heatmap(
    data_frame=None,
    x=None,
    y=None,
    z=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    marginal_x=None,
    marginal_y=None,
    opacity=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    histfunc=None,
    histnorm=None,
    nbinsx=None,
    nbinsy=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a density heatmap, rows of `data_frame` are grouped together into
    colored rectangular tiles to visualize the 2D distribution of an
    aggregate function `histfunc` (e.g. the count or sum) of the value `z`.
    """
    # NOTE: locals() forwards every parameter above to make_figure, so no
    # temporary local variables may be introduced in this function body.
    return make_figure(
        args=locals(),
        constructor=go.Histogram2d,
        trace_patch=dict(
            histfunc=histfunc,
            histnorm=histnorm,
            nbinsx=nbinsx,
            nbinsy=nbinsy,
            xbingroup="x",
            ybingroup="y",
        ),
    )
density_heatmap.__doc__ = make_docstring(
    density_heatmap,
    append_dict=dict(
        x=_wide_mode_xy_append,
        y=_wide_mode_xy_append,
        z=[
            "For `density_heatmap` and `density_contour` these values are used as the inputs to `histfunc`.",
        ],
        histfunc=["The arguments to this function are the values of `z`.",],
    ),
)
def line(
    data_frame=None,
    x=None,
    y=None,
    line_group=None,
    color=None,
    line_dash=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    orientation=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    line_dash_sequence=None,
    line_dash_map=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    line_shape=None,
    render_mode="auto",
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a 2D line plot, each row of `data_frame` is represented as vertex of
    a polyline mark in 2D space.
    """
    # NOTE: locals() forwards every parameter above to make_figure, so no
    # temporary local variables may be introduced in this function body.
    return make_figure(args=locals(), constructor=go.Scatter)
line.__doc__ = make_docstring(line, append_dict=_cartesian_append_dict)
def area(
    data_frame=None,
    x=None,
    y=None,
    line_group=None,
    color=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    orientation=None,
    groupnorm=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    line_shape=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a stacked area plot, each row of `data_frame` is represented as
    vertex of a polyline mark in 2D space. The area between successive
    polylines is filled.

    Returns the figure built by `make_figure` using `go.Scatter` traces.
    """
    # All keyword arguments reach make_figure via locals().  The trace
    # patch stacks every trace into one group and draws lines only.
    return make_figure(
        args=locals(),
        constructor=go.Scatter,
        trace_patch=dict(stackgroup=1, mode="lines", groupnorm=groupnorm),
    )
# Replace the stub docstring with the generated per-argument documentation.
area.__doc__ = make_docstring(area, append_dict=_cartesian_append_dict)
def bar(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    base=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    opacity=None,
    orientation=None,
    barmode="relative",
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a bar plot, each row of `data_frame` is represented as a rectangular
    mark.

    Returns the figure built by `make_figure` using `go.Bar` traces.
    """
    # All keyword arguments reach make_figure via locals(); `barmode` is a
    # layout (not trace) property, hence the layout_patch.
    return make_figure(
        args=locals(),
        constructor=go.Bar,
        trace_patch=dict(textposition="auto"),
        layout_patch=dict(barmode=barmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
bar.__doc__ = make_docstring(bar, append_dict=_cartesian_append_dict)
def timeline(
    data_frame=None,
    x_start=None,
    x_end=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    opacity=None,
    range_x=None,
    range_y=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a timeline plot, each row of `data_frame` is represented as a rectangular
    mark on an x axis of type `date`, spanning from `x_start` to `x_end`.

    Returns the figure built by `make_figure`.
    """
    # NOTE: the string constructor "timeline" is special-cased inside
    # make_figure (bars are derived from x_start/x_end).  Traces are forced
    # horizontal and overlaid rather than stacked.
    return make_figure(
        args=locals(),
        constructor="timeline",
        trace_patch=dict(textposition="auto", orientation="h"),
        layout_patch=dict(barmode="overlay"),
    )
# Replace the stub docstring with the generated per-argument documentation.
timeline.__doc__ = make_docstring(timeline)
def histogram(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    marginal=None,
    opacity=None,
    orientation=None,
    barmode="relative",
    barnorm=None,
    histnorm=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    histfunc=None,
    cumulative=None,
    nbins=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a histogram, rows of `data_frame` are grouped together into a
    rectangular mark to visualize the 1D distribution of an aggregate
    function `histfunc` (e.g. the count or sum) of the value `y` (or `x` if
    `orientation` is `'h'`).

    Returns the figure built by `make_figure` using `go.Histogram` traces.
    """
    # All keyword arguments reach make_figure via locals().  Binning/
    # aggregation options ride on the trace; bar layout options on the layout.
    return make_figure(
        args=locals(),
        constructor=go.Histogram,
        trace_patch=dict(
            histnorm=histnorm, histfunc=histfunc, cumulative=dict(enabled=cumulative),
        ),
        layout_patch=dict(barmode=barmode, barnorm=barnorm),
    )
# Replace the stub docstring with generated docs, appending orientation-
# dependent notes to the x/y/histfunc entries.
histogram.__doc__ = make_docstring(
    histogram,
    append_dict=dict(
        x=["If `orientation` is `'h'`, these values are used as inputs to `histfunc`."]
        + _wide_mode_xy_append,
        y=["If `orientation` is `'v'`, these values are used as inputs to `histfunc`."]
        + _wide_mode_xy_append,
        histfunc=[
            "The arguments to this function are the values of `y`(`x`) if `orientation` is `'v'`(`'h'`).",
        ],
    ),
)
def violin(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    orientation=None,
    violinmode=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    points=None,
    box=False,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a violin plot, rows of `data_frame` are grouped together into a
    curved mark to visualize their distribution.

    Returns the figure built by `make_figure` using `go.Violin` traces.
    """
    # All keyword arguments reach make_figure via locals().  x0/y0 of " "
    # anchor the traces when no x/y mapping is given; scalegroup makes
    # violins share a width scale.
    return make_figure(
        args=locals(),
        constructor=go.Violin,
        trace_patch=dict(
            points=points, box=dict(visible=box), scalegroup=True, x0=" ", y0=" ",
        ),
        layout_patch=dict(violinmode=violinmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
violin.__doc__ = make_docstring(violin, append_dict=_cartesian_append_dict)
def box(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    orientation=None,
    boxmode=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    points=None,
    notched=False,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a box plot, rows of `data_frame` are grouped together into a
    box-and-whisker mark to visualize their distribution.

    Each box spans from quartile 1 (Q1) to quartile 3 (Q3). The second
    quartile (Q2) is marked by a line inside the box. By default, the
    whiskers correspond to the box' edges +/- 1.5 times the interquartile
    range (IQR: Q3-Q1), see "points" for other options.

    Returns the figure built by `make_figure` using `go.Box` traces.
    """
    # All keyword arguments reach make_figure via locals().  x0/y0 of " "
    # anchor the traces when no x/y mapping is given.
    return make_figure(
        args=locals(),
        constructor=go.Box,
        trace_patch=dict(boxpoints=points, notched=notched, x0=" ", y0=" "),
        layout_patch=dict(boxmode=boxmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
box.__doc__ = make_docstring(box, append_dict=_cartesian_append_dict)
def strip(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    facet_row=None,
    facet_row_weights=None,
    facet_col=None,
    facet_col_weights=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    orientation=None,
    stripmode=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a strip plot each row of `data_frame` is represented as a jittered
    mark within categories.

    Returns the figure built by `make_figure`.  A strip plot is rendered
    as a `go.Box` trace showing only its points: the box outline and fill
    are made fully transparent and every point is drawn, centered.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(
        args=locals(),
        constructor=go.Box,
        trace_patch=dict(
            boxpoints="all",
            pointpos=0,
            hoveron="points",
            fillcolor="rgba(255,255,255,0)",
            line={"color": "rgba(255,255,255,0)"},
            x0=" ",
            y0=" ",
        ),
        layout_patch=dict(boxmode=stripmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
strip.__doc__ = make_docstring(strip, append_dict=_cartesian_append_dict)
def scatter_3d(
    data_frame=None,
    x=None,
    y=None,
    z=None,
    color=None,
    symbol=None,
    size=None,
    text=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    error_z=None,
    error_z_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    size_max=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    symbol_sequence=None,
    symbol_map=None,
    opacity=None,
    log_x=False,
    log_y=False,
    log_z=False,
    range_x=None,
    range_y=None,
    range_z=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a 3D scatter plot, each row of `data_frame` is represented by a
    symbol mark in 3D space.

    Returns the figure built by `make_figure` using `go.Scatter3d` traces.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scatter3d)
# Replace the stub docstring with the generated per-argument documentation.
scatter_3d.__doc__ = make_docstring(scatter_3d)
def line_3d(
    data_frame=None,
    x=None,
    y=None,
    z=None,
    color=None,
    line_dash=None,
    text=None,
    line_group=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    error_x=None,
    error_x_minus=None,
    error_y=None,
    error_y_minus=None,
    error_z=None,
    error_z_minus=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    line_dash_sequence=None,
    line_dash_map=None,
    log_x=False,
    log_y=False,
    log_z=False,
    range_x=None,
    range_y=None,
    range_z=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a 3D line plot, each row of `data_frame` is represented as vertex of
    a polyline mark in 3D space.

    Returns the figure built by `make_figure` using `go.Scatter3d` traces.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scatter3d)
# Replace the stub docstring with the generated per-argument documentation.
line_3d.__doc__ = make_docstring(line_3d)
def scatter_ternary(
    data_frame=None,
    a=None,
    b=None,
    c=None,
    color=None,
    symbol=None,
    size=None,
    text=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    symbol_sequence=None,
    symbol_map=None,
    opacity=None,
    size_max=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a ternary scatter plot, each row of `data_frame` is represented by a
    symbol mark in ternary coordinates.

    Returns the figure built by `make_figure` using `go.Scatterternary`.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scatterternary)
# Replace the stub docstring with the generated per-argument documentation.
scatter_ternary.__doc__ = make_docstring(scatter_ternary)
def line_ternary(
    data_frame=None,
    a=None,
    b=None,
    c=None,
    color=None,
    line_dash=None,
    line_group=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    line_dash_sequence=None,
    line_dash_map=None,
    line_shape=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a ternary line plot, each row of `data_frame` is represented as
    vertex of a polyline mark in ternary coordinates.

    Returns the figure built by `make_figure` using `go.Scatterternary`.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scatterternary)
# Replace the stub docstring with the generated per-argument documentation.
line_ternary.__doc__ = make_docstring(line_ternary)
def scatter_polar(
    data_frame=None,
    r=None,
    theta=None,
    color=None,
    symbol=None,
    size=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    symbol_sequence=None,
    symbol_map=None,
    opacity=None,
    direction="clockwise",
    start_angle=90,
    size_max=None,
    range_r=None,
    range_theta=None,
    log_r=False,
    render_mode="auto",
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a polar scatter plot, each row of `data_frame` is represented by a
    symbol mark in polar coordinates.

    Returns the figure built by `make_figure` using `go.Scatterpolar`.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scatterpolar)
# Replace the stub docstring with the generated per-argument documentation.
scatter_polar.__doc__ = make_docstring(scatter_polar)
def line_polar(
    data_frame=None,
    r=None,
    theta=None,
    color=None,
    line_dash=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    line_group=None,
    text=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    line_dash_sequence=None,
    line_dash_map=None,
    direction="clockwise",
    start_angle=90,
    line_close=False,
    line_shape=None,
    render_mode="auto",
    range_r=None,
    range_theta=None,
    log_r=False,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a polar line plot, each row of `data_frame` is represented as vertex
    of a polyline mark in polar coordinates.

    Returns the figure built by `make_figure` using `go.Scatterpolar`.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scatterpolar)
# Replace the stub docstring with the generated per-argument documentation.
line_polar.__doc__ = make_docstring(line_polar)
def bar_polar(
    data_frame=None,
    r=None,
    theta=None,
    color=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    base=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    barnorm=None,
    barmode="relative",
    direction="clockwise",
    start_angle=90,
    range_r=None,
    range_theta=None,
    log_r=False,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a polar bar plot, each row of `data_frame` is represented as a wedge
    mark in polar coordinates.

    Returns the figure built by `make_figure` using `go.Barpolar` traces.
    """
    # All keyword arguments reach make_figure via locals(); bar layout
    # options are layout (not trace) properties.
    return make_figure(
        args=locals(),
        constructor=go.Barpolar,
        layout_patch=dict(barnorm=barnorm, barmode=barmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
bar_polar.__doc__ = make_docstring(bar_polar)
def choropleth(
    data_frame=None,
    lat=None,
    lon=None,
    locations=None,
    locationmode=None,
    geojson=None,
    featureidkey=None,
    color=None,
    facet_row=None,
    facet_col=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    projection=None,
    scope=None,
    center=None,
    fitbounds=None,
    basemap_visible=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a choropleth map, each row of `data_frame` is represented by a
    colored region mark on a map.

    Returns the figure built by `make_figure` using `go.Choropleth` traces.
    """
    # `locationmode` is a plain trace property (not a per-row data mapping),
    # so it is forwarded via the trace patch.
    return make_figure(
        args=locals(),
        constructor=go.Choropleth,
        trace_patch=dict(locationmode=locationmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
choropleth.__doc__ = make_docstring(choropleth)
def scatter_geo(
    data_frame=None,
    lat=None,
    lon=None,
    locations=None,
    locationmode=None,
    geojson=None,
    featureidkey=None,
    color=None,
    text=None,
    symbol=None,
    facet_row=None,
    facet_col=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    size=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    symbol_sequence=None,
    symbol_map=None,
    opacity=None,
    size_max=None,
    projection=None,
    scope=None,
    center=None,
    fitbounds=None,
    basemap_visible=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a geographic scatter plot, each row of `data_frame` is represented
    by a symbol mark on a map.

    Returns the figure built by `make_figure` using `go.Scattergeo` traces.
    """
    # `locationmode` is a plain trace property (not a per-row data mapping),
    # so it is forwarded via the trace patch.
    return make_figure(
        args=locals(),
        constructor=go.Scattergeo,
        trace_patch=dict(locationmode=locationmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
scatter_geo.__doc__ = make_docstring(scatter_geo)
def line_geo(
    data_frame=None,
    lat=None,
    lon=None,
    locations=None,
    locationmode=None,
    geojson=None,
    featureidkey=None,
    color=None,
    line_dash=None,
    text=None,
    facet_row=None,
    facet_col=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    line_group=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    line_dash_sequence=None,
    line_dash_map=None,
    projection=None,
    scope=None,
    center=None,
    fitbounds=None,
    basemap_visible=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a geographic line plot, each row of `data_frame` is represented as
    vertex of a polyline mark on a map.

    Returns the figure built by `make_figure` using `go.Scattergeo` traces.
    """
    # `locationmode` is a plain trace property (not a per-row data mapping),
    # so it is forwarded via the trace patch.
    return make_figure(
        args=locals(),
        constructor=go.Scattergeo,
        trace_patch=dict(locationmode=locationmode),
    )
# Replace the stub docstring with the generated per-argument documentation.
line_geo.__doc__ = make_docstring(line_geo)
def scatter_mapbox(
    data_frame=None,
    lat=None,
    lon=None,
    color=None,
    text=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    size=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    opacity=None,
    size_max=None,
    zoom=8,
    center=None,
    mapbox_style=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a Mapbox scatter plot, each row of `data_frame` is represented by a
    symbol mark on a Mapbox map.

    Returns the figure built by `make_figure` using `go.Scattermapbox`.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scattermapbox)
# Replace the stub docstring with the generated per-argument documentation.
scatter_mapbox.__doc__ = make_docstring(scatter_mapbox)
def choropleth_mapbox(
    data_frame=None,
    geojson=None,
    featureidkey=None,
    locations=None,
    color=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    opacity=None,
    zoom=8,
    center=None,
    mapbox_style=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a Mapbox choropleth map, each row of `data_frame` is represented by a
    colored region on a Mapbox map.

    Returns the figure built by `make_figure` using `go.Choroplethmapbox`.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Choroplethmapbox)
# Replace the stub docstring with the generated per-argument documentation.
choropleth_mapbox.__doc__ = make_docstring(choropleth_mapbox)
def density_mapbox(
    data_frame=None,
    lat=None,
    lon=None,
    z=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    opacity=None,
    zoom=8,
    center=None,
    mapbox_style=None,
    radius=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a Mapbox density map, each row of `data_frame` contributes to the intensity of
    the color of the region around the corresponding point on the map

    Returns the figure built by `make_figure` using `go.Densitymapbox`.
    """
    # `radius` is a plain trace property, so it is forwarded via the patch.
    return make_figure(
        args=locals(), constructor=go.Densitymapbox, trace_patch=dict(radius=radius)
    )
# Replace the stub docstring with the generated per-argument documentation.
density_mapbox.__doc__ = make_docstring(density_mapbox)
def line_mapbox(
    data_frame=None,
    lat=None,
    lon=None,
    color=None,
    text=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    line_group=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    zoom=8,
    center=None,
    mapbox_style=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a Mapbox line plot, each row of `data_frame` is represented as
    vertex of a polyline mark on a Mapbox map.

    Returns the figure built by `make_figure` using `go.Scattermapbox`.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Scattermapbox)
# Replace the stub docstring with the generated per-argument documentation.
line_mapbox.__doc__ = make_docstring(line_mapbox)
def scatter_matrix(
    data_frame=None,
    dimensions=None,
    color=None,
    symbol=None,
    size=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    symbol_sequence=None,
    symbol_map=None,
    opacity=None,
    size_max=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a scatter plot matrix (or SPLOM), each row of `data_frame` is
    represented by a multiple symbol marks, one in each cell of a grid of
    2D scatter plots, which plot each pair of `dimensions` against each
    other.

    Returns the figure built by `make_figure` using a `go.Splom` trace.
    """
    # dragmode="select" makes box selection the default interaction.
    return make_figure(
        args=locals(), constructor=go.Splom, layout_patch=dict(dragmode="select")
    )
# Replace the stub docstring with the generated per-argument documentation.
scatter_matrix.__doc__ = make_docstring(scatter_matrix)
def parallel_coordinates(
    data_frame=None,
    dimensions=None,
    color=None,
    labels=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a parallel coordinates plot, each row of `data_frame` is represented
    by a polyline mark which traverses a set of parallel axes, one for each
    of the `dimensions`.

    Returns the figure built by `make_figure` using a `go.Parcoords` trace.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Parcoords)
# Replace the stub docstring with the generated per-argument documentation.
parallel_coordinates.__doc__ = make_docstring(parallel_coordinates)
def parallel_categories(
    data_frame=None,
    dimensions=None,
    color=None,
    labels=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    title=None,
    template=None,
    width=None,
    height=None,
    dimensions_max_cardinality=50,
):
    """
    In a parallel categories (or parallel sets) plot, each row of
    `data_frame` is grouped with other rows that share the same values of
    `dimensions` and then plotted as a polyline mark through a set of
    parallel axes, one for each of the `dimensions`.

    Returns the figure built by `make_figure` using a `go.Parcats` trace.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Parcats)
# Replace the stub docstring with the generated per-argument documentation.
parallel_categories.__doc__ = make_docstring(parallel_categories)
def pie(
    data_frame=None,
    names=None,
    values=None,
    color=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    labels=None,
    title=None,
    template=None,
    width=None,
    height=None,
    opacity=None,
    hole=None,
):
    """
    In a pie plot, each row of `data_frame` is represented as a sector of a
    pie.

    Returns the figure built by `make_figure` using a `go.Pie` trace.
    """
    # Pie traces take their discrete colorway from the layout, not the
    # trace, so translate color_discrete_sequence into a layout patch.
    # NOTE: `layout_patch` is a local and is therefore also captured by the
    # locals() snapshot below, as in the original implementation.
    if color_discrete_sequence is not None:
        layout_patch = {"piecolorway": color_discrete_sequence}
    else:
        layout_patch = {}
    # The legend is only useful when sectors are named.
    return make_figure(
        args=locals(),
        constructor=go.Pie,
        trace_patch=dict(showlegend=(names is not None), hole=hole),
        layout_patch=layout_patch,
    )
# Replace the stub docstring with the generated per-argument documentation.
# BUGFIX: the two `hole` description literals previously concatenated
# without a separating space ("...pie.Use this...").
pie.__doc__ = make_docstring(
    pie,
    override_dict=dict(
        hole=[
            "float",
            "Sets the fraction of the radius to cut out of the pie. "
            "Use this to make a donut chart.",
        ],
    ),
)
def sunburst(
    data_frame=None,
    names=None,
    values=None,
    parents=None,
    path=None,
    ids=None,
    color=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    labels=None,
    title=None,
    template=None,
    width=None,
    height=None,
    branchvalues=None,
    maxdepth=None,
):
    """
    A sunburst plot represents hierarchical data as sectors laid out over
    several levels of concentric rings.

    The hierarchy can be given either as a `path` (list of columns from
    root to leaves) or explicitly via `ids`/`parents`, but not both.

    Returns the figure built by `make_figure` using a `go.Sunburst` trace.

    Raises:
        ValueError: if `path` is combined with `ids` or `parents`.
    """
    # Sunburst traces take their discrete colorway from the layout.
    # NOTE: `layout_patch` is a local and is therefore also captured by the
    # locals() snapshot below, as in the original implementation.
    if color_discrete_sequence is not None:
        layout_patch = {"sunburstcolorway": color_discrete_sequence}
    else:
        layout_patch = {}
    if path is not None and (ids is not None or parents is not None):
        # BUGFIX: the two message literals previously concatenated without a
        # separating space ("...parents`.These parameters...").
        raise ValueError(
            "Either `path` should be provided, or `ids` and `parents`. "
            "These parameters are mutually exclusive and cannot be passed together."
        )
    # A path-built hierarchy aggregates leaf values, so totals are the
    # appropriate default branch behavior.
    if path is not None and branchvalues is None:
        branchvalues = "total"
    return make_figure(
        args=locals(),
        constructor=go.Sunburst,
        trace_patch=dict(branchvalues=branchvalues, maxdepth=maxdepth),
        layout_patch=layout_patch,
    )
# Replace the stub docstring with the generated per-argument documentation.
sunburst.__doc__ = make_docstring(sunburst)
def treemap(
    data_frame=None,
    names=None,
    values=None,
    parents=None,
    ids=None,
    path=None,
    color=None,
    color_continuous_scale=None,
    range_color=None,
    color_continuous_midpoint=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    labels=None,
    title=None,
    template=None,
    width=None,
    height=None,
    branchvalues=None,
    maxdepth=None,
):
    """
    A treemap plot represents hierarchical data as nested rectangular
    sectors.

    The hierarchy can be given either as a `path` (list of columns from
    root to leaves) or explicitly via `ids`/`parents`, but not both.

    Returns the figure built by `make_figure` using a `go.Treemap` trace.

    Raises:
        ValueError: if `path` is combined with `ids` or `parents`.
    """
    # Treemap traces take their discrete colorway from the layout.
    # NOTE: `layout_patch` is a local and is therefore also captured by the
    # locals() snapshot below, as in the original implementation.
    if color_discrete_sequence is not None:
        layout_patch = {"treemapcolorway": color_discrete_sequence}
    else:
        layout_patch = {}
    if path is not None and (ids is not None or parents is not None):
        # BUGFIX: the two message literals previously concatenated without a
        # separating space ("...parents`.These parameters...").
        raise ValueError(
            "Either `path` should be provided, or `ids` and `parents`. "
            "These parameters are mutually exclusive and cannot be passed together."
        )
    # A path-built hierarchy aggregates leaf values, so totals are the
    # appropriate default branch behavior.
    if path is not None and branchvalues is None:
        branchvalues = "total"
    return make_figure(
        args=locals(),
        constructor=go.Treemap,
        trace_patch=dict(branchvalues=branchvalues, maxdepth=maxdepth),
        layout_patch=layout_patch,
    )
# Replace the stub docstring with the generated per-argument documentation.
treemap.__doc__ = make_docstring(treemap)
def funnel(
    data_frame=None,
    x=None,
    y=None,
    color=None,
    facet_row=None,
    facet_col=None,
    facet_col_wrap=0,
    facet_row_spacing=None,
    facet_col_spacing=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    text=None,
    animation_frame=None,
    animation_group=None,
    category_orders=None,
    labels=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    opacity=None,
    orientation=None,
    log_x=False,
    log_y=False,
    range_x=None,
    range_y=None,
    title=None,
    template=None,
    width=None,
    height=None,
):
    """
    In a funnel plot, each row of `data_frame` is represented as a
    rectangular sector of a funnel.

    Returns the figure built by `make_figure` using `go.Funnel` traces.
    """
    # All keyword arguments reach make_figure via locals().
    return make_figure(args=locals(), constructor=go.Funnel)
# Replace the stub docstring with the generated per-argument documentation.
funnel.__doc__ = make_docstring(funnel, append_dict=_cartesian_append_dict)
def funnel_area(
    data_frame=None,
    names=None,
    values=None,
    color=None,
    color_discrete_sequence=None,
    color_discrete_map=None,
    hover_name=None,
    hover_data=None,
    custom_data=None,
    labels=None,
    title=None,
    template=None,
    width=None,
    height=None,
    opacity=None,
):
    """
    In a funnel area plot, each row of `data_frame` is represented as a
    trapezoidal sector of a funnel.

    Returns the figure built by `make_figure` using a `go.Funnelarea` trace.
    """
    # Funnelarea traces take their discrete colorway from the layout, not
    # the trace.  NOTE: `layout_patch` is a local and is also captured by
    # the locals() snapshot below.
    if color_discrete_sequence is not None:
        layout_patch = {"funnelareacolorway": color_discrete_sequence}
    else:
        layout_patch = {}
    # The legend is only useful when sectors are named.
    return make_figure(
        args=locals(),
        constructor=go.Funnelarea,
        trace_patch=dict(showlegend=(names is not None)),
        layout_patch=layout_patch,
    )
# Replace the stub docstring with the generated per-argument documentation.
funnel_area.__doc__ = make_docstring(funnel_area)
| StarcoderdataPython |
9671894 | <filename>lib/game/ui/elements/coop_play.py<gh_stars>10-100
from lib.game.ui.general import UIElement, Rect, load_ui_image
# UI element declarations for the CO-OP play screens.  Each UIElement is
# configured with normalized screen rectangles (fractions of width/height)
# and either text-recognition settings (text, text_threshold,
# available_characters) or image-matching settings (image, image_threshold).
# Mission-selection and lobby labels.
COOP_PLAY_LABEL = UIElement(name='COOP_PLAY_LABEL')
COOP_PLAY_LABEL.description = "CO-OP label in mission selection."
COOP_PLAY_LABEL.text_rect = Rect(0.07857142857142857, 0.29733163913595934, 0.18428571428571427, 0.3392630241423126)
COOP_PLAY_LABEL.text = "CO-OP PLAY"
COOP_PLAY_LABEL.text_threshold = 150
COOP_PLAY_MENU_LABEL = UIElement(name='COOP_PLAY_MENU_LABEL')
COOP_PLAY_MENU_LABEL.description = "CO-OP LOBBY label at the top left corner of CO-OP menu."
COOP_PLAY_MENU_LABEL.text_rect = Rect(0.05142857142857143, 0.021601016518424398, 0.125, 0.07115628970775095)
COOP_PLAY_MENU_LABEL.text = "LOBBY"
COOP_PLAY_MENU_LABEL.text_threshold = 150
# Stage progress readout (digits and percent sign only).
COOP_STAGE_PERCENTAGE = UIElement(name='COOP_STAGE_PERCENTAGE')
COOP_STAGE_PERCENTAGE.description = "CO-OP stage percentage."
COOP_STAGE_PERCENTAGE.text_rect = Rect(0.702614427911823, 0.7783171556004411, 0.7342956657932411, 0.8003159445711723)
COOP_STAGE_PERCENTAGE.text_threshold = 160
COOP_STAGE_PERCENTAGE.available_characters = "0123456789%"
# Character deployment prompts and buttons.
COOP_DEPLOY_CHARACTER = UIElement(name='COOP_DEPLOY_CHARACTER')
COOP_DEPLOY_CHARACTER.description = "Deploy character message that shown if you haven't deployed a character."
COOP_DEPLOY_CHARACTER.text_rect = Rect(0.3942857142857143, 0.3875476493011436, 0.6078571428571429, 0.4485387547649301)
COOP_DEPLOY_CHARACTER.text = "Deploy a Character."
COOP_DEPLOY_CHARACTER.text_threshold = 120
COOP_FIRST_CHAR = UIElement(name='COOP_FIRST_CHAR')
COOP_FIRST_CHAR.description = "First available character to deploy."
COOP_FIRST_CHAR.button_rect = Rect(0.023443872902515484, 0.7888071323460268, 0.10569273969590628, 0.9403444384380925)
# Start buttons: same rectangle, different thresholds distinguish the
# active (bright) and inactive (dim) states.
COOP_START_BUTTON = UIElement(name='COOP_START_BUTTON')
COOP_START_BUTTON.description = "COOP start button when character is deployed."
COOP_START_BUTTON.text_rect = Rect(0.8578960123700078, 0.8805270807701718, 0.9446311809884927, 0.9469908115123059)
COOP_START_BUTTON.button_rect = Rect(0.8578960123700078, 0.8805270807701718, 0.9446311809884927, 0.9469908115123059)
COOP_START_BUTTON.text = "START"
COOP_START_BUTTON.text_threshold = 150
COOP_START_BUTTON_INACTIVE = UIElement(name='COOP_START_BUTTON_INACTIVE')
COOP_START_BUTTON_INACTIVE.description = "COOP start button when no character was deployed."
COOP_START_BUTTON_INACTIVE.text_rect = Rect(0.8578960123700078, 0.8805270807701718, 0.9446311809884927, 0.9469908115123059)
COOP_START_BUTTON_INACTIVE.button_rect = Rect(0.8578960123700078, 0.8805270807701718, 0.9446311809884927, 0.9469908115123059)
COOP_START_BUTTON_INACTIVE.text = "START"
COOP_START_BUTTON_INACTIVE.text_threshold = 20
# Reward acquisition flow.
COOP_REWARD = UIElement(name='COOP_REWARD')
COOP_REWARD.description = "COOP get reward button."
COOP_REWARD.text_rect = Rect(0.8242487486818023, 0.8818563553850144, 0.9820170295309432, 0.9469908115123059)
COOP_REWARD.button_rect = Rect(0.8242487486818023, 0.8818563553850144, 0.9820170295309432, 0.9469908115123059)
COOP_REWARD.text = "Reward Acquired"
COOP_REWARD.text_threshold = 150
COOP_REWARD_ACQUIRE = UIElement(name='COOP_REWARD_ACQUIRE')
COOP_REWARD_ACQUIRE.description = "COOP reward acquire windows with OK button to close it."
COOP_REWARD_ACQUIRE.text_rect = Rect(0.47207405541191994, 0.7595630908194879, 0.5266573942838976, 0.8207097231022513)
COOP_REWARD_ACQUIRE.button_rect = Rect(0.47207405541191994, 0.7595630908194879, 0.5266573942838976, 0.8207097231022513)
COOP_REWARD_ACQUIRE.text = "OK"
COOP_REWARD_ACQUIRE.text_threshold = 120
COOP_REWARD_ACQUIRE.available_characters = "OK"
COOP_REWARD_ACQUIRE_CONFIRM = UIElement(name='COOP_REWARD_ACQUIRE_CONFIRM')
COOP_REWARD_ACQUIRE_CONFIRM.description = "COOP reward acquire confirm window (for first reward)."
COOP_REWARD_ACQUIRE_CONFIRM.text_rect = Rect(0.5354531186506587, 0.7239043166136473, 0.6210184720082992, 0.7685172025445903)
COOP_REWARD_ACQUIRE_CONFIRM.button_rect = Rect(0.5354531186506587, 0.7239043166136473, 0.6210184720082992, 0.7685172025445903)
COOP_REWARD_ACQUIRE_CONFIRM.text = "ACQUIRE"
COOP_REWARD_ACQUIRE_CONFIRM.text_threshold = 160
COOP_REWARD_ACQUIRE_CONFIRM_TICKETS = UIElement(name='COOP_REWARD_ACQUIRE_CONFIRM_TICKETS')
COOP_REWARD_ACQUIRE_CONFIRM_TICKETS.description = "COOP reward acquire confirm window with tickets (for others reward)."
COOP_REWARD_ACQUIRE_CONFIRM_TICKETS.text_rect = Rect(0.6, 0.7331639135959339, 0.6785714285714286, 0.7776365946632783)
COOP_REWARD_ACQUIRE_CONFIRM_TICKETS.button_rect = Rect(0.4835714285714286, 0.7242693773824651, 0.6835714285714286, 0.7801778907242694)
COOP_REWARD_ACQUIRE_CONFIRM_TICKETS.text = "ACQUIRE"
COOP_REWARD_ACQUIRE_CONFIRM_TICKETS.text_threshold = 160
# Toggles; both reuse the same template image for matching.
COOP_REPEAT_TOGGLE = UIElement(name='COOP_REPEAT_TOGGLE')
COOP_REPEAT_TOGGLE.description = "Toggle for repeat."
COOP_REPEAT_TOGGLE.image_rect = Rect(0.8307291666666666, 0.7731481481481481, 0.8505208333333333, 0.8083333333333333)
COOP_REPEAT_TOGGLE.button_rect = Rect(0.8307291666666666, 0.7731481481481481, 0.8505208333333333, 0.8083333333333333)
COOP_REPEAT_TOGGLE.image_threshold = 0.7
COOP_REPEAT_TOGGLE.image = load_ui_image("repeat_toggle.png")
COOP_QUICK_MATCH_TOGGLE = UIElement(name='COOP_QUICK_MATCH_TOGGLE')
COOP_QUICK_MATCH_TOGGLE.description = "Toggle for quick match."
COOP_QUICK_MATCH_TOGGLE.image_rect = Rect(0.8307291666666666, 0.8194444444444444, 0.8505208333333333, 0.8546296296296296)
COOP_QUICK_MATCH_TOGGLE.button_rect = Rect(0.8307291666666666, 0.8194444444444444, 0.8505208333333333, 0.8546296296296296)
COOP_QUICK_MATCH_TOGGLE.image_threshold = 0.7
COOP_QUICK_MATCH_TOGGLE.image = load_ui_image("repeat_toggle.png")
| StarcoderdataPython |
9653493 | import time
from PyQt5.QtCore import QObject, QThread, pyqtSlot, pyqtSignal
from PyQt5.QtWidgets import QApplication
class LoopTrigger(QObject):
    """
    Synchronizes plotting of all graphs, similar to QTimer, but waits (via
    a semaphore) until all plots have been painted before triggering the
    next update.
    """
    # Emitted once per loop iteration to request a redraw of the plots.
    sigUpdate=pyqtSignal()
    def __init__(self,semaphore, minDelay=0.1):
        """
        :param semaphore: acquired once per iteration; consumers release it
            when painting is done, throttling the loop.
        :param minDelay: minimum time (seconds) between updates.
        """
        super().__init__()
        self.minDelay = minDelay
        self.doLoop = True
        self.app= QApplication.instance()
        self.semaphore=semaphore
    @pyqtSlot()
    def loop(self):
        """Run the update loop until stop() is called (blocking)."""
        while self.doLoop:
            # Target timestamp for the next iteration.
            t = time.time() + self.minDelay
            self.sigUpdate.emit()
            # Block until the consumer signals that painting finished.
            self.semaphore.acquire(1)
            # Sleep only for whatever remains of the minimum delay.
            waitTime = t - time.time()
            if waitTime > 0:
                time.sleep(waitTime)
            # Keep the Qt event queue responsive from this thread.
            self.app.processEvents()
    @pyqtSlot()
    def stop(self):
        """Request loop() to exit after the current iteration."""
        self.doLoop = False
| StarcoderdataPython |
6484564 | <gh_stars>0
"""Binary Tree Inorder Traversal"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    """In-order traversal of a binary tree (left subtree, node, right subtree)."""

    def inorderTraversal(self, root):
        """
        Return the in-order sequence of node values.

        :type root: TreeNode (any object with .val/.left/.right, or None)
        :rtype: List[int]
        """
        res = []
        self.dfs(root, res)
        return res

    def dfs(self, node, res):
        """Recursively append the subtree's values to ``res`` in in-order."""
        # BUGFIX: the original body continued past the recursion with pasted
        # alternative implementations referencing an undefined name `root`,
        # raising NameError on any non-empty tree; that dead/broken code is
        # removed.
        if not node:
            return
        self.dfs(node.left, res)
        res.append(node.val)
        self.dfs(node.right, res)
| StarcoderdataPython |
3375024 | <gh_stars>0
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.airflow.airflow_adapter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import mock
import tensorflow as tf
from tfx.components.base import base_driver
from tfx.components.base import base_executor
from tfx.orchestration import data_types
from tfx.orchestration.airflow import airflow_adapter
from tfx.orchestration.airflow import airflow_component
from tfx.utils import logging_utils
from tfx.utils import types
class AirflowAdapterTest(tf.test.TestCase):
    """Unit tests for AirflowAdapter with mocked driver, executor and metadata."""

    def setUp(self):
        # One input and one output artifact, each tagged with the xcom source
        # (key + component id) that the adapter uses to pull/push values.
        self.input_one = types.TfxArtifact('INPUT_ONE')
        self.input_one.source = airflow_component._OrchestrationSource(
            'input_one_key', 'input_one_component_id')
        self.output_one = types.TfxArtifact('OUTPUT_ONE')
        self.output_one.source = airflow_component._OrchestrationSource(
            'output_one_key', 'output_one_component_id')
        # JSON forms as the artifacts would travel through Airflow xcom.
        self.input_one_json = json.dumps([self.input_one.json_dict()])
        self.output_one_json = json.dumps([self.output_one.json_dict()])
        self._logger_config = logging_utils.LoggerConfig()

    def _setup_mocks(self, mock_metadata_class, mock_driver_class,
                     mock_executor_class, mock_get_logger):
        """Wire up all patched classes used by the tests below."""
        self._setup_mock_driver(mock_driver_class)
        self._setup_mock_executor(mock_executor_class)
        self._setup_mock_metadata(mock_metadata_class)
        self._setup_mock_task_instance()
        self._mock_get_logger = mock_get_logger

    def _setup_adapter_and_args(self):
        """Build an AirflowAdapter plus the dicts it was constructed from."""
        input_dict = {u'input_one': [self.input_one]}
        output_dict = {u'output_one': [self.output_one]}
        exec_properties = {}
        driver_args = {}
        adapter = airflow_adapter.AirflowAdapter(
            component_name='TfxComponent',
            input_dict=input_dict,
            output_dict=output_dict,
            exec_properties=exec_properties,
            driver_class=base_driver.BaseDriver,
            executor_class=base_executor.BaseExecutor,
            driver_args=driver_args,
            additional_pipeline_args=None,
            metadata_connection_config='metadata_connection_config',
            logger_config=self._logger_config)
        return adapter, input_dict, output_dict, exec_properties, driver_args

    def _setup_mock_metadata(self, mock_metadata_class):
        """Make the Metadata mock usable as a context manager."""
        mock_metadata = mock.Mock()
        enter_mock = mock.Mock()
        exit_mock = mock.Mock()
        mock_metadata_class.return_value = mock_metadata
        mock_metadata.__enter__ = enter_mock
        mock_metadata.__exit__ = exit_mock
        # "with Metadata(...) as m" yields the same mock instance.
        enter_mock.return_value = mock_metadata
        self.mock_metadata = mock_metadata

    def _setup_mock_driver(self, mock_driver_class):
        """Expose the driver's prepare_execution as a configurable mock."""
        mock_driver = mock.Mock()
        prepare_execution_mock = mock.Mock()
        mock_driver_class.return_value = mock_driver
        mock_driver.prepare_execution = prepare_execution_mock
        self.mock_driver = mock_driver

    def _setup_mock_executor(self, mock_executor_class):
        """Create the executor mock; __name__ is needed for logging/repr."""
        mock_executor = mock.Mock()
        mock_executor_class.return_value = mock_executor
        mock_executor_class.__name__ = 'mock_executor_class'
        self.mock_executor = mock_executor

    def _setup_mock_task_instance(self):
        """Mock of the Airflow TaskInstance used for xcom pull/push."""
        mock_task_instance = mock.Mock()
        self.mock_task_instance = mock_task_instance

    @mock.patch('tfx.utils.logging_utils.get_logger')
    @mock.patch(
        'tfx.components.base.base_executor.BaseExecutor')
    @mock.patch('tfx.components.base.base_driver.BaseDriver')
    @mock.patch('tfx.orchestration.metadata.Metadata')
    def test_cached_execution(self, mock_metadata_class, mock_driver_class,
                              mock_executor_class, mock_get_logger):
        """No execution_id from the driver => cache hit => 'cached_branch'."""
        self._setup_mocks(mock_metadata_class, mock_driver_class,
                          mock_executor_class, mock_get_logger)
        adapter, input_dict, output_dict, exec_properties, driver_args = self._setup_adapter_and_args(
        )
        self.mock_task_instance.xcom_pull.side_effect = [self.input_one_json]
        # ExecutionDecision without an execution_id signals a cached result.
        self.mock_driver.prepare_execution.return_value = data_types.ExecutionDecision(
            input_dict, output_dict, exec_properties)
        check_result = adapter.check_cache_and_maybe_prepare_execution(
            'cached_branch',
            'uncached_branch',
            ti=self.mock_task_instance)
        mock_get_logger.assert_called_with(self._logger_config)
        mock_driver_class.assert_called_with(metadata_handler=self.mock_metadata)
        self.mock_driver.prepare_execution.called_with(input_dict, output_dict,
                                                       exec_properties, driver_args)
        self.mock_task_instance.xcom_pull.assert_called_with(
            dag_id='input_one_component_id', key='input_one_key')
        # Cached outputs are published straight to xcom for downstream tasks.
        self.mock_task_instance.xcom_push.assert_called_with(
            key='output_one_key', value=self.output_one_json)
        self.assertEqual(check_result, 'cached_branch')

    @mock.patch('tfx.utils.logging_utils.get_logger')
    @mock.patch(
        'tfx.components.base.base_executor.BaseExecutor')
    @mock.patch('tfx.components.base.base_driver.BaseDriver')
    @mock.patch('tfx.orchestration.metadata.Metadata')
    def test_new_execution(self, mock_metadata_class, mock_driver_class,
                           mock_executor_class, mock_get_logger):
        """An execution_id from the driver => cache miss => 'uncached_branch'."""
        self._setup_mocks(mock_metadata_class, mock_driver_class,
                          mock_executor_class, mock_get_logger)
        adapter, input_dict, output_dict, exec_properties, driver_args = self._setup_adapter_and_args(
        )
        self.mock_task_instance.xcom_pull.side_effect = [self.input_one_json]
        self.mock_driver.prepare_execution.return_value = data_types.ExecutionDecision(
            input_dict, output_dict, exec_properties, execution_id=12345)
        check_result = adapter.check_cache_and_maybe_prepare_execution(
            'cached_branch',
            'uncached_branch',
            ti=self.mock_task_instance)
        mock_driver_class.assert_called_with(metadata_handler=self.mock_metadata)
        self.mock_driver.prepare_execution.called_with(input_dict, output_dict,
                                                       exec_properties, driver_args)
        self.mock_task_instance.xcom_pull.assert_called_with(
            dag_id='input_one_component_id', key='input_one_key')
        # For an uncached run, the full execution context is handed to the
        # downstream exec task via xcom.
        calls = [
            mock.call(
                key='_exec_inputs', value=types.jsonify_tfx_type_dict(input_dict)),
            mock.call(
                key='_exec_outputs',
                value=types.jsonify_tfx_type_dict(output_dict)),
            mock.call(key='_exec_properties', value=json.dumps(exec_properties)),
            mock.call(key='_execution_id', value=12345)
        ]
        self.mock_task_instance.xcom_push.assert_has_calls(calls)
        self.assertEqual(check_result, 'uncached_branch')

    @mock.patch('tfx.utils.logging_utils.get_logger')
    @mock.patch(
        'tfx.components.base.base_executor.BaseExecutor')
    @mock.patch('tfx.components.base.base_driver.BaseDriver')
    @mock.patch('tfx.orchestration.metadata.Metadata')
    def test_python_exec(self, mock_metadata_class, mock_driver_class,
                         mock_executor_class, mock_get_logger):
        """python_exec restores context from xcom and invokes the executor."""
        self._setup_mocks(mock_metadata_class, mock_driver_class,
                          mock_executor_class, mock_get_logger)
        adapter, input_dict, output_dict, exec_properties, _ = self._setup_adapter_and_args(
        )
        # xcom_pull returns, in order: inputs, outputs, properties, exec id.
        self.mock_task_instance.xcom_pull.side_effect = [
            types.jsonify_tfx_type_dict(input_dict),
            types.jsonify_tfx_type_dict(output_dict),
            json.dumps(exec_properties), 12345
        ]
        adapter.python_exec('cache_task_name', ti=self.mock_task_instance)
        calls = [
            mock.call(key='_exec_inputs', task_ids='cache_task_name'),
            mock.call(key='_exec_outputs', task_ids='cache_task_name'),
            mock.call(key='_exec_properties', task_ids='cache_task_name'),
            mock.call(key='_execution_id', task_ids='cache_task_name')
        ]
        self.assertEqual(
            json.dumps(exec_properties), json.dumps(adapter._exec_properties))
        mock_executor_class.assert_called_once()
        self.mock_executor.Do.assert_called_with(
            adapter._input_dict, adapter._output_dict, adapter._exec_properties)
        self.mock_task_instance.xcom_pull.assert_has_calls(calls)
        self.mock_task_instance.xcom_push.assert_called_once()

    @mock.patch('tfx.utils.logging_utils.get_logger')
    @mock.patch(
        'tfx.components.base.base_executor.BaseExecutor')
    @mock.patch('tfx.components.base.base_driver.BaseDriver')
    @mock.patch('tfx.orchestration.metadata.Metadata')
    def test_publish_exec(self, mock_metadata_class, mock_driver_class,
                          mock_executor_class, mock_get_logger):
        """publish_exec records the finished execution in metadata."""
        self._setup_mocks(mock_metadata_class, mock_driver_class,
                          mock_executor_class, mock_get_logger)
        adapter, input_dict, output_dict, exec_properties, _ = self._setup_adapter_and_args(
        )
        # Same context as python_exec plus the executor's return value.
        self.mock_task_instance.xcom_pull.side_effect = [
            types.jsonify_tfx_type_dict(input_dict),
            types.jsonify_tfx_type_dict(output_dict),
            json.dumps(exec_properties), 12345,
            types.jsonify_tfx_type_dict(output_dict)
        ]
        output_artifact_published = types.TfxArtifact('O')
        output_artifact_published.source = self.output_one.source
        self.mock_metadata.publish_execution.return_value = {
            u'output_one': [output_artifact_published]
        }
        adapter.publish_exec(
            'cache_task_name', 'exec_task_name', ti=self.mock_task_instance)
        calls = [
            mock.call(key='_exec_inputs', task_ids='cache_task_name'),
            mock.call(key='_exec_outputs', task_ids='cache_task_name'),
            mock.call(key='_exec_properties', task_ids='cache_task_name'),
            mock.call(key='_execution_id', task_ids='cache_task_name'),
            mock.call(key='return_value', task_ids='exec_task_name')
        ]
        self.mock_metadata.publish_execution.assert_called_with(
            12345, adapter._input_dict, adapter._output_dict)
        self.mock_task_instance.xcom_pull.assert_has_calls(calls)
        self.mock_task_instance.xcom_push.assert_called_once()
# Allow running this test module directly.
if __name__ == '__main__':
    tf.test.main()
| StarcoderdataPython |
8041785 | <reponame>xiaol/keras_<gh_stars>1-10
import numpy as np
import pytest
from keras.models import Sequential, weighted_objective
from keras.layers.core import TimeDistributedDense, Masking
from keras import objectives
from keras import backend as K
@pytest.mark.skipif(K._BACKEND == 'tensorflow',
                    reason='currently not working with TensorFlow')
def test_masking():
    """Masked (all-zero) timesteps must not contribute to the training loss."""
    np.random.seed(1337)
    # The second sample ends with two all-zero timesteps that Masking
    # (mask_value=0) should exclude from the loss.
    X = np.array(
        [[[1, 1], [2, 1], [3, 1], [5, 5]],
         [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)
    model = Sequential()
    model.add(Masking(mask_value=0, input_shape=(None, 2)))
    model.add(TimeDistributedDense(1, init='one'))
    model.compile(loss='mse', optimizer='sgd')
    y = model.predict(X)
    # Fit toward 4*y; with masking active the first-epoch loss is exactly 285.
    history = model.fit(X, 4 * y, nb_epoch=1, batch_size=2, verbose=1)
    assert history.history['loss'][0] == 285.
def test_loss_masking():
    """Exercise weighted_objective('mae') with a per-timestep mask.

    NOTE(review): the masked loss ``out`` is computed but never asserted on,
    so this test only verifies that the evaluation runs without error —
    consider adding an expected-value assertion.
    """
    weighted_loss = weighted_objective(objectives.get('mae'))
    shape = (3, 4, 2)
    X = np.arange(24).reshape(shape)
    Y = 2 * X
    # Normally the trailing 1 is added by standardize_weights
    weights = np.ones((3,))
    mask = np.ones((3, 4))
    mask[1, 0] = 0  # mask out the first timestep of the second sample
    out = K.eval(weighted_loss(K.variable(X),
                               K.variable(Y),
                               K.variable(weights),
                               K.variable(mask)))
# Allow running this test module directly with pytest.
if __name__ == '__main__':
    pytest.main([__file__])
| StarcoderdataPython |
6485085 | import datetime
import json
import os
import requests
from skt.vault_utils import get_secrets
# Identification headers for catalog requests, taken from the environment.
# NOTE(review): ``headers`` is never passed to any requests.get() call in
# this module chunk — confirm whether it should be attached to the requests.
headers = {
    "Catalog-User": os.environ.get("USER", "unknown_user"),
    "Catalog-Hostname": os.environ.get("HOSTNAME", "unknown_hostname"),
    "Catalog-NB-User": os.environ.get("NB_USER", None),
}
# Vault secret names holding the service URLs (key "url_prd").
DATA_CATALOG_SECRETS_NAME = "data_catalog"
DATA_LINEAGE_SECRETS_NAME = "data_lineage"
def get_lineage(table):
    """Fetch the lineage record for *table* from the lineage service."""
    cfg = get_secrets(DATA_LINEAGE_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/relationships/lineage/node/{table}")
    return response.json()
def get_lineage_network(table, sequence=True):
    """Request the lineage network for *table*.

    Note: unlike get_lineage(), this returns the raw ``requests`` Response
    object (not decoded JSON) — the caller must decode it.
    """
    cfg = get_secrets(DATA_LINEAGE_SECRETS_NAME)
    url = f"{cfg['url_prd']}/relationships/lineage/node/{table}/network?sequence={sequence}"
    return requests.get(url)
def get_sources():
    """List all data sources registered in the catalog."""
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/sources")
    return response.json()
def get_source(source):
    """Fetch metadata for a single catalog *source*."""
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/sources/{source}")
    return response.json()
def get_table(table_id):
    """Fetch a table resource from the catalog by its id."""
    return get_resource("tables", table_id)
def get_tables(source, limit=1000):
    """List up to *limit* tables registered under *source*."""
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/sources/{source}/tables?limit={limit}")
    return response.json()
def get_table_detail(source, table_id):
    """Deprecated alias for get_table(); *source* is ignored."""
    print("deprecated! use 'get_table(table_id)' ")
    return get_table(table_id)
def get_column(column_id):
    """Fetch a column resource from the catalog by its id."""
    return get_resource("columns", column_id)
def get_columns(source, table_id):
    """List the columns of *table_id* under *source*."""
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/sources/{source}/tables/{table_id}/columns")
    return response.json()
def get_column_detail(source, column_id):
    """Deprecated alias for get_column(); *source* is ignored."""
    print("deprecated! use 'get_column(column_id)' ")
    return get_column(column_id)
def get_resource(resource_name, resource_id):
    """GET /v1/resources/<resource_name>/<resource_id> and decode the JSON body."""
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/v1/resources/{resource_name}/{resource_id}")
    return response.json()
def get_query(query_id):
    """Fetch a query (process) resource from the catalog by its id."""
    return get_resource("processes", query_id)
def get_queries(source, limit=100):
    """List up to *limit* queries (processes) registered under *source*."""
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/sources/{source}/processes?limit={limit}")
    return response.json()
def get_query_detail(source, query_id):
    """Deprecated alias for get_query(); *source* is ignored."""
    print("deprecated! use 'get_query(query_id)' ")
    return get_query(query_id)
def search_table_by_name(name, **kwargs):
    """Search catalog tables by *name*; extra kwargs become query params."""
    params = dict(kwargs, name=name)
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/v1/search/tables", params=params)
    return response.json()
def search_table_by_dc(description, **kwargs):
    """Search catalog tables by *description*; extra kwargs become query params."""
    params = dict(kwargs, description=description)
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/v1/search/tables", params=params)
    return response.json()
def search_column_by_dc(description, **kwargs):
    """Search catalog columns by *description*; extra kwargs become query params."""
    params = dict(kwargs, description=description)
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/v1/search/columns", params=params)
    return response.json()
def search_column_by_name(name, **kwargs):
    """Search catalog columns by *name*; extra kwargs become query params."""
    params = dict(kwargs, name=name)
    cfg = get_secrets(DATA_CATALOG_SECRETS_NAME)
    response = requests.get(f"{cfg['url_prd']}/v1/search/columns", params=params)
    return response.json()
def search_queries_by_table_id(table_id, **kwargs):
    """Search catalog processes (queries) that read or write *table_id*.

    Supported kwargs: limit, fuzziness, operator, offset, fields, must,
    sort ("asc"/"desc" on start_time), start_date, end_date.
    """
    limit = kwargs.get("limit", 100)
    fuzziness = kwargs.get("fuzziness", "AUTO")
    operator = kwargs.get("operator", "and")
    offset = kwargs.get("offset", None)
    fields = kwargs.get("fields", None)
    must = kwargs.get("must", None)
    sort = kwargs.get("sort", "desc")
    start_date = kwargs.get("start_date", None)
    end_date = kwargs.get("end_date", None)
    secrets = get_secrets(DATA_CATALOG_SECRETS_NAME)
    es_sort = [{"start_time": sort}]
    # Match the table either as an input or as an output of the query.
    params = {
        "inputs": table_id,
        "outputs": table_id,
        "limit": limit,
        "fuzziness": fuzziness,
        "offset": offset,
        "operator": operator,
        "fields": fields,
        "must": must,
        "sort": json.dumps(es_sort),
    }
    # Optional half-open time window [start_date, end_date) on start_time.
    if start_date or end_date:
        range_filter = {"range": {"start_time": {}}}
        if start_date:
            range_filter["range"]["start_time"]["gte"] = start_date
        if end_date:
            range_filter["range"]["start_time"]["lt"] = end_date
        params["range_filter"] = json.dumps(range_filter)
    return requests.get(secrets["url_prd"] + "/v1/search/processes", params=params).json()
def search_queries_by_column(table_id, column_name, **kwargs):
    """Find catalog processes (queries) that touch *column_name* of *table_id*.

    *table_id* must be "[dataset].[table_name]". The best-matching column is
    resolved via catalog search, its query relationships are fetched from the
    lineage service, and each related query's catalog record is returned.

    NOTE(review): ``table_id.split(".")`` assumes exactly one dot — a
    fully-qualified id with more dots would raise ValueError; confirm inputs.
    """
    limit = kwargs.get("limit", 100)
    fuzziness = kwargs.get("fuzziness", "AUTO")
    operator = kwargs.get("operator", "and")
    offset = kwargs.get("offset", None)
    must = kwargs.get("must", None)
    sort = kwargs.get("sort", "desc")
    start_date = kwargs.get("start_date", None)
    end_date = kwargs.get("end_date", None)
    catalog_secrets = get_secrets(DATA_CATALOG_SECRETS_NAME)
    lineage_secrets = get_secrets(DATA_LINEAGE_SECRETS_NAME)
    dataset = None
    table_name = None
    result = []
    if "." in table_id:
        dataset, table_name = table_id.split(".")
    if dataset:
        # Resolve the fully-qualified column id via catalog search.
        column_id = ".".join([dataset, table_name, column_name])
        params = {
            "qualified_name": column_id,
            "fields": "qualified_name",
            "fuzziness": fuzziness,
            "operator": operator,
            "must": must,
        }
        response = requests.get(catalog_secrets["url_prd"] + "/v1/search/columns", params=params).json()
        column_list = response.get("qualified_name", {}).get("hits", [])
        if column_list:
            # Take the highest-scoring match only.
            max_score_column_id = column_list[0]["_source"]["qualified_name"]
            params = {
                "offset": offset,
                "limit": limit,
                "sort_by_time": True,
                "order": sort,
                "start_date": start_date,
                "end_date": end_date,
            }
            relationship_list = requests.get(
                lineage_secrets["url_prd"] + "/relationships/queries/resource/columns/" + max_score_column_id,
                params=params,
            ).json()
            # One catalog lookup per related query (query_id is "<id>@<project>").
            for each_relationship in relationship_list:
                query_id = each_relationship["source"]
                project_id = query_id.split("@")[1]
                response = requests.get(
                    catalog_secrets["url_prd"] + f"/sources/bigquery-{project_id}/processes/{query_id}"
                ).json()
                result.append(response)
    else:
        raise Exception("You should use [dataset].[table_name] style on table_id argument. Abort")
    return result
def get_user_queries(user_name, start_date=None, end_date=None, **kwargs):
    """Return search hits for queries run by *user_name*.

    Defaults to the last day ([yesterday, today)) when no dates are given.
    Supported kwargs: sort ("asc"/"desc"), limit.
    """
    secrets = get_secrets(DATA_CATALOG_SECRETS_NAME)
    # Explicit date range => chronological order by default.
    default_order = "asc" if (start_date or end_date) else "desc"
    order = kwargs.get("sort", default_order)
    limit = kwargs.get("limit", 100)
    es_sort = [{"start_time": order}]
    es_limit = min(100, limit)  # per-page size is capped at 100
    params = {"user_name": user_name, "limit": es_limit, "sort": json.dumps(es_sort)}
    gte = start_date or (datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    lt = end_date or datetime.datetime.now().strftime("%Y-%m-%d")
    range_filter = {"start_time": {"gte": gte, "lt": lt}}
    params["range_filter"] = json.dumps(range_filter)
    total_queries = []
    response = requests.get(secrets["url_prd"] + "/v1/search/processes", params=params).json()
    total_queries.extend(response["user_name"]["hits"])
    total = response["user_name"]["total"]["value"]
    # Paginate with search-after (the last hit's sort key as offset).
    # NOTE(review): the condition "limit < len(total_queries)" looks inverted
    # (presumably "len(total_queries) < limit" was intended); as written the
    # loop can never run when the first page returns <= limit hits — confirm.
    while total > len(total_queries) and limit < len(total_queries):
        params["offset"] = json.dumps(total_queries[-1]["sort"])
        response = requests.get(secrets["url_prd"] + "/v1/search/processes", params=params).json()
        total_queries.extend(response["user_name"]["hits"])
    return total_queries
def get_user_data_access(user_name, start_date=None, end_date=None, timeseries=False, **kwargs):
    """Summarize the tables and columns *user_name*'s queries touched.

    With ``timeseries=True`` returns one record per query (inputs, outputs,
    columns, start_time, query_id); otherwise returns deduplicated
    {"tables": [...], "columns": [...]} lists. Defaults to the last day.
    """
    secrets = get_secrets(DATA_CATALOG_SECRETS_NAME)
    lineage_secrets = get_secrets(DATA_LINEAGE_SECRETS_NAME)
    default_order = "asc" if (start_date or end_date) else "desc"
    order = kwargs.get("sort", default_order)
    limit = kwargs.get("limit", 1000)
    es_sort = [{"start_time": order}]
    es_limit = min(1000, limit)  # per-page size is capped at 1000
    params = {
        "user_name": user_name,
        "sort": json.dumps(es_sort),
        "limit": es_limit,
        "fields": json.dumps(["inputs", "outputs"]),
    }
    gte = start_date or (datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    lt = end_date or datetime.datetime.now().strftime("%Y-%m-%d")
    range_filter = {"start_time": {"gte": gte, "lt": lt}}
    params["range_filter"] = json.dumps(range_filter)
    total_queries = []
    response = requests.get(secrets["url_prd"] + "/v1/search/processes", params=params).json()
    total_queries.extend(response["user_name"]["hits"])
    total = response["user_name"]["total"]["value"]
    # NOTE(review): same pagination condition as get_user_queries — it looks
    # inverted ("len(total_queries) < limit" was presumably intended); confirm.
    while total > len(total_queries) and limit < len(total_queries):
        params["offset"] = json.dumps(total_queries[-1]["sort"])
        response = requests.get(secrets["url_prd"] + "/v1/search/processes", params=params).json()
        total_queries.extend(response["user_name"]["hits"])
    result = []
    table_dict = {}
    column_dict = {}
    for each_query in total_queries:
        query_id = each_query["_id"]
        if timeseries:
            inputs = each_query["_source"].get("inputs", [])
            outputs = each_query["_source"].get("outputs", [])
            # NOTE(review): the search `params` dict is reused as query params
            # for the lineage endpoint here — confirm this is intentional.
            response = requests.get(
                lineage_secrets["url_prd"] + f"/relationships/queries/query/{query_id}/columns", params=params
            ).json()
            column_list = list(map(lambda each: each["target"], response))
            result.append(
                {
                    "inputs": inputs,
                    "outputs": outputs,
                    "columns": column_list,
                    "start_time": each_query["sort"][0],
                    "query_id": query_id,
                }
            )
        else:
            # Aggregate mode: dicts used as ordered de-duplicating sets.
            inputs = each_query["_source"].get("inputs", []) or []
            outputs = each_query["_source"].get("outputs", []) or []
            for each in inputs:
                if each not in table_dict:
                    table_dict[each] = 1
            for each in outputs:
                if each not in table_dict:
                    table_dict[each] = 1
            response = requests.get(
                lineage_secrets["url_prd"] + f"/relationships/queries/query/{query_id}/columns", params=params
            ).json()
            column_list = list(map(lambda each: each["target"], response))
            for each_column in column_list:
                if each_column not in column_dict:
                    column_dict[each_column] = 1
    if timeseries:
        return result
    else:
        return {"tables": list(table_dict.keys()), "columns": list(column_dict.keys())}
def get_table_top_n_tables(n, start_date=None, end_date=None):
    """Return the top-*n* tables by query usage within the optional date bounds."""
    cfg = get_secrets(DATA_LINEAGE_SECRETS_NAME)
    query = {"top_n": n, "start_date": start_date, "end_date": end_date}
    reply = requests.get(cfg["url_prd"] + "/relationships/queries/top_n/tables", params=query)
    return reply.json()
def get_table_top_n_columns(n, start_date=None, end_date=None):
    """Return the top-*n* columns by query usage within the optional date bounds."""
    cfg = get_secrets(DATA_LINEAGE_SECRETS_NAME)
    query = {"top_n": n, "start_date": start_date, "end_date": end_date}
    reply = requests.get(cfg["url_prd"] + "/relationships/queries/top_n/columns", params=query)
    return reply.json()
| StarcoderdataPython |
4938209 | <reponame>ckarageorgkaneen/pybpod-gui-plugin
# !/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import os
from AnyQt.QtWidgets import QFileDialog, QMessageBox
import pyforms as app
from pyforms.controls import ControlText
from pyforms_generic_editor.models.project import GenericProject
from pybpodgui_api.models.project import Project
logger = logging.getLogger(__name__)
class ProjectWindow(Project, GenericProject):
    """Presents the Project entity as a PyForms GUI window."""

    def __init__(self):
        """Build the form (a single name field) and initialize both bases."""
        GenericProject.__init__(self)
        self._name = ControlText('Project name')
        self.formset = ['_name', ' ']
        # Keep the model's name in sync when the user edits the text field.
        self._name.changed_event = self._name_changed_evt
        Project.__init__(self)

    def _name_changed_evt(self):
        # Skip the update while the setter itself is writing to the field
        # (see the _update_name flag below) to avoid recursion.
        if not hasattr(self, '_update_name') or not self._update_name:
            self.name = self._name.value

    @property
    def name(self):
        """Project name, backed by the text field."""
        return self._name.value

    @name.setter
    def name(self, value):
        self._update_name = True  # Flag to avoid recurse calls when editing the name text field
        self._name.value = value
        self._update_name = False

    def save(self, project_path=None):
        """Save the project to *project_path*, its existing path, or a
        user-selected directory (a subfolder named after the project)."""
        if project_path:
            Project.save(self, project_path)
        elif self.path:
            Project.save(self, self.path)
        else:
            folder = QFileDialog.getExistingDirectory(self, "Select a directory to save the project: {0}".format(
                self.name))
            if folder:
                folder = os.path.join(folder, self.name)
                try:
                    Project.save(self, str(folder))
                except FileExistsError as err:
                    # A project folder with this name already exists: warn
                    # instead of overwriting.
                    logger.warning(str(err))
                    QMessageBox.warning(self, 'Project exists',
                                        'Project with same name already exists. Please select another path.')

    def close(self, silent=False):
        """Detach this project from the projects collection and close the window."""
        self.projects -= self
        super(ProjectWindow, self).close(silent)
# Execute the application standalone (normally launched via the plugin).
if __name__ == "__main__":
    app.startApp(ProjectWindow)
| StarcoderdataPython |
8029052 | from configparser import RawConfigParser
import youtube_dl
from mpserver.grpc import mmp_pb2_grpc as rpc
from mpserver.grpc import mmp_pb2
from mpserver.interfaces import Logger, EventFiring
class MediaDownloader(rpc.MediaDownloaderServicer, Logger, EventFiring):
    """
    Wrapper for the youtube_dl.YoutubeDL so it can be used in the mpserver
    package. Downloads are converted to 320kbps mp3 via ffmpeg, and progress
    is forwarded through EventFiring events.
    """

    # Config section name read from the ini file.
    _section = 'mediadownloader'

    def __init__(self, config: RawConfigParser):
        super(MediaDownloader, self).__init__()
        self._config = config
        # TODO: make some of these options available in ini file
        self._options = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '320',
            }],
            'logger': self.YTDLLogger(),
            'progress_hooks': [self.__download_hook],
        }
        self.__process_conf()

    def download(self, url, location):
        """Download *url* with youtube-dl.

        NOTE(review): *location* is accepted but never used — the output
        path comes from the youtube-dl options instead; confirm intent.
        """
        with youtube_dl.YoutubeDL(self._options) as ytdl:
            ytdl.download([url])

    def process_message(self, message: dict) -> dict:
        """
        Process the message and return a response
        :param message: the message to process
        :type message: dict
        :return: returns a response as type dictionary
        :rtype: dict
        """
        retdict = {'result': 'ok'}
        if 'cmd' in message:
            # DOWNLOAD
            if message['cmd'] == 'download':
                if 'albumid' in message and 'url' in message:
                    # NOTE(review): download() requires a location argument
                    # but only the url is passed here — this call raises
                    # TypeError as written; the albumid is presumably meant
                    # to determine the location.
                    self.download(message['url'], )
                else:
                    retdict['result'] = 'error'
                    retdict['message'] = 'no albumid or url given'
        else:
            retdict['result'] = 'error'
            retdict['toast'] = 'Sorry, could not handle request'
        return retdict

    def __download_hook(self, info):
        # Called by youtube-dl with progress dicts; re-emit as events.
        self._latest_status = info
        if info['status'] == 'downloading':
            self._fire_event(self.Events.DOWNLOAD_UPDATE)
        elif info['status'] == 'finished':
            self._fire_event(self.Events.DOWNLOAD_FINISHED)

    class Events:
        # Event ids fired via EventFiring.
        DOWNLOAD_FINISHED = 1
        DOWNLOAD_UPDATE = 0

    class YTDLLogger:
        """Adapter passed to youtube-dl; drops debug/warning, prints errors."""

        def debug(self, msg):
            # print(msg)
            pass

        def warning(self, msg):
            # print(msg)
            pass

        def error(self, msg):
            print(msg)

    def __process_conf(self):
        # Read the download location template from the ini section.
        self._raw_download_location = self._config.get(self._section, 'download_location',
                                                       fallback='{{album}}/%(title)s.%(ext)s')
        self.log('raw_download_location: ' + self._raw_download_location)

    # gRPC servicer methods: not implemented yet, delegate to the base class
    # (which presumably raises UNIMPLEMENTED — confirm against the stub).
    def DownloadMedia(self, request, context):
        return super().DownloadMedia(request, context)

    def RetrieveMDStatus(self, request, context):
        return super().RetrieveMDStatus(request, context)

    def NotifyMDStatus(self, request, context):
        return super().NotifyMDStatus(request, context)
| StarcoderdataPython |
8123281 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 23:07:26 2016
Copyright (c) 2016, <NAME>. All rights reserved.
@author: <NAME>
@email: <EMAIL>
@license: BSD 3-clause.
"""
from nose.tools import assert_less
import numpy as np
import OnPLS.consts as consts
import OnPLS.estimators as estimators
import OnPLS.utils as utils
import tests
class TestOnPLS(tests.TestCase):
    """Compare OnPLS against nPLS on synthetic data.

    Both tests build multiblock data with a shared joint score plus
    mutually orthogonal block-unique scores, fit both estimators, and
    assert that OnPLS explains the data better than nPLS.

    Change from the original: ``np.asscalar`` (deprecated since NumPy 1.16
    and removed in 1.23) is replaced with ``ndarray.item()``.
    """

    def test_comparison_to_nPLS(self):
        """One joint component per block: OnPLS should beat nPLS."""
        np.random.seed(42)  # deterministic data and expected scores

        verbose = 1
        n, p_1, p_2, p_3 = 10, 5, 10, 15

        # Generate a shared score t and three mutually orthogonal
        # block-unique scores to1..to3 (all unit norm).
        t = np.sort(np.random.randn(n, 1), axis=0)
        t = t / np.linalg.norm(t)
        to1 = np.ones((n, 1))
        to1 = to1 - utils.project(to1, t)
        to1 = to1 / np.linalg.norm(to1)
        to2 = np.random.rand(n, 1)
        to2 = to2 - utils.project(to2, t) \
            - utils.project(to2, to1)
        to2 = to2 / np.linalg.norm(to2)
        to3 = np.random.rand(n, 1)
        to3 = to3 - utils.project(to3, t) \
            - utils.project(to3, to1) \
            - utils.project(to3, to2)
        to3 = to3 / np.linalg.norm(to3)

        # Sanity check: all score vectors are pairwise orthogonal.
        assert(np.dot(t.T, to1) < 5e-15)
        assert(np.dot(t.T, to2) < 5e-15)
        assert(np.dot(t.T, to3) < 5e-15)
        assert(np.dot(to1.T, to2) < 5e-15)
        assert(np.dot(to1.T, to3) < 5e-15)
        assert(np.dot(to2.T, to3) < 5e-15)

        # Loadings for the joint (p*) and unique (po*) parts.
        p1 = np.sort(np.random.randn(p_1, 1), axis=0)
        p2 = np.sort(np.random.randn(p_2, 1), axis=0)
        p3 = np.sort(np.random.randn(p_3, 1), axis=0)
        po1 = np.sort(np.random.randn(p_1, 1), axis=0)
        po2 = np.sort(np.random.randn(p_2, 1), axis=0)
        po3 = np.sort(np.random.randn(p_3, 1), axis=0)

        # Each block = joint structure + its own orthogonal structure.
        X1 = np.dot(t, p1.T) + np.dot(to1, po1.T)
        X2 = np.dot(t, p2.T) + np.dot(to2, po2.T)
        X3 = np.dot(t, p3.T) + np.dot(to3, po3.T)

        # All blocks predict each other with one component.
        predComp = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
        precomputedW = None

        # OnPLS model: one orthogonal component per block.
        orthComp = [1, 1, 1]
        model = None
        onpls = estimators.OnPLS(predComp, orthComp, model, precomputedW,
                                 numReps=1, verbose=verbose)
        onpls.fit([X1, X2, X3])
        Xhat, That = onpls.predict([X1, X2, X3], [2], return_scores=True)

        # OnPLS should explain the data almost perfectly.
        onpls_score = onpls.score([X1, X2, X3])
        assert(onpls_score > 0.999)

        # Fix the sign indeterminacy of the predicted score before comparing.
        if np.dot(t.T, That[0]) < 0.0:
            That[0] = -That[0]
        assert(np.linalg.norm(Xhat[0] - np.dot(t, p3.T)) < 5e-13)

        # nPLS model (no orthogonal filtering) on the same data.
        npls = estimators.nPLS(predComp, precomputed_A=None, numReps=1,
                               randomState=None, verbose=verbose)
        npls.fit([X1, X2, X3])
        Xhat, That = npls.predict([X1, X2, X3], [2], return_scores=True)
        if np.dot(t.T, That[0]) < 0.0:
            That[0] = -That[0]
        assert(np.linalg.norm(Xhat[0] - np.dot(t, p3.T)) < 1.5)

        npls_score = npls.score([X1, X2, X3])
        assert(abs(npls_score - 0.37736) < 5e-6)

        # The orthogonal components let OnPLS outperform nPLS.
        assert(onpls_score > npls_score)

    def test_comparison_to_nPLS_2comps(self):
        """Two joint components per block: OnPLS should still beat nPLS."""
        np.random.seed(42)

        verbose = 1
        n, p_1, p_2, p_3 = 10, 5, 10, 15

        # Two shared scores t1, t2 plus three mutually orthogonal
        # block-unique scores (all unit norm).
        t1 = np.sort(np.random.randn(n, 1), axis=0)
        t1 = t1 / np.linalg.norm(t1)
        t2 = np.random.randn(n, 1)
        t2 = t2 - utils.project(t2, t1)
        t2 = t2 / np.linalg.norm(t2)
        to1 = np.ones((n, 1))
        to1 = to1 - utils.project(to1, t1) \
            - utils.project(to1, t2)
        to1 = to1 / np.linalg.norm(to1)
        to2 = np.random.rand(n, 1)
        to2 = to2 - utils.project(to2, t1) \
            - utils.project(to2, t2) \
            - utils.project(to2, to1)
        to2 = to2 / np.linalg.norm(to2)
        to3 = np.random.rand(n, 1)
        to3 = to3 - utils.project(to3, t1) \
            - utils.project(to3, t2) \
            - utils.project(to3, to1) \
            - utils.project(to3, to2)
        to3 = to3 / np.linalg.norm(to3)

        # Sanity check: all score vectors are pairwise orthogonal.
        assert(np.abs(np.dot(t1.T, t2)) < 5e-15)
        assert(np.abs(np.dot(t1.T, to1)) < 5e-15)
        assert(np.abs(np.dot(t1.T, to2)) < 5e-15)
        assert(np.abs(np.dot(t1.T, to3)) < 5e-15)
        assert(np.abs(np.dot(t2.T, to1)) < 5e-15)
        assert(np.abs(np.dot(t2.T, to2)) < 5e-15)
        assert(np.abs(np.dot(t2.T, to3)) < 5e-15)
        assert(np.abs(np.dot(to1.T, to2)) < 5e-15)
        assert(np.abs(np.dot(to1.T, to3)) < 5e-15)
        assert(np.abs(np.dot(to2.T, to3)) < 5e-15)

        p11 = np.sort(np.random.randn(p_1, 1), axis=0)
        p12 = np.sort(np.random.randn(p_2, 1), axis=0)
        p13 = np.sort(np.random.randn(p_3, 1), axis=0)
        p21 = np.sort(np.random.randn(p_1, 1), axis=0)
        p22 = np.sort(np.random.randn(p_2, 1), axis=0)
        p23 = np.sort(np.random.randn(p_3, 1), axis=0)
        po1 = np.sort(np.random.randn(p_1, 1), axis=0)
        po2 = np.sort(np.random.randn(p_2, 1), axis=0)
        po3 = np.sort(np.random.randn(p_3, 1), axis=0)

        X1 = np.dot(t1, p11.T) + np.dot(t2, p21.T) + np.dot(to1, po1.T)
        X2 = np.dot(t1, p12.T) + np.dot(t2, p22.T) + np.dot(to2, po2.T)
        X3 = np.dot(t1, p13.T) + np.dot(t2, p23.T) + np.dot(to3, po3.T)

        # Two predictive components between every pair of blocks.
        pred_comp = [[0, 2, 2], [2, 0, 2], [2, 2, 0]]
        precomputed_W = None

        # OnPLS model: one orthogonal component per block.
        orth_comp = [1, 1, 1]
        model = None
        onpls = estimators.OnPLS(pred_comp, orth_comp, model, precomputed_W,
                                 numReps=1, verbose=verbose)
        onpls.fit([X1, X2, X3])
        Xhat, That = onpls.predict([X1, X2, X3], [2], return_scores=True)

        onpls_score = onpls.score([X1, X2, X3])
        assert(onpls_score > 0.85)

        if np.dot(t1.T, That[0]) < 0.0:
            That[0] = -That[0]

        # nPLS model with two components on the same data.
        npls = estimators.nPLS(pred_comp, K=2, precomputed_A=None, numReps=1,
                               randomState=None, verbose=verbose)
        npls.fit([X1, X2, X3])
        Xhat, That = npls.predict([X1, X2, X3], [2], return_scores=True)
        # Fix the sign indeterminacy of each predicted component.
        # (np.asscalar was removed in NumPy 1.23; .item() is the replacement.)
        if np.dot(t1.T, That[0][:, [0]]).item() < 0.0:
            That[0][:, 0] = -That[0][:, 0]
        if np.dot(t1.T, That[0][:, [1]]).item() < 0.0:
            That[0][:, 1] = -That[0][:, 1]
        assert(np.linalg.norm(Xhat[0] - np.dot(t1, p13.T)) < 3.3)

        npls_score = npls.score([X1, X2, X3])
        assert(abs(npls_score - 0.534009) < 5e-7)

        assert(onpls_score > npls_score)
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
| StarcoderdataPython |
6473260 | <filename>bot/update_status.py
from bot.args_twitter_keys import (
access_token,
access_token_secret,
api_key,
api_secret
)
import requests
from requests_oauthlib import OAuth1Session
# OAuth1Session's positional parameters are
# (client_key, client_secret, resource_owner_key, resource_owner_secret):
# the consumer/API key pair must come first, followed by the access-token
# pair. The original code passed the access tokens first, which swaps the
# credential roles and makes authentication fail.
twitter = OAuth1Session(
    api_key,
    api_secret,
    access_token,
    access_token_secret
)
def tweet(text):
    """POST *text* as a status update; returns the HTTP response object."""
    endpoint = 'https://api.twitter.com/1.1/statuses/update.json'
    return twitter.post(endpoint, params={'status': text})
| StarcoderdataPython |
9686199 | <gh_stars>0
"""Training script for SinGAN."""
import torch
from src.singan import SinGAN
import argparse
# Command-line interface
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='cuda', type=str, help='cuda or cpu')
parser.add_argument('--lr', default=5e-4, type=float, help='learning rate')
parser.add_argument('--iters', default=2000, type=int, help='iterations per scale')
parser.add_argument('--sample_interval', default=500, type=int, help='iteration intervals to sample test images')
parser.add_argument('--lam', default=0.1, type=float, help='lambda parameter for gradient penalty')
parser.add_argument('--network_iters', default=3, type=int, help='iterations per network update')
parser.add_argument('--alpha', default=10, type=int, help='reconstruction loss weight')
parser.add_argument('--path', default='./assets/new_york.jpg', type=str, help='image path')
parser.add_argument('--load', action='store_true', default=False, help='load current network')
args = parser.parse_args()

# Resolve the compute device from the flag.
device = torch.device('cuda:0' if args.device == 'cuda' else 'cpu')

# Build the SinGAN model straight from the parsed hyper-parameters.
singan = SinGAN(device, args.lr, args.lam, args.alpha, args.iters,
                args.sample_interval, args.network_iters, args.path)

# Optionally resume from a previously saved network.
if args.load:
    singan.load()

# Run the multi-scale training loop.
singan.train()
5039176 | from lock import Lock
from manifest import Manifest
from package import Package
from release import Release
| StarcoderdataPython |
11398022 | #!/usr/bin/env python2.7
import sys
import re
import json
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from json2run import *
from pymongo import *
from multiprocessing import *
from collections import namedtuple
import logging as log
from multiprocessing import cpu_count
import datetime
from math import floor, ceil
def main():
    """Entry point: parse command-line arguments and dispatch the action.

    Supported actions include printing command lines / CSV from a JSON
    definition, running batches and races, listing / renaming / deleting
    batches, and dumping experiment results.  Exits with status 1 on any
    usage or database error.
    """
    # Add options parser
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    prepare_args(parser)
    args = parser.parse_args()
    # setup scm
    Persistent.scm = args.scm
    # logging setup
    log_level = log.INFO
    if args.log_level == "error":
        log_level = log.ERROR
    elif args.log_level == "info":
        log_level = log.INFO
    if args.log_file:
        log.basicConfig(filename=args.log_file, level=log_level, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    else:
        log.basicConfig(level=log_level, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    # database setup
    Persistent.connect(host=args.db_host, port=args.db_port, user=args.db_user, passw=args.db_pass, database=args.db_database)
    # Get srun arguments
    slurm_cmd = ("srun --quiet --job-name=j2rtask --quit-on-interrupt --time=%s --cpus-per-task=%i"
                 % (args.slurm_time, args.slurm_cpus))
    if args.slurm_partition != "":
        # If we have set a partition, add it to the cmd
        slurm_cmd += " --partition=%s" % (args.slurm_partition)
    if args.slurm_mem > 0:
        # If we have set memory, work out how much per cpu and add it to cmd
        mem_per_cpu = int(ceil(args.slurm_mem / args.slurm_cpus))
        slurm_cmd += " --mem-per-cpu=%i" % (mem_per_cpu)
    # Dict with slurm settings
    slurm = {"use": args.slurm,
             "cmd": slurm_cmd}
    # action dispatching
    if args.action == "print-cll" or args.action == "print-csv":
        if not args.input:
            log.error("You need to provide a source JSON file.")
            sys.exit(1)
        pex = from_file(args.input)
        if args.action == "print-cll":
            while pex.has_more():
                print(ParameterExpression.format(args.executable, pex.next(), args.separator, args.prefix))
        else:
            headers = pex.headers()
            print(",".join(headers))
            while pex.has_more():
                n = pex.next()
                l = []
                for h in headers:
                    p_value = None
                    for p in n:
                        if p.name == h:
                            p_value = p.value
                            break
                    # FIX: compare against None instead of truthiness, so
                    # legitimate falsy values (0, "", False) are printed
                    # rather than silently emitted as empty cells.
                    if p_value is not None:
                        l.append(str(p_value))
                    else:
                        l.append("")
                print(",".join(l))
    # run a batch
    elif args.action == "run-batch":
        if not args.batch_name:
            log.error("You need to provide a valid batch name.")
            sys.exit(1)
        batch = None
        samename = [b for b in Batch.get({ "name": args.batch_name })]
        unfinished = [b for b in samename if b["date_started"] == b["date_stopped"]]
        # resume
        if unfinished:
            log.info("There is an unfinished batch with this name, resuming.")
            b = unfinished.pop()
            batch = Batch(False)
            batch.load(b)
            batch.run(slurm, args.parallel_threads, args.greedy)
        # initialize
        elif not samename:
            if not args.executable:
                log.error("You need to provide a valid executable.")
                sys.exit(1)
            if not args.input:
                # FIX: was log.errro (AttributeError at runtime)
                log.error("You need to provide a source JSON file.")
                sys.exit(1)
            pex = from_file(args.input)
            batch = Batch(name = args.batch_name, generator = pex, executable = args.executable, repetitions = int(args.repetitions), prefix = args.prefix, separator = args.separator)
            batch.run(slurm, args.parallel_threads, args.greedy)
        else:
            log.error("A complete batch with the same name is already on the database, try another name.")
            sys.exit(1)
        batch.save()
    # run a race
    elif args.action == "run-race":
        if not args.batch_name:
            log.error("You need to provide a valid batch name.")
            sys.exit(1)
        batch = None
        samename = [b for b in Race.get({ "name": args.batch_name })]
        unfinished = [b for b in samename if b["date_started"] == b["date_stopped"]]
        # resume
        if unfinished:
            log.info("There is an unfinished batch with this name, resuming.")
            b = unfinished.pop()
            batch = Race(False)
            batch.load(b)
            if args.input:
                pex = from_file(args.input)
                batch.set_generator(pex)
            batch.run(slurm, args.parallel_threads, args.greedy, args.confidence)
        # initialize
        elif not samename:
            if not args.instance_param:
                log.error("You need to provide the name of the parameter representing the instance.")
                sys.exit(1)
            if not args.performance_param:
                log.error("You need to provide the name of the parameter representing the performance metric.")
                sys.exit(1)
            if not args.executable:
                log.error("You need to provide a valid executable.")
                sys.exit(1)
            if not args.input:
                log.error("You need to provide a source JSON file.")
                sys.exit(1)
            pex = from_file(args.input)
            batch = Race(name = args.batch_name, generator = pex, executable = args.executable, repetitions = int(args.repetitions), initial_block = int(args.initial_block), performance_parameter = args.performance_param, instance_parameter = args.instance_param, seed = args.seed, prefix = args.prefix, separator = args.separator)
            batch.run(slurm, args.parallel_threads, args.greedy, args.confidence)
        else:
            log.error("A complete batch with the same name is already on the database, try another name.")
            sys.exit(1)
        batch.save()
    # list batches
    elif args.action == "list-batches":
        fields = ["Name", "Completion", "Host", "User", "Type", "Started", "Finished"]
        batches = None
        if args.filter:
            batches = Batch.get({"name": { "$regex": args.filter }}).sort("date_started", -1)
        else:
            batches = Batch.get({}).sort("date_started", -1)
        if args.limit != 0:
            batches.limit(args.limit)
        print("Batches matching criteria: ", batches.count())
        # get average experiment run time
        aggregate = False
        avg_duration = None
        last_experiment = None
        # get unfinished batches
        unfinished = [b["_id"] for b in batches if b["date_started"] == b["date_stopped"]]
        batches.rewind()
        try:
            # compute (on db) average experiment execution time and last experiment time
            ad = Persistent.database["experiments"].aggregate([{"$match":{"date_stopped":{"$exists":True}, "batch": { "$in": unfinished } }},{"$group":{"_id":"$batch","last_experiment":{"$max":"$date_stopped"}, "duration":{"$avg":{"$add":[{"$subtract":[{"$second":"$date_stopped"},{"$second":"$date_started"}]},{"$multiply":[60.0,{"$subtract":[{"$minute":"$date_stopped"},{"$minute":"$date_started"}]}]},{"$multiply":[3600.0,{"$subtract":[{"$hour":"$date_stopped"},{"$hour":"$date_started"}]}]},{"$multiply":[86400.0,{"$subtract":[{"$dayOfYear":"$date_stopped"},{"$dayOfYear":"$date_started"}]}]},{"$multiply":[977616000.0,{"$subtract":[{"$year":"$date_stopped"},{"$year":"$date_started"}]}]}]}}}}])
            avg_duration = {}
            for entry in ad["result"]:
                avg_duration[entry["_id"]] = entry["duration"]
            last_experiment = {}
            for entry in ad["result"]:
                last_experiment[entry["_id"]] = entry["last_experiment"]
            fields.extend(["ETA", "Active"])
            aggregate = True
        except Exception as e:
            log.info("The database doesn't support some of the features.")
        Row = namedtuple("Row", fields)
        table = []
        for b in batches:
            batch = None
            if (b["type"] == "race"):
                batch = Race(False)
            else:
                batch = Batch(False)
            batch.load(b)
            date_started = str(batch["date_started"].strftime("%d/%m/%y %H:%M"))
            date_stopped = "never" if batch["date_started"] == batch["date_stopped"] else str(batch["date_stopped"].strftime("%d/%m/%y %H:%M"))
            completion = batch.completion()
            if (b["type"] == "full"):
                completion = "%.2f " % batch.completion() + '%'
            if aggregate:
                ad = 0 if batch["_id"] not in avg_duration else avg_duration[batch["_id"]]
                le = datetime.datetime(year = 1970, month = 1, day = 1) if batch["_id"] not in last_experiment else last_experiment[batch["_id"]]
                # compute eta wrt. available cores
                threads = args.parallel_threads
                try:
                    threads = batch["threads"]
                except Exception:
                    # batch has no recorded thread count; keep the CLI default
                    pass
                eta = (float(batch.missing()) * ad) / float(threads)
                # a batch is active if last experiment terminated not before twice the average duration ago
                now = datetime.datetime.utcnow()
                status = "*" if (now - datetime.timedelta(seconds = int(ad * 2.0)) < le) and batch["date_started"] == batch["date_stopped"] else " "
                # report eta if > than 1 minute
                sec_in_day = 86400.0
                sec_in_hour = 3600.0
                sec_in_minute = 60.0
                # compose eta string
                eta_str = ""
                if floor(eta / sec_in_day) > 0:
                    eta_str += "%dd " % floor(eta / sec_in_day)
                if floor((eta % sec_in_day) / sec_in_hour):
                    eta_str += "%dh " % floor((eta % sec_in_day) / sec_in_hour)
                if int((eta % sec_in_hour) / sec_in_minute):
                    eta_str += "%dm " % int((eta % sec_in_hour) / sec_in_minute)
                if eta_str:
                    eta_str += "(%d cores)" % threads
                    # eta_str += " (avg. %f)" % ad
                r = Row(batch["name"], completion, batch["host"], batch["user"], batch.type(), date_started, date_stopped, eta_str or "--", status )
            else:
                r = Row(batch["name"], completion, batch["host"], batch["user"], batch.type(), date_started, date_stopped)
            table.append(r)
        print_table(table)
    # delete batch
    elif args.action == "delete-batch":
        if not args.batch_name:
            log.error("You need to provide a batch name to remove.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch(False)
            batch.load(Batch.get({ "name": args.batch_name }).next())
        except Exception:
            log.error("Error loading batch.")
            sys.exit(1)
        sys.stdout.write("Are you sure? ")
        sys.stdout.flush()
        if re.compile("Y|y|yes|YES").match(sys.stdin.readline()):
            Batch.remove({ "name": batch["name"] })
        else:
            sys.exit(0)
        sys.stdout.write("Remove related experiments? ")
        sys.stdout.flush()
        if re.compile("Y|y|yes|YES").match(sys.stdin.readline()):
            Experiment.remove({ "batch": batch["_id"] })
        else:
            sys.exit(0)
    # mark unfinished batch
    elif args.action == "mark-unfinished":
        if not args.batch_name:
            log.error("You need to provide a batch name to mark as unfinished.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch(False)
            batch.load(Batch.get({ "name": args.batch_name }).next())
        except Exception:
            log.error("Error loading batch.")
            sys.exit(1)
        batch["date_stopped"] = batch["date_started"]
        batch.save()
    # rename batch
    elif args.action == "rename-batch":
        if not args.batch_name:
            log.error("You need to provide a batch name to rename.")
            sys.exit(1)
        if not args.new_name:
            log.error("You need to provide a new name for this batch.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch(False)
            batch.load(Batch.get({ "name": args.batch_name }).next())
        except Exception:
            log.error("Error loading batch.")
            sys.exit(1)
        batch["name"] = args.new_name
        batch.save()
    # add repetitions to batch, set unfinished
    elif args.action == "set-repetitions":
        if not args.batch_name:
            log.error("You need to provide a batch name to rename.")
            sys.exit(1)
        if not args.repetitions:
            log.error("You need to provide a number of repetitions.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch(False)
            batch.load(Batch.get({ "name": args.batch_name }).next())
        except Exception:
            log.error("Error loading batch.")
            sys.exit(1)
        batch["repetitions"] = args.repetitions
        batch["date_stopped"] = batch["date_started"]
        batch.save()
    elif args.action == "set-generator":
        if not args.batch_name:
            log.error("You need to provide a batch name.")
            sys.exit(1)
        if not args.input:
            log.error("You need to provide a new JSON file.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch(False)
            batch.load(Batch.get({ "name": args.batch_name }).next())
        except Exception:
            log.error("Error loading batch.")
            sys.exit(1)
        pex = from_file(args.input)
        batch.update_generator(pex)
        # mark unfinished (in general)
        batch["date_stopped"] = batch["date_started"]
        batch.save()
    # batch info
    elif args.action == "batch-info":
        if not args.batch_name:
            log.error("You need to provide a batch name.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch.get({ "name": args.batch_name }).next()
            del(batch["_id"])
            batch["date_started"] = str(batch["date_started"])
            batch["date_stopped"] = str(batch["date_stopped"])
            batch["generator"] = json.loads(batch["generator"])
            if batch["type"] == "race":
                batch["configurations"] = json.loads(batch["configurations"])
            print(json.dumps(batch, indent = 4))
        except Exception as e:
            log.error(e)
            log.error("Error loading batch.")
            sys.exit(1)
    # show non-pruned solutions
    elif args.action == "show-winning":
        if not args.batch_name:
            log.error("You need to provide a batch name.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch.get({ "name": args.batch_name }).next()
            if batch["type"] != "race":
                log.error("This is not a race.")
                # FIX: was system.exit(1), a NameError
                sys.exit(1)
            winning = [j for j in json.loads(batch["configurations"]) if j["sum_of_ranks"]]
            print(json.dumps(winning, indent = 4))
        except Exception as e:
            log.error(e)
            log.error("Error loading batch.")
            sys.exit(1)
    # show best (lowest sum of ranks) non-pruned solutions
    elif args.action == "show-best":
        if not args.batch_name:
            log.error("You need to provide a batch name.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch.get({ "name": args.batch_name }).next()
            if batch["type"] != "race":
                log.error("This is not a race.")
                # FIX: was system.exit(1), a NameError
                sys.exit(1)
            winning = [j for j in json.loads(batch["configurations"]) if j["sum_of_ranks"]]
            best = None
            for j in winning:
                if not best or best[0]["sum_of_ranks"] > j["sum_of_ranks"]:
                    best = [j]
            for j in winning:
                # FIX: compare against the best score, not the dict itself
                # (the original compared j["sum_of_ranks"] == best[0]).
                if j["sum_of_ranks"] == best[0]["sum_of_ranks"] and j != best[0]:
                    best.append(j)
            print(json.dumps(best, indent = 4))
        except Exception as e:
            log.error(e)
            log.error("Error loading batch.")
            sys.exit(1)
    # dump csv of experiments
    elif args.action == "dump-experiments":
        if not args.batch_name:
            log.error("You need to provide a batch name to remove.")
            sys.exit(1)
        batch = None
        try:
            batch = Batch(False)
            batch.load(Batch.get({ "name": args.batch_name }).next())
        except Exception:
            log.error("Error loading batch.")
            sys.exit(1)
        experiments = Experiment.get({ "batch": batch["_id"] })
        if not experiments.count():
            sys.exit(0)
        # stat headers (taken from the first experiment unless given on the CLI)
        e = experiments.next()
        stat_head = list(map(str, e["stats"].keys()) if "stats" in e else [])
        if args.stats:
            stat_head = args.stats
        batch.initialize_experiments()
        generator = batch.generator
        head = generator.headers()
        full_head = head[:]
        full_head.extend(stat_head)
        print(",".join(full_head))
        experiments = Experiment.get({ "batch": batch["_id"] })
        for e in experiments:
            l = [(str(e["parameters"][h]) if e["parameters"][h] != None else "true") if h in e["parameters"] else "" for h in head]
            if stat_head:
                l.extend((str(e["stats"][s]) if e["stats"][s] != None else "") if s in e["stats"] else "" for s in stat_head)
            print(",".join(l))
    Persistent.disconnect()
def _str2bool(value):
    """Parse a command-line boolean spelled as text.

    argparse's ``type=bool`` treats ANY non-empty string (including
    "False") as True; this converter accepts the usual spellings instead,
    so ``--greedy false`` actually yields False.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("1", "true", "t", "yes", "y")

def prepare_args(parser):
    """Prepare the arguments for the program.

    Registers all command-line options on *parser*; database option
    defaults are read from Persistent.config.
    """
    parser.add_argument("--input", "-i", required = False, type=str, help="the JSON input file")
    parser.add_argument("--executable", "-e", required = False, type=str, help="the executable to use")
    parser.add_argument("--action", "-a", required = False, type=str, default = "print-cll", choices=["batch-info", "show-best", "mark-unfinished", "rename-batch", "delete-batch", "dump-experiments", "show-winning", "run-batch", "run-race", "list-batches", "print-cll", "print-csv", "set-repetitions", "set-generator"], help="the action to execute")
    parser.add_argument("--repetitions", "-r", required = False, type=int, default = 1, help="number of repetitions of each experiment on a single instance")
    parser.add_argument("--instance-param", "-ip", required = False, type=str, help="name of the parameter representing the instance in a race")
    parser.add_argument("--performance-param", "-pp", required = False, type=str, help="name of the parameter representing the performance metric in a races")
    parser.add_argument("--initial-block", "-ib", required = False, type=int, default = 10, help="size of the initial block of experiments in a race")
    parser.add_argument("--confidence", required = False, type=float, default = 0.05, help="confidence for the hypotesis testing in a race")
    parser.add_argument("--batch-name", "-n", required = False, type = str, help = "name of the batch on the database")
    parser.add_argument("--parallel-threads", "-p", required = False, type = int, default = cpu_count(), help="number of parallel threads onto which to run the experiments, with slurm this is the max task concurrency")
    # FIX: was type = bool, which made any non-empty value (even "False") truthy.
    parser.add_argument("--greedy", "-g", required = False, type = _str2bool, default = False, help="whether the experiment can be reused from every batch in the database (true) or just the current one (false)")
    parser.add_argument("--log-file", required = False, type = str, help="file where the whole log is written")
    parser.add_argument("--log-level", required = False, type = str, default="info", choices=["warning", "error", "info"] )
    parser.add_argument("--db-host", "-dh", required = False, type = str, default=Persistent.config["host"], help="the host where the database is installed")
    parser.add_argument("--db-port", "-dp", required = False, type = int, default=Persistent.config["port"], help="the port onto which the database is served")
    parser.add_argument("--db-database", "-dd", required = False, type = str, default=Persistent.config["database"], help="the database name")
    parser.add_argument("--db-user", "-du", required = False, type = str, default=Persistent.config["user"], help="the database username")
    parser.add_argument("--db-pass", "-dx", required = False, type = str, default=Persistent.config["pass"], help="the database password")
    parser.add_argument("--scm", required = False, type = str, default="", choices=["", "git", "mercurial"], help="kind of SCM used")
    parser.add_argument("--seed", "-s", required = False, type = int, default=0, help="seed to use, e.g. for race")
    parser.add_argument("--new-name", "-nn", required = False, type = str, help="new name for the batch")
    parser.add_argument("--prefix", "-pre", required = False, type = str, default=ParameterExpression.def_prefix, help="prefix character(s) for arguments")
    parser.add_argument("--filter", "-f", required = False, type = str, help="filter printouts")
    parser.add_argument("--separator", "-sep", required = False, type = str, default=ParameterExpression.def_separator, help="separator character(s) for arguments")
    parser.add_argument("--stats", "-st", required = False, type = str, nargs="+", help="list of stats to export in CSV")
    parser.add_argument("--limit", "-l", required = False, type = int, default=0, help="how many batches to show in the list")
    # FIX: was type = bool (same truthiness pitfall as --greedy).
    parser.add_argument("--slurm", "-sl", required = False, type = _str2bool, default=False, help="run on a slurm cluster")
    parser.add_argument("--slurm-time", "-slt", required = False, type = str, default="02:00:00", help="time limit for each task in HH:MM:SS, note that priority is reduced for long tasks")
    parser.add_argument("--slurm-cpus", "-slc", required = False, type = int, default=1, help="cpus per task")
    parser.add_argument("--slurm-partition", "-slq", required = False, type = str, default="", help="the slurm partition(s) to submit to, can specify multiple comma separated partitions")
    parser.add_argument("--slurm-mem", "-slm", required = False, type = int, default=0, help="memory requested per task in MB (defaults to cluster default)")
    # NOTE(review): assigning add_help after construction has no effect (the
    # -h/--help action is installed by ArgumentParser.__init__); kept as-is.
    parser.add_help = True
    parser.prefix_chars = "-"
    parser.description = "Generates a number of parameter configurations from a JSON definition file, then uses them to either run experiments, tune parameter or just print out the parameter configurations."
def from_file(file):
    """Generates parameter expression from file name.

    Reads the JSON definition at *file* and turns it into a
    ParameterExpression.  On any read or parse error the problem is
    logged and the process exits with status 1.
    """
    # open file ("with" guarantees the handle is closed; the original
    # leaked the file object)
    try:
        with open(file, "r") as input_file:
            json_str = input_file.read()
    except Exception:
        log.error("Impossible to open file " + file + " for reading.")
        sys.exit(1)
    try:
        pex = ParameterExpression.from_string(json_str)
    except Exception as e:
        log.error("Impossible to generate ParameterExpression.")
        log.error(e)
        sys.exit(1)
    return pex
def print_table(rows):
    """Pretty-print a list of namedtuples as an aligned ASCII table.

    Column widths are derived from the widest cell (header included) in
    each column; int columns are right-aligned, everything else is
    left-aligned.  Empty input prints nothing.
    """
    if len(rows) > 0:
        headers = rows[0]._fields
        lens = []
        for i in range(len(rows[0])):
            # Width of the widest cell in column i.  Using len(str(...))
            # also works for non-string cells; the original applied len()
            # to the raw element and crashed on int columns.
            lens.append(max(len(str(x)) for x in [r[i] for r in rows] + [headers[i]]))
        formats = []
        hformats = []
        for i in range(len(rows[0])):
            if isinstance(rows[0][i], int):
                formats.append("%%%dd" % lens[i])
            else:
                formats.append("%%-%ds" % lens[i])
            # Header format must be appended for EVERY column (the original
            # only appended it in the else branch, mis-formatting headers
            # whenever an int column was present).
            hformats.append("%%-%ds" % lens[i])
        pattern = " | ".join(formats)
        hpattern = " | ".join(hformats)
        separator = "-+-".join(['-' * n for n in lens])
        print(hpattern % tuple(headers))
        print(separator)
        for line in rows:
            print(pattern % tuple(line))
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# FreePIE script: shares the OSVR controller handles with update() below.
# NOTE(review): "global" at module level is a no-op in standard CPython;
# presumably kept for the FreePIE scripting environment -- confirm.
global leftController
global rightController
# "starting" appears to be injected by the host runtime and true only on
# the first pass (TODO confirm against the FreePIE script lifecycle).
if starting:
    leftController = OSVR.leftController()
    rightController = OSVR.rightController()
    # Present both emulated Hydra controllers as active and undocked.
    hydra[0].enabled = True
    hydra[0].isDocked = False
    hydra[0].side = 'L'
    hydra[1].enabled = True
    hydra[1].isDocked = False
    hydra[1].side = 'R'
def update():
    """Copy the current OSVR controller state onto the emulated Hydra pair."""
    global leftController
    global rightController
    for idx, ctrl in ((0, leftController), (1, rightController)):
        dev = hydra[idx]
        # Position components are scaled by 10000 (unit conversion for the
        # Hydra emulation; exact units not documented here).
        dev.x = ctrl.x * 10000
        dev.y = ctrl.y * 10000
        dev.z = ctrl.z * 10000
        # Orientation: pitch is inverted between the two conventions.
        dev.yaw = ctrl.yaw
        dev.pitch = -ctrl.pitch
        dev.roll = ctrl.roll
        # Buttons and axes map one-to-one.
        dev.one = ctrl.one
        dev.two = ctrl.two
        dev.three = ctrl.three
        dev.four = ctrl.four
        dev.bumper = ctrl.bumper
        dev.joybutton = ctrl.joystick
        dev.start = ctrl.middle
        dev.joyx = ctrl.joystickX
        dev.joyy = ctrl.joystickY
        dev.trigger = ctrl.trigger
# Apply the mapping once on this pass (the host presumably re-runs the
# script/update each tick -- TODO confirm against FreePIE's model).
update()
# Expose a few values in the diagnostics watch window for debugging.
diagnostics.watch(bool(leftController.one))
diagnostics.watch(float(rightController.trigger))
diagnostics.watch(leftController.position.x)
diagnostics.watch(leftController.position.y)
diagnostics.watch(leftController.position.z)
diagnostics.watch(leftController.roll)
diagnostics.watch(leftController.pitch)
diagnostics.watch(leftController.yaw)
11248150 | <reponame>jsabak/antycaptcha-solutions-locust
from locust import seq_task, TaskSequence
class Exercise1Steps(TaskSequence):
    """Locust task sequence that solves "exercise1" of the AntyCaptcha app.

    Flow: open the exercise page (which establishes the server-side
    session), press button1 three times, then post the solution and
    assert that the server accepts it.

    Large blocks of dead commented-out code (captured browser headers,
    including session cookies) were removed; unused ``response``
    assignments were dropped where the response is not inspected.
    """

    def __init__(self, parent):
        super().__init__(parent)
        # Fixed seed so every simulated user replays the same exercise
        # instance (the expected solution depends on the seed).
        self.seed = '553a09d3-baf5-45f6-ad89-83324bac051a'

    @seq_task(1)
    def exercises_exercise1(self):
        # Load the exercise page; the response sets the session state the
        # later POSTs rely on.
        self.client.get(
            url=f'/exercises/exercise1?seed={self.seed}',
            name='E1.01 Main',
            timeout=30, allow_redirects=False)

    @seq_task(3)
    def exercises_exercise1_button1a(self):
        # First of the three button presses required by the exercise.
        self.client.post(url='/exercises/exercise1/button1',
                         name='E1.02 Button1',
                         allow_redirects=False, data=b'0')

    @seq_task(4)
    def exercises_exercise1_button1b(self):
        self.client.post(url='/exercises/exercise1/button1',
                         name='E1.03 Button1',
                         allow_redirects=False, data=b'0')

    @seq_task(5)
    def exercises_exercise1_button1c(self):
        self.client.post(url='/exercises/exercise1/button1',
                         name='E1.04 Button1',
                         allow_redirects=False, data=b'0')

    @seq_task(6)
    def exercises_exercise1_solution(self):
        response = self.client.post(url='/exercises/exercise1/solution',
                                    name='E1.05 Solution',
                                    allow_redirects=False, data=b'0')
        # Fail loudly if the server rejected the answer.
        assert 'OK. Good answer' in response.text

    @seq_task(100)
    def stop(self):
        # End the sequence so the parent task set can continue.
        self.interrupt()
| StarcoderdataPython |
3230837 | <reponame>siliconcupcake/aioquic
import asyncio
import os
from functools import partial
from typing import Callable, Dict, Optional, Text, Union, cast
from aioquic.buffer import Buffer
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import NetworkAddress, QuicConnection
from aioquic.quic.packet import (
PACKET_TYPE_INITIAL,
encode_quic_retry,
encode_quic_version_negotiation,
pull_quic_header,
)
from aioquic.quic.retry import QuicRetryTokenHandler
from aioquic.tls import SessionTicketFetcher, SessionTicketHandler
from .protocol import QuicConnectionProtocol, QuicStreamHandler
__all__ = ["serve"]
class QuicServer(asyncio.DatagramProtocol):
    """UDP datagram protocol that demultiplexes QUIC packets to connections.

    One instance listens on a single socket and routes each incoming
    datagram, keyed by destination connection ID, to the protocol instance
    owning that connection, creating new connections (optionally after a
    stateless retry) for unknown Initial packets.
    """

    def __init__(
        self,
        *,
        configuration: QuicConfiguration,
        create_protocol: Callable = QuicConnectionProtocol,
        session_ticket_fetcher: Optional[SessionTicketFetcher] = None,
        session_ticket_handler: Optional[SessionTicketHandler] = None,
        stateless_retry: bool = False,
        stream_handler: Optional[QuicStreamHandler] = None,
    ) -> None:
        self._configuration = configuration
        self._create_protocol = create_protocol
        self._loop = asyncio.get_event_loop()
        # Maps every known connection ID (one connection may own several)
        # to the protocol instance handling that connection.
        self._protocols: Dict[bytes, QuicConnectionProtocol] = {}
        self._session_ticket_fetcher = session_ticket_fetcher
        self._session_ticket_handler = session_ticket_handler
        self._transport: Optional[asyncio.DatagramTransport] = None
        self._stream_handler = stream_handler
        # Retry token handler is only created when stateless retry is enabled;
        # _retry being None doubles as the "retry disabled" flag below.
        if stateless_retry:
            self._retry = QuicRetryTokenHandler()
        else:
            self._retry = None

    def close(self) -> None:
        """Close every active connection and the underlying UDP transport."""
        for protocol in set(self._protocols.values()):
            protocol.close()
        self._protocols.clear()
        self._transport.close()

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        """Store the UDP transport once the socket is set up."""
        self._transport = cast(asyncio.DatagramTransport, transport)

    def datagram_received(self, data: Union[bytes, Text], addr: NetworkAddress) -> None:
        """Route one incoming datagram to the owning connection.

        Handles, in order: malformed headers (dropped), unsupported QUIC
        versions (version negotiation reply), optional stateless retry for
        new connections, creation of new connections for Initial packets,
        and finally delivery to the matching protocol instance.
        """
        data = cast(bytes, data)
        buf = Buffer(data=data)
        try:
            header = pull_quic_header(
                buf, host_cid_length=self._configuration.connection_id_length
            )
        except ValueError:
            # Not a parseable QUIC packet: silently drop it.
            return

        # version negotiation: reply with the list of versions we support
        if (
            header.version is not None
            and header.version not in self._configuration.supported_versions
        ):
            self._transport.sendto(
                encode_quic_version_negotiation(
                    source_cid=header.destination_cid,
                    destination_cid=header.source_cid,
                    supported_versions=self._configuration.supported_versions,
                ),
                addr,
            )
            return

        protocol = self._protocols.get(header.destination_cid, None)
        original_connection_id: Optional[bytes] = None
        # Only an Initial packet of at least 1200 bytes may open a new
        # connection (the size floor limits amplification attacks).
        if (
            protocol is None
            and len(data) >= 1200
            and header.packet_type == PACKET_TYPE_INITIAL
        ):
            # stateless retry
            if self._retry is not None:
                if not header.token:
                    # create a retry token and ask the client to come back
                    # with it before we allocate any connection state
                    self._transport.sendto(
                        encode_quic_retry(
                            version=header.version,
                            source_cid=os.urandom(8),
                            destination_cid=header.source_cid,
                            original_destination_cid=header.destination_cid,
                            retry_token=self._retry.create_token(
                                addr, header.destination_cid
                            ),
                        ),
                        addr,
                    )
                    return
                else:
                    # validate retry token; invalid tokens are dropped
                    try:
                        original_connection_id = self._retry.validate_token(
                            addr, header.token
                        )
                    except ValueError:
                        return

            # create new connection
            connection = QuicConnection(
                configuration=self._configuration,
                logger_connection_id=original_connection_id or header.destination_cid,
                original_connection_id=original_connection_id,
                session_ticket_fetcher=self._session_ticket_fetcher,
                session_ticket_handler=self._session_ticket_handler,
            )
            protocol = self._create_protocol(
                connection, stream_handler=self._stream_handler
            )
            protocol.connection_made(self._transport)

            # register callbacks so this server can track the connection's
            # connection-ID lifecycle and clean up on termination
            protocol._connection_id_issued_handler = partial(
                self._connection_id_issued, protocol=protocol
            )
            protocol._connection_id_retired_handler = partial(
                self._connection_id_retired, protocol=protocol
            )
            protocol._connection_terminated_handler = partial(
                self._connection_terminated, protocol=protocol
            )
            # The connection is reachable both via the client-chosen
            # destination CID and via our own host CID.
            self._protocols[header.destination_cid] = protocol
            self._protocols[connection.host_cid] = protocol

        if protocol is not None:
            protocol.datagram_received(data, addr)

    def _connection_id_issued(self, cid: bytes, protocol: QuicConnectionProtocol) -> None:
        """Register a newly issued connection ID for *protocol*."""
        self._protocols[cid] = protocol

    def _connection_id_retired(
        self, cid: bytes, protocol: QuicConnectionProtocol
    ) -> None:
        """Forget a retired connection ID previously owned by *protocol*."""
        assert self._protocols[cid] == protocol
        del self._protocols[cid]

    def _connection_terminated(self, protocol: QuicConnectionProtocol) -> None:
        """Drop every connection ID still mapped to a terminated *protocol*."""
        for cid, proto in list(self._protocols.items()):
            if proto == protocol:
                del self._protocols[cid]
async def serve(
    host: str,
    port: int,
    *,
    configuration: QuicConfiguration,
    create_protocol: Callable = QuicConnectionProtocol,
    session_ticket_fetcher: Optional[SessionTicketFetcher] = None,
    session_ticket_handler: Optional[SessionTicketHandler] = None,
    stateless_retry: bool = False,
    stream_handler: QuicStreamHandler = None,
) -> QuicServer:
    """
    Start a QUIC server listening on the given `host` and `port`.

    A :class:`~aioquic.quic.configuration.QuicConfiguration` holding the TLS
    certificate and private key must be supplied as ``configuration``.

    Optional arguments:

    * ``create_protocol`` customizes the :class:`~asyncio.Protocol` managing
      each connection. It should be a callable or class accepting the same
      arguments as :class:`~aioquic.asyncio.QuicConnectionProtocol` and
      returning an instance of it or of a subclass.
    * ``session_ticket_fetcher`` is invoked by the TLS engine when the peer
      presents a session ticket; it should return the ticket with the given
      ID, or `None` if not found.
    * ``session_ticket_handler`` is invoked by the TLS engine when a new
      session ticket is issued; it should store the ticket for later lookup.
    * ``stateless_retry`` enables a stateless retry before accepting new
      connections.
    * ``stream_handler`` is called whenever a stream is created; it receives
      a :class:`asyncio.StreamReader` and a :class:`asyncio.StreamWriter`.
    """
    loop = asyncio.get_event_loop()

    def create_server() -> QuicServer:
        # Protocol factory handed to the datagram endpoint; it captures all
        # the options of this call.
        return QuicServer(
            configuration=configuration,
            create_protocol=create_protocol,
            session_ticket_fetcher=session_ticket_fetcher,
            session_ticket_handler=session_ticket_handler,
            stateless_retry=stateless_retry,
            stream_handler=stream_handler,
        )

    transport, protocol = await loop.create_datagram_endpoint(
        create_server, local_addr=(host, port)
    )
    return cast(QuicServer, protocol)
| StarcoderdataPython |
310744 | #!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import os
import subprocess
import sys
def warning(*objs):
    """Print a warning to stderr, prefixed with ``WARNING:``.

    print() inserts the separating space itself, so the trailing space in the
    original ``"WARNING: "`` prefix produced a double space in the output.
    """
    print("WARNING:", *objs, file=sys.stderr)
def fail(message):
    """Abort the script via SystemExit, printing ``Error: <message>`` to stderr."""
    sys.exit("Error: %s" % (message,))
def has_module(module_name):
    """Return True if *module_name* can be located on the import path.

    The original implementation relied solely on the ``imp`` module, which
    was removed in Python 3.12; there the ``import imp`` raised ImportError
    and the function silently reported False for every module. Fall back to
    ``importlib.util.find_spec`` when ``imp`` is unavailable.
    """
    try:
        import imp
    except ImportError:
        # "imp" no longer exists (Python 3.12+); use importlib instead.
        import importlib.util
        return importlib.util.find_spec(module_name) is not None
    try:
        imp.find_module(module_name)
        return True
    except ImportError:
        return False
def which(exe=None, throw=True):
    """Return path of bin. Python clone of /usr/bin/which.

    from salt.util - https://www.github.com/saltstack/salt - license apache

    :param exe: Application to search PATHs for.
    :type exe: string
    :param throw: Raise ``Exception`` if not found in paths
    :type throw: bool
    :rtype: string
    """
    if exe:
        # A path that is already executable wins outright.
        if os.access(exe, os.X_OK):
            return exe
        # default path based on busybox's default
        default_path = '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin'
        search_path = os.environ.get('PATH', default_path)
        # Probe each PATH entry for an executable with this name.
        for path in search_path.split(os.pathsep):
            full_path = os.path.join(path, exe)
            if os.access(full_path, os.X_OK):
                return full_path
        message = (
            '{0!r} could not be found in the following search '
            'path: {1!r}'.format(
                exe, search_path
            )
        )
        if throw:
            raise Exception(message)
        else:
            print(message)
    # Falls through here when exe is falsy or (with throw=False) not found.
    return None
# Filesystem layout: the project root and its local virtualenv binaries.
project_dir = os.path.dirname(os.path.realpath(__file__))
env_dir = os.path.join(project_dir, '.venv')
pip_bin = os.path.join(env_dir, 'bin', 'pip')
python_bin = os.path.join(env_dir, 'bin', 'python')
virtualenv_bin = which('virtualenv', throw=False)
# The virtualenv counts as existing only if its python binary is present.
virtualenv_exists = os.path.exists(env_dir) and os.path.isfile(python_bin)
# Optional developer tools (None when not installed).
entr_bin = which('entr', throw=False)
nvim_bin = which('nvim', throw=False)
# Requirement files for the various development workflows.
dev_reqs_fpath = os.path.join(project_dir, 'requirements', 'dev.txt')
test_reqs_fpath = os.path.join(project_dir, 'requirements', 'test.txt')
# NOTE(review): test27_reqs_fpath is defined but never used below.
test27_reqs_fpath = os.path.join(project_dir, 'requirements', 'test-py27.txt')
sphinx_reqs_fpath = os.path.join(project_dir, 'requirements', 'doc.txt')
# Bail out early when the bootstrap prerequisites are missing.
if not has_module('virtualenv'):
    message = (
        'Virtualenv is required for this bootstrap to run.\n'
        'Install virtualenv via:\n'
        '\t$ [sudo] pip install virtualenv'
    )
    fail(message)
if not has_module('pip'):
    message = (
        'pip is required for this bootstrap to run.\n'
        'Find instructions on how to install at: %s' %
        'http://pip.readthedocs.io/en/latest/installing.html'
    )
    fail(message)
def main():
    """Create the project virtualenv and install development requirements."""
    # First run: create the virtualenv and install the project editable.
    if not virtualenv_exists:
        # NOTE(review): shadows the module-level virtualenv_bin; with
        # throw=False this may be None, and check_call would then fail.
        virtualenv_bin = which('virtualenv', throw=False)
        subprocess.check_call(
            [virtualenv_bin, '-p%s' % 'python3.5', env_dir]
        )
        subprocess.check_call(
            [pip_bin, 'install', '-e', project_dir]
        )
    if not entr_bin:
        message = (
            'entr(1) is missing.\n'
            'If you want to enable rebuilding documentation and '
            're-running commands when a file is saved.\n'
            'See https://bitbucket.org/eradman/entr/'
        )
        print(message)
    # neovim requires this to be installed in the virtualenv 05/13/2016
    if nvim_bin:
        try:
            import neovim  # noqa
        except ImportError:
            subprocess.check_call(
                [pip_bin, 'install', 'neovim']
            )
    # Install test requirements only when pytest is not importable yet.
    try:
        import pytest  # noqa
    except ImportError:
        subprocess.check_call(
            [pip_bin, 'install', '-r', test_reqs_fpath]
        )
    # Dev tooling and Sphinx, keyed off their marker binaries in the venv.
    if not os.path.isfile(os.path.join(env_dir, 'bin', 'flake8')):
        subprocess.check_call(
            [pip_bin, 'install', '-r', dev_reqs_fpath]
        )
    if not os.path.isfile(os.path.join(env_dir, 'bin', 'sphinx-quickstart')):
        subprocess.check_call(
            [pip_bin, 'install', '-r', sphinx_reqs_fpath]
        )
    # NOTE(review): os.removedirs only removes *empty* directories; if a
    # populated build tree should be purged, shutil.rmtree may be intended.
    if os.path.exists(os.path.join(env_dir, 'build')):
        os.removedirs(os.path.join(env_dir, 'build'))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8163084 | """
This file contains shared functions for the project
@author: <NAME>
"""
import os, csv
# Parse input CSV file to get list of parameters
def parse_file(infile):
    """Parse a CSV parameter file and return its first data row.

    The file is expected to contain a header row followed by one row of
    parameter values; the value row is returned as a list of strings.

    :param infile: path of the CSV file to read
    :return: list of parameter strings, or None when the file is missing
        or contains no data row
    """
    param_list = None
    # check if the infile exists
    if not os.path.exists(infile):
        print("Parameter file does not exist: ", infile)
    else:
        print("parsing parameter file: ", infile)
        with open(infile, newline='') as f:
            reader = csv.reader(f, delimiter=',')
            rows = list(reader)
            # Guard against files without a data row; the original code
            # raised IndexError here.
            if len(rows) > 1:
                param_list = rows[1]
                print(param_list)
            else:
                print("Parameter file has no data row: ", infile)
    return param_list
| StarcoderdataPython |
8199455 | <reponame>vsoch/wordfish<gh_stars>0
from celery.decorators import periodic_task
from celery import shared_task, Celery
from celery.schedules import crontab
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from django.utils import timezone
from notifications.signals import notify
from docfish.settings import DOMAIN_NAME
from docfish.apps.main.models import *
from som.wordfish.validators import (
validate_folder,
validate_compressed
)
from som.wordfish.structures import (
structure_compressed
)
from docfish.apps.users.utils import get_user
from docfish.apps.main.utils import get_collection
from docfish.apps.storage.utils import (
extract_tmp,
import_structures
)
from docfish.settings import MEDIA_ROOT
from datetime import datetime
from itertools import chain
import os
import tempfile
import shutil
# Make Django settings importable before Celery configures itself.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'docfish.settings')
app = Celery('docfish')
# Pull broker/result configuration from Django's settings module.
app.config_from_object('django.conf:settings')
# Discover tasks in every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@shared_task
def test_worker(printme):
    """Print *printme* on the worker.

    Dummy task used to verify that the Celery worker is alive; the output
    is visible via ``docker-compose logs worker``.
    """
    print(printme)
@shared_task
def validate_dataset_upload(compressed_file, remove_folder=False):
    '''Validate a compressed dataset archive for correctness.

    Runs as a background job so the upload endpoint can respond without
    waiting for the (potentially slow) validation; if the archive is valid,
    a separate job performs the actual import.

    :param compressed_file: a string path to a file to test.
    :param remove_folder: when True, delete the temporary directory that
        contains the archive once validation is done.
    :return: dict with the validation outcome and the archive path.
    '''
    tmpdir = os.path.dirname(compressed_file)
    valid = validate_compressed(compressed_file)
    result = {'valid': valid,
              'file': compressed_file}
    # "remove_folder" is a plain boolean flag, so a truthiness check
    # replaces the original "== True" comparison.
    if remove_folder:
        shutil.rmtree(tmpdir)
    return result
@shared_task
def dataset_upload(compressed_file, cid):
    '''dataset_upload will take an compressed file object, decompress
    it, (again) validate it for correctness, and then upload the dataset.

    :param compressed_file: path of the archive on disk.
    :param cid: id of the collection the imported entities are added to.
    '''
    tmpdir = os.path.dirname(compressed_file)
    collection = get_collection(cid)
    # NOTE(review): validate_compressed may return a non-True value on
    # failure, so the explicit "== True" comparison is kept deliberately.
    if validate_compressed(compressed_file) == True:
        # Now we add entities to the collection
        structures = structure_compressed(compressed_file,
                                          testing_base=tmpdir,
                                          clean_up=False)
        collection = import_structures(structures,collection)
    # Clean up the temporary extraction directory whether or not the
    # archive re-validated successfully.
    shutil.rmtree(tmpdir)
@shared_task
def validate_memory_upload(memory_file,collection):
    '''validate_upload will first validate an uploaded (compressed) file
    for validity. If it's valid, it fires off a job to extract data
    to a collection. If not valid, it returns the error message to
    the user.
    :param memory_file: the in memory uploaded file
    :param collection: the collection to add the uploaded dataset to
    '''
    # Default response returned when the archive fails validation.
    data = {'is_valid': False, 'name': 'Invalid', 'url': ""}
    tmpdir = "%s/tmp" %MEDIA_ROOT
    if not os.path.exists(tmpdir):
        os.mkdir(tmpdir)
    # Persist the in-memory upload to disk so it can be inspected.
    compressed_file = extract_tmp(memory_file,base_dir=tmpdir)
    result = validate_dataset_upload(compressed_file)
    if result['valid'] == True:
        # Hand the actual import off to a background job so the request
        # returns immediately.
        dataset_upload.apply_async(kwargs={"compressed_file": result['file'],
                                           "cid":collection.id })
        name = os.path.basename(result['file'])
        data = {'is_valid': True, 'name': name, 'url': collection.get_absolute_url()}
    else:
        # Remove the temporary directory holding the rejected archive.
        tmpdir = os.path.dirname(result['file'])
        shutil.rmtree(tmpdir)
    return data
| StarcoderdataPython |
5111947 | <filename>lintcode/0157-unique-characters.py<gh_stars>1-10
# Description
# 中文
# English
# Implement an algorithm to determine if a string has all unique characters.
# Have you met this question in a real interview?
# Example
# Example 1:
# Input: "abc_____"
# Output: false
# Example 2:
# Input: "abc"
# Output: true
# Challenge
# What if you can not use additional data structures?
class Solution:
    """
    @param: str: A string
    @return: a boolean
    """

    def isUnique(self, str):
        """Return True if every character in *str* occurs at most once.

        Tracks seen characters in a set, which generalizes the original
        fixed-size 129-entry ASCII table (that raised IndexError for any
        character with ord(c) > 128) to arbitrary Unicode input. Also drops
        a dead ``ch = range(129)`` assignment that was immediately
        overwritten, and the commented-out alternative implementation.
        """
        seen = set()
        for char in str:
            if char in seen:
                return False
            seen.add(char)
        return True
6659094 | from django.conf.urls import url, include
from django.views.generic import TemplateView, DetailView
from django.contrib.auth.decorators import login_required
from securedpi_events import views
# Event URLs, keyed by a numeric primary key captured from the path.
# NOTE(review): pk presumably identifies the device whose events are shown —
# confirm against securedpi_events.views.
urlpatterns = [
    # Event listing page for one pk; login required.
    url(r'^(?P<pk>\d+)/$',
        login_required(views.EventView.as_view()),
        name='events'),
    # Endpoint that prunes outdated events for the same pk; login required.
    url(r'^(?P<pk>\d+)/delete-old-events/$',
        login_required(views.delete_old_events),
        name='delete_old_events'),
]
| StarcoderdataPython |
1979030 | <reponame>JiahuaWU/fastai
import pytest, fastai
from fastai.gen_doc.doctest import this_tests
def test_has_version():
    # Register this test with fastai's doc-test tracker; presumably 'na'
    # marks it as not tied to a specific API — confirm against
    # fastai.gen_doc.doctest.
    this_tests('na')
    # The installed package must expose a non-empty version string.
    assert fastai.__version__
| StarcoderdataPython |
8029007 | from django.db import models
from .component import Component
from .test_status import TestStatus
from .contract_status import ContractStatus
from .lock_status import LockStatus
from .contract_type import ContractType
from .custom_field import CustomField
from django.contrib.contenttypes.fields import GenericRelation
from .model_mixins import SoftDeleteModel
class Contract(SoftDeleteModel):
    """A contract linking type/status records and hardware components.

    Inherits soft-delete behaviour from SoftDeleteModel.
    """

    # Human-readable name, also used as the str() representation.
    name = models.CharField(max_length=255)
    # NOTE(review): "custome_fields" looks like a typo for "custom_fields";
    # renaming would require a migration, so it is only flagged here.
    custome_fields = GenericRelation(CustomField)
    type = models.ForeignKey(ContractType, on_delete=models.CASCADE)
    lock_status = models.ForeignKey(LockStatus, on_delete=models.CASCADE)
    contract_status = models.ForeignKey(
        ContractStatus, on_delete=models.CASCADE)
    tests_status = models.ForeignKey(TestStatus, on_delete=models.CASCADE)
    # Optional set of components covered by this contract.
    components = models.ManyToManyField(
        Component, blank=True, related_name='+')

    def __str__(self):
        return self.name
| StarcoderdataPython |
1846894 | <filename>Python/100Excersises/.history/51 to 75/74/74_20201119130141.py<gh_stars>0
import pandas as p
d1=p | StarcoderdataPython |
389691 | import tensorflow as tf
def softmax_nd(target, axis, name=None):
    """
    Multi dimensional softmax,
    refer to https://github.com/tensorflow/tensorflow/issues/210
    compute softmax along the dimension of target
    the native softmax only supports batch_size x dimension
    """
    with tf.name_scope(name, 'softmax', values=[target]):
        # Subtract the per-axis maximum first for numerical stability.
        shifted = target - tf.reduce_max(target, axis, keepdims=True)
        exponentiated = tf.exp(shifted)
        return exponentiated / tf.reduce_sum(exponentiated, axis, keepdims=True)
| StarcoderdataPython |
6514698 | import unittest
import pytest
from geopyspark.geotrellis.constants import LayerType, ReadMethod
from geopyspark.geotrellis import Extent, GlobalLayout, LocalLayout, SourceInfo
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.geotrellis.layer import TiledRasterLayer
class TiledRasterLayerTest(BaseTestClass):
    """Tests for TiledRasterLayer.read across layouts, read methods and CRS."""

    # Per-cell tolerance used when comparing expected vs. actual rasters.
    difference = 0.000001

    @pytest.fixture(autouse=True)
    def tearDown(self):
        # Close the JVM gateway after each test.
        yield
        BaseTestClass.pysc._gateway.close()

    def read(self, layout, read_method, target_crs=None, multiplex=False):
        """Tile the reference layer and compare it to TiledRasterLayer.read.

        At most 2.5% of the cells may differ by more than ``difference``.

        NOTE(review): read_method is accepted but never forwarded to
        TiledRasterLayer.read, so the GDAL and GEOTRELLIS tests currently
        exercise the same code path — confirm the intended API.
        """
        expected_tiled = self.rdd.tile_to_layout(layout, target_crs=target_crs)
        expected_collected = expected_tiled.to_numpy_rdd().collect()
        if multiplex:
            # Read the same file twice, into bands 0 and 1.
            sources = [SourceInfo(self.path, {0: 0}), SourceInfo(self.path, {0: 1})]
            actual_tiled = TiledRasterLayer.read(sources,
                                                 layout_type=layout,
                                                 target_crs=target_crs)
        else:
            actual_tiled = TiledRasterLayer.read([self.path],
                                                 layout_type=layout,
                                                 target_crs=target_crs)
        actual_collected = actual_tiled.to_numpy_rdd().collect()
        self.assertEqual(len(expected_collected), len(actual_collected))
        # Align both collections spatially before cell-wise comparison.
        expected_collected.sort(key=lambda tup: (tup[0].col, tup[0].row))
        actual_collected.sort(key=lambda tup: (tup[0].col, tup[0].row))
        if multiplex:
            bands = (0, 1)
        else:
            bands = [0]
        for expected, actual in zip(expected_collected, actual_collected):
            for x in bands:
                self.assertEqual(expected[0], actual[0])
                self.assertTrue(expected[1].cells.shape[1:] == actual[1].cells[x, :, :].shape)
                diff = abs(expected[1].cells - actual[1].cells[x, :, :])
                off_values_count = (diff > self.difference).sum()
                self.assertTrue(off_values_count / expected[1].cells.size <= 0.025)

    # Tests using LocalLayout
    def test_read_no_reproject_local_geotrellis(self):
        self.read(LocalLayout(256, 256), ReadMethod.GEOTRELLIS)

    def test_read_ordered_no_reproject_local_geotrellis(self):
        self.read(LocalLayout(256, 256), ReadMethod.GEOTRELLIS, multiplex=True)

    def test_read_no_reproject_local_gdal(self):
        self.read(LocalLayout(256, 256), ReadMethod.GDAL)

    def test_read_with_reproject_local_geotrellis(self):
        self.read(LocalLayout(128, 256), ReadMethod.GEOTRELLIS, target_crs=3857)

    def test_ordered_read_with_reproject_local_geotrellis(self):
        self.read(LocalLayout(128, 256), ReadMethod.GEOTRELLIS, target_crs=3857, multiplex=True)

    def test_read_with_reproject_local_gdal(self):
        self.read(LocalLayout(128, 256), ReadMethod.GDAL, target_crs=3857)

    # Tests with GlobalLayout
    def test_read_no_reproject_global_geotrellis(self):
        self.read(GlobalLayout(tile_size=16, zoom=4), ReadMethod.GEOTRELLIS)

    def test_ordered_read_no_reproject_global_geotrellis(self):
        self.read(GlobalLayout(tile_size=16, zoom=4), ReadMethod.GEOTRELLIS, multiplex=True)

    def test_read_no_reproject_global_gdal(self):
        self.read(GlobalLayout(tile_size=128, zoom=4), ReadMethod.GDAL)

    def test_read_with_reproject_global_geotrellis(self):
        self.read(GlobalLayout(tile_size=128, zoom=4), ReadMethod.GEOTRELLIS, target_crs=3857)

    def test_read_with_reproject_global_gdal(self):
        # This method was defined twice with identical bodies; the second,
        # shadowing definition has been removed.
        self.read(GlobalLayout(tile_size=128, zoom=4), ReadMethod.GDAL, target_crs=3857)

    def test_ordered_read_with_reproject_global_geotrellis(self):
        self.read(GlobalLayout(tile_size=128, zoom=4), ReadMethod.GEOTRELLIS, target_crs=3857, multiplex=True)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
323413 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import sahara.service.validations.edp.base as b
# Request-body schema for creating a job execution (API v1).
JOB_EXEC_SCHEMA = {
    "type": "object",
    "properties": {
        "input_id": {
            "type": "string",
            "format": "uuid",
        },
        "output_id": {
            "type": "string",
            "format": "uuid",
        },
        "cluster_id": {
            "type": "string",
            "format": "uuid",
        },
        "interface": {
            "type": "simple_config",
        },
        "job_configs": b.job_configs,
        "is_public": {
            "type": ["boolean", "null"],
        },
        "is_protected": {
            "type": ["boolean", "null"],
        }
    },
    "additionalProperties": False,
    "required": [
        "cluster_id"
    ]
}
# API v2 additionally requires the job template to run.
JOB_EXEC_SCHEMA_V2 = copy.deepcopy(JOB_EXEC_SCHEMA)
JOB_EXEC_SCHEMA_V2['properties'].update({
    "job_template_id": {
        "type": "string",
        "format": "uuid",
    }})
JOB_EXEC_SCHEMA_V2['required'].append('job_template_id')
# Update schema: only visibility flags and a status transition
# (suspend/cancel) may be changed on an existing job execution.
JOB_EXEC_UPDATE_SCHEMA = {
    "type": "object",
    "properties": {
        "is_public": {
            "type": ["boolean", "null"],
        },
        "is_protected": {
            "type": ["boolean", "null"],
        },
        "info": {
            "type": "simple_config",
            "properties": {
                "status": {
                    "enum": ["suspend", "cancel"]
                }
            },
            "additionalProperties": False
        }
    },
    "additionalProperties": False,
    "required": []
}
| StarcoderdataPython |
9758918 | <filename>code/finetune/loadTwitterData.py
import glob
import os
import pathlib
import pickle
from collections import defaultdict
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tqdm import tqdm
def get_all_images(data_root="/data4/zyr/projects/HierachicalFusionModel/data/image_data"):
    """Return the paths of all ``.jpg`` files directly inside *data_root*.

    The directory was previously hard-coded; it is now a parameter whose
    default is the original project folder, so existing callers keep working.

    :param data_root: directory to scan for jpg images.
    :return: list of matching image paths as strings.
    """
    root = pathlib.Path(data_root)
    return [str(path) for path in root.glob("*.jpg")]
def get_dataset():
    """Build a tf.data pipeline of (image_tensor, image_id) pairs.

    Images come from the paths returned by :func:`get_all_images`; each is
    resized to 224x224 and scaled to [0, 1], and the id is the file name
    without directory or extension. Fixes the typos in the internal helper
    names (``prepocess``/``wapper``) and removes the commented-out dead
    code that followed the return statement.
    """
    print("Start Loading the Twitter dataset...")
    all_image_paths = get_all_images()

    def preprocess_image(image_raw):
        # Scale pixel values from [0, 255] to [0, 1] as float32.
        image = tf.convert_to_tensor(image_raw)
        image = tf.cast(image, tf.float32)
        image = image / 255
        return image

    def load_and_preprocess_image(image_file):
        # Runs eagerly inside tf.py_function, so the tensor can be turned
        # back into a Python string path.
        image_file = image_file.numpy().decode("utf-8")
        keras_image = img_to_array(
            load_img(image_file, target_size=(224, 224), interpolation="bilinear")
        )
        return preprocess_image(keras_image)

    def load_and_preprocess_image_wrapper(image_file):
        return tf.py_function(
            load_and_preprocess_image, inp=[image_file], Tout=tf.float32
        )

    def parse_idx(image_path):
        # Derive the image id: file name without directory and extension.
        image_path = image_path.numpy().decode("utf-8")
        image_idx = os.path.splitext(os.path.split(image_path)[-1])[0]
        image_idx = tf.cast(image_idx, tf.string)
        return image_idx

    def parse_idx_wrapper(image_path):
        return tf.py_function(parse_idx, inp=[image_path], Tout=tf.string)

    image_path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
    image_ds = image_path_ds.map(
        load_and_preprocess_image_wrapper,
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    idx_ds = image_path_ds.map(
        parse_idx_wrapper, num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    return tf.data.Dataset.zip((image_ds, idx_ds))
| StarcoderdataPython |
1612767 | <filename>envinorma/parametrization/consistency.py
import math
from datetime import date
from typing import List, Optional, Set, Tuple
from envinorma.models.condition import Condition, Equal, Greater, LeafCondition, Littler, Range, extract_leaf_conditions
from envinorma.models.parameter import Parameter, ParameterType
from .exceptions import ParametrizationError
# A (start, end) date interval; None on a side means unbounded there.
_DateRange = Tuple[Optional[date], Optional[date]]
def _extract_date_range(condition: LeafCondition) -> _DateRange:
    """Convert a leaf condition on a date parameter into a (start, end) interval.

    A None bound means the interval is unbounded in that direction.
    """
    if isinstance(condition, Range):
        return (condition.left, condition.right)
    if isinstance(condition, Equal):
        # Equality is a degenerate single-point interval.
        return (condition.target, condition.target)
    if isinstance(condition, Littler):
        return (None, condition.target)
    if isinstance(condition, Greater):
        return (condition.target, None)
    raise NotImplementedError(type(condition))
def _ranges_strictly_overlap(ranges: List[Tuple[float, float]]) -> bool:
sorted_ranges = sorted(ranges)
for ((x, y), (z, t)) in zip(sorted_ranges, sorted_ranges[1:]):
if x > y or z > t:
raise AssertionError
if y > z:
return True
return False
def _date_ranges_strictly_overlap(ranges: List[_DateRange]) -> bool:
    """Map date intervals to ordinal floats (None -> +/-inf) and test overlap."""
    numeric_ranges = []
    for start, end in ranges:
        lower = float(start.toordinal()) if start else -math.inf
        upper = float(end.toordinal()) if end else math.inf
        numeric_ranges.append((lower, upper))
    return _ranges_strictly_overlap(numeric_ranges)
def _check_date_conditions_not_compatible(all_conditions: List[Condition], parameter: Parameter) -> None:
    """Raise ParametrizationError if two date conditions can hold simultaneously."""
    leaf_conditions = [leaf for cd in all_conditions for leaf in extract_leaf_conditions(cd, parameter)]
    ranges: List[_DateRange] = []
    for condition in leaf_conditions:
        ranges.append(_extract_date_range(condition))
    if _date_ranges_strictly_overlap(ranges):
        raise ParametrizationError(
            f'Date ranges overlap, they can be satisfied simultaneously, which can lead to'
            f' ambiguities: {all_conditions}'
        )
# A (low, high) numeric interval; unbounded sides use +/- infinity.
_Range = Tuple[float, float]


def _extract_range(condition: LeafCondition) -> _Range:
    """Convert a leaf condition on a numeric parameter into a (low, high) interval."""
    if isinstance(condition, Range):
        return (condition.left, condition.right)
    if isinstance(condition, Equal):
        # Equality is a degenerate single-point interval.
        return (condition.target, condition.target)
    if isinstance(condition, Littler):
        return (-math.inf, condition.target)
    if isinstance(condition, Greater):
        return (condition.target, math.inf)
    raise NotImplementedError(type(condition))
def _check_real_number_conditions_not_compatible(all_conditions: List[Condition], parameter: Parameter) -> None:
    """Raise ParametrizationError if two numeric conditions can hold simultaneously."""
    leaf_conditions = [leaf for cd in all_conditions for leaf in extract_leaf_conditions(cd, parameter)]
    ranges = [_extract_range(condition) for condition in leaf_conditions]
    if _ranges_strictly_overlap(ranges):
        raise ParametrizationError(
            f'Ranges overlap, they can be satisfied simultaneously, which can lead to' f' ambiguities: {all_conditions}'
        )
def _check_discrete_conditions_not_compatible(all_conditions: List[Condition], parameter: Parameter) -> None:
    """Ensure equality conditions on a discrete parameter target distinct values.

    Raises ParametrizationError when a non-equality condition is found, or
    when two conditions share the same target (and could thus both apply).
    """
    leaf_conditions = [leaf for cd in all_conditions for leaf in extract_leaf_conditions(cd, parameter)]
    targets: Set = set()
    for condition in leaf_conditions:
        if not isinstance(condition, Equal):
            raise ParametrizationError(f'{parameter.id} conditions must be "=" conditions, got {condition.type}')
        if condition.target in targets:
            raise ParametrizationError(f'Several conditions are simultaneously satisfiable : {all_conditions}')
        targets.add(condition.target)
def _check_bool_conditions_not_compatible(all_conditions: List[Condition], parameter: Parameter) -> None:
    """Ensure equality conditions on a boolean parameter target distinct values.

    NOTE(review): duplicates _check_discrete_conditions_not_compatible apart
    from the first error message; the two could be merged.
    """
    leaf_conditions = [leaf for cd in all_conditions for leaf in extract_leaf_conditions(cd, parameter)]
    targets: Set[bool] = set()
    for condition in leaf_conditions:
        if not isinstance(condition, Equal):
            raise ParametrizationError(f'bool conditions must be "=" conditions, got {condition.type}')
        if condition.target in targets:
            raise ParametrizationError(f'Several conditions are simultaneously satisfiable : {all_conditions}')
        targets.add(condition.target)
def check_conditions_not_compatible(all_conditions: List[Condition], parameter: Parameter) -> None:
    """Run the incompatibility check matching the parameter's type.

    Raises ParametrizationError when two of the conditions can be satisfied
    simultaneously, and NotImplementedError for unknown parameter types.
    """
    checkers = {
        ParameterType.DATE: _check_date_conditions_not_compatible,
        ParameterType.REAL_NUMBER: _check_real_number_conditions_not_compatible,
        ParameterType.REGIME: _check_discrete_conditions_not_compatible,
        ParameterType.STRING: _check_discrete_conditions_not_compatible,
        ParameterType.RUBRIQUE: _check_discrete_conditions_not_compatible,
        ParameterType.BOOLEAN: _check_bool_conditions_not_compatible,
    }
    if parameter.type not in checkers:
        raise NotImplementedError(parameter.type)
    checkers[parameter.type](all_conditions, parameter)
| StarcoderdataPython |
3202790 | <gh_stars>10-100
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Endpoints used by every panel: arrows run from (x2, y2) back to (x1, y1).
x1, y1 = 0.3, 0.3
x2, y2 = 0.7, 0.7
fig = plt.figure(1, figsize=(8,3))
fig.clf()
from mpl_toolkits.axes_grid.axes_grid import AxesGrid
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
#from matplotlib.font_manager import FontProperties
def add_at(ax, t, loc=2):
    """Anchor the small text label *t* inside axes *ax* at location *loc*."""
    font_props = dict(size=10)
    anchored = AnchoredText(t, loc=loc, prop=font_props)
    ax.add_artist(anchored)
    return anchored
# A 1x4 grid of identical panels, each showing one annotate() arrow feature.
grid = AxesGrid(fig, 111, (1, 4), label_mode="1", share_all=True)
grid[0].set_autoscale_on(False)
# Panel 1: plain curved connection between the two points.
ax = grid[0]
ax.plot([x1, x2], [y1, y2], ".")
el = mpatches.Ellipse((x1, y1), 0.3, 0.4, angle=30, alpha=0.2)
ax.add_artist(el)
ax.annotate("",
            xy=(x1, y1), xycoords='data',
            xytext=(x2, y2), textcoords='data',
            arrowprops=dict(arrowstyle="-", #linestyle="dashed",
                            color="0.5",
                            patchB=None,
                            shrinkB=0,
                            connectionstyle="arc3,rad=0.3",
                            ),
            )
add_at(ax, "connect", loc=2)
# Panel 2: same connection, clipped at the ellipse boundary (patchB=el).
ax = grid[1]
ax.plot([x1, x2], [y1, y2], ".")
el = mpatches.Ellipse((x1, y1), 0.3, 0.4, angle=30, alpha=0.2)
ax.add_artist(el)
ax.annotate("",
            xy=(x1, y1), xycoords='data',
            xytext=(x2, y2), textcoords='data',
            arrowprops=dict(arrowstyle="-", #linestyle="dashed",
                            color="0.5",
                            patchB=el,
                            shrinkB=0,
                            connectionstyle="arc3,rad=0.3",
                            ),
            )
add_at(ax, "clip", loc=2)
# Panel 3: additionally pull the arrow end away from the patch (shrinkB=5).
ax = grid[2]
ax.plot([x1, x2], [y1, y2], ".")
el = mpatches.Ellipse((x1, y1), 0.3, 0.4, angle=30, alpha=0.2)
ax.add_artist(el)
ax.annotate("",
            xy=(x1, y1), xycoords='data',
            xytext=(x2, y2), textcoords='data',
            arrowprops=dict(arrowstyle="-", #linestyle="dashed",
                            color="0.5",
                            patchB=el,
                            shrinkB=5,
                            connectionstyle="arc3,rad=0.3",
                            ),
            )
add_at(ax, "shrink", loc=2)
# Panel 4: a "fancy" arrow style that mutates the path into an arrow shape.
ax = grid[3]
ax.plot([x1, x2], [y1, y2], ".")
el = mpatches.Ellipse((x1, y1), 0.3, 0.4, angle=30, alpha=0.2)
ax.add_artist(el)
ax.annotate("",
            xy=(x1, y1), xycoords='data',
            xytext=(x2, y2), textcoords='data',
            arrowprops=dict(arrowstyle="fancy", #linestyle="dashed",
                            color="0.5",
                            patchB=el,
                            shrinkB=5,
                            connectionstyle="arc3,rad=0.3",
                            ),
            )
add_at(ax, "mutate", loc=2)
# Shared axis limits and cosmetics, then render.
grid[0].set_xlim(0, 1)
grid[0].set_ylim(0, 1)
grid[0].axis["bottom"].toggle(ticklabels=False)
grid[0].axis["left"].toggle(ticklabels=False)
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
plt.draw()
plt.show()
| StarcoderdataPython |
3578689 | <filename>beaker/services/secret.py
from typing import Union
from ..data_model import *
from ..exceptions import *
from .service_client import ServiceClient
class SecretClient(ServiceClient):
    """
    Accessed via :data:`Beaker.secret <beaker.Beaker.secret>`.
    """

    def get(self, secret: str, workspace: Optional[Union[str, Workspace]] = None) -> Secret:
        """
        Get metadata about a secret.

        :param secret: The name of the secret.
        :param workspace: The Beaker workspace ID, full name, or object. If not specified,
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.

        :raises SecretNotFound: If the secret doesn't exist.
        :raises WorkspaceNotSet: If neither ``workspace`` nor
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
        :raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
        :raises HTTPError: Any other HTTP exception that can occur.
        """
        workspace: Workspace = self.resolve_workspace(workspace, read_only_ok=True)
        return Secret.from_json(
            self.request(
                f"workspaces/{workspace.id}/secrets/{self.url_quote(secret)}",
                method="GET",
                exceptions_for_status={404: SecretNotFound(secret)},
            ).json()
        )

    def read(
        self, secret: Union[str, Secret], workspace: Optional[Union[str, Workspace]] = None
    ) -> str:
        """
        Read the value of a secret.

        :param secret: The secret name or object.
        :param workspace: The Beaker workspace ID, full name, or object. If not specified,
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.

        :raises SecretNotFound: If the secret doesn't exist.
        :raises WorkspaceNotSet: If neither ``workspace`` nor
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
        :raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
        :raises HTTPError: Any other HTTP exception that can occur.
        """
        workspace: Workspace = self.resolve_workspace(workspace, read_only_ok=True)
        name = secret.name if isinstance(secret, Secret) else secret
        return self.request(
            f"workspaces/{workspace.id}/secrets/{self.url_quote(name)}/value",
            method="GET",
            # Map 404 to the documented SecretNotFound exception, consistent
            # with get() and delete(); previously the docstring promised this
            # but no status mapping was supplied.
            exceptions_for_status={404: SecretNotFound(name)},
        ).content.decode()

    def write(
        self, name: str, value: str, workspace: Optional[Union[str, Workspace]] = None
    ) -> Secret:
        """
        Write a new secret or update an existing one.

        :param name: The name of the secret.
        :param value: The value to write to the secret.
        :param workspace: The Beaker workspace ID, full name, or object. If not specified,
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.

        :raises WorkspaceNotSet: If neither ``workspace`` nor
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
        :raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
        :raises HTTPError: Any other HTTP exception that can occur.
        """
        workspace: Workspace = self.resolve_workspace(workspace)
        return Secret.from_json(
            self.request(
                f"workspaces/{workspace.id}/secrets/{self.url_quote(name)}/value",
                method="PUT",
                data=value.encode(),
            ).json()
        )

    def delete(self, secret: Union[str, Secret], workspace: Optional[Union[str, Workspace]] = None):
        """
        Permanently delete a secret.

        :param secret: The secret name or object.
        :param workspace: The Beaker workspace ID, full name, or object. If not specified,
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` is used.

        :raises SecretNotFound: If the secret doesn't exist.
        :raises WorkspaceNotSet: If neither ``workspace`` nor
            :data:`Beaker.config.default_workspace <beaker.Config.default_workspace>` are set.
        :raises BeakerError: Any other :class:`~beaker.exceptions.BeakerError` type that can occur.
        :raises HTTPError: Any other HTTP exception that can occur.
        """
        workspace: Workspace = self.resolve_workspace(workspace)
        name = secret.name if isinstance(secret, Secret) else secret
        return self.request(
            f"workspaces/{workspace.id}/secrets/{self.url_quote(name)}",
            method="DELETE",
            # Use the resolved name (not the possibly-Secret object) for a
            # consistent exception, matching get().
            exceptions_for_status={404: SecretNotFound(name)},
        )
| StarcoderdataPython |
1645342 | <filename>mser.py
#TODO:
#1. Make a variable to store the musics' destination/folder/directory DONE
#2. Get rid of the mutagen, not needed. Became redundant DONE
import os
import sys
import time
import telepot
from telepot.loop import MessageLoop
import config
# Telegram bot, authenticated with the key from config.py.
bot = telepot.Bot(config.key)
# Playback queue of pending entries (newest inserted at the front).
q = []
# Tracks whether the "queue is empty" notice was already printed, so it
# is shown once instead of every second.
empty = False
# Mount point of the flash drive holding the music files.
dir_name = "/media/pi/MUSICPI"
# Directory listing of the music folder.
a = os.listdir(dir_name)
# Text archive of the music listing, sent to users on /help.
listall = """Music archive."""
# Print the whole directory and build the /help listing.
for i in range(0, len(a)):
    print('{}.{}'.format(i, a[i]))
    listall = listall + '\n{}.{}'.format(i, a[i])
# Send message to user with their user ID
def sms(ID, str):
    """Send the text *str* to the Telegram chat identified by *ID*."""
    # NOTE(review): the parameter name "str" shadows the builtin; renaming
    # would touch all call sites, so it is only flagged here.
    bot.sendMessage(ID, str)
# Reply message to user with their user ID and to the message ID
def reply(ID, msgID, str):
    """Send *str* to chat *ID* as a reply to message *msgID*."""
    # The three None positional arguments are presumably parse_mode,
    # disable_web_page_preview and disable_notification per telepot's
    # sendMessage signature — confirm against the telepot docs.
    bot.sendMessage(ID, str, None, None, None, msgID)
# Handling all new messages, like the getUpdates()
def handle(msg):
user_id = msg['chat']['id']
msg_id = msg['message_id']
command = msg['text'].encode('utf-8')
#Output the whole directory
if command == '/help':
sms(user_id, listall)
return
if command == '/all':
for i in range(0, len(a)):
q.insert(0, i)
sms(user_id, "Successfully inserted all songs. Total queue size - {}".format(len(q)))
return
try:
numba = int(command)
except:
sms(user_id, "Not a number!")
return
if a[int(command)] == "System Volume Information":
sms(user_id, "A song, not a folder!")
return
if int(command) > (len(a) - 1):
print("Out of range!")
sms(user_id, "Out of range, please enter values from 0 to {}".format(len(a)-1))
return
#Insert from the tail
q.insert(0, command)
print('Inserted new song - {}'.format(a[int(command)]))
print('Total queue size - {}'.format(len(q)))
reply(user_id, msg_id, "Song - {} has been inserted into the queue.\n\
Your place in the queue - {}".format(
a[int(command)],
len(q)
))
# Start polling Telegram for new messages in a background thread.
MessageLoop(bot, handle).run_as_thread()
# Main player loop: while the queue has entries, pop one and play it;
# otherwise idle, reporting emptiness once.
while 1:
    if (len(q) > 0):
        try:
            empty = False
            song = q.pop()
            print("Successfully popped song - {}".format(a[int(song)]))
            print("Total queue size - {}".format(len(q)))
            # Blocks until mpv finishes the track; audio only (--no-video).
            os.system("mpv --no-video {}/\"{}\"".format(
                dir_name, a[int(song)]
            ))
        except:
            # Bare except keeps the player loop alive on any playback error.
            print('ERROR')
    else:
        # Report an empty queue only once per empty period.
        if empty == False:
            print('Queue is empty. Nothing to play')
            empty = True
    time.sleep(1)
| StarcoderdataPython |
11330503 | #
# author: <NAME> (<EMAIL>)
# last updated: December 29, 2020
#
"""These files are for implementing Student-:math:`t` process regression.
It is implemented, based on the following article:
(i) <NAME>., & <NAME>. (2006). Gaussian Processes
    for Machine Learning. MIT Press.
(ii) <NAME>., <NAME>., & <NAME>. (2014). Student-t Processes
as Alternatives to Gaussian Processes. In Proceedings of the 17th
International Conference on Artificial Intelligence and Statistics
(pp. 877-885)."""
| StarcoderdataPython |
5174840 | import utils.decisions_constants as log
from game.ai.strategies.chinitsu import ChinitsuStrategy
from game.ai.strategies.common_open_tempai import CommonOpenTempaiStrategy
from game.ai.strategies.formal_tempai import FormalTempaiStrategy
from game.ai.strategies.honitsu import HonitsuStrategy
from game.ai.strategies.main import BaseStrategy
from game.ai.strategies.tanyao import TanyaoStrategy
from game.ai.strategies.yakuhai import YakuhaiStrategy
from mahjong.shanten import Shanten
from mahjong.tile import TilesConverter
class OpenHandHandler:
    """Decides whether and how the player opens the hand (calls melds).

    Keeps track of the currently active opening strategy and delegates
    meld-calling decisions to it.
    """

    # The player this handler belongs to.
    player = None
    # Currently active strategy instance, or None while playing closed.
    current_strategy = None
    # Discard option chosen when the last meld was called.
    last_discard_option = None

    def __init__(self, player):
        self.player = player

    def determine_strategy(self, tiles_136, meld_tile=None):
        """Pick the highest-priority applicable opening strategy.

        :param tiles_136: hand tiles in 136-tile format
        :param meld_tile: the tile being considered for a meld, if any
        :return: True when a strategy is active after this call, else False
        """
        # for already opened hand we don't need to give up on selected strategy
        if self.player.is_open_hand and self.current_strategy:
            return False

        old_strategy = self.current_strategy
        self.current_strategy = None

        # order is important, we add strategies with the highest priority first
        strategies = []

        if self.player.table.has_open_tanyao:
            strategies.append(TanyaoStrategy(BaseStrategy.TANYAO, self.player))

        strategies.append(YakuhaiStrategy(BaseStrategy.YAKUHAI, self.player))
        strategies.append(HonitsuStrategy(BaseStrategy.HONITSU, self.player))
        strategies.append(ChinitsuStrategy(BaseStrategy.CHINITSU, self.player))
        strategies.append(FormalTempaiStrategy(BaseStrategy.FORMAL_TEMPAI, self.player))
        strategies.append(CommonOpenTempaiStrategy(BaseStrategy.COMMON_OPEN_TEMPAI, self.player))

        for strategy in strategies:
            if strategy.should_activate_strategy(tiles_136, meld_tile=meld_tile):
                self.current_strategy = strategy
                break

        # Log activation only on a change of strategy type.
        if self.current_strategy and (not old_strategy or self.current_strategy.type != old_strategy.type):
            self.player.logger.debug(
                log.STRATEGY_ACTIVATE,
                context=self.current_strategy,
            )

        if not self.current_strategy and old_strategy:
            self.player.logger.debug(log.STRATEGY_DROP, context=old_strategy)

        # IDIOM: the original `x and True or False` is just a verbose bool().
        return bool(self.current_strategy)

    def try_to_call_meld(self, tile_136, is_kamicha_discard):
        """Decide whether to call a meld on a discarded tile.

        :param tile_136: discarded tile in 136-tile format
        :param is_kamicha_discard: True when the discarder is the left player
        :return: (meld, discard_option) pair, or (None, None) when no meld
            should be called
        """
        tiles_136_previous = self.player.tiles[:]
        closed_hand_136_previous = self.player.closed_hand[:]
        tiles_136 = tiles_136_previous + [tile_136]
        self.determine_strategy(tiles_136, meld_tile=tile_136)

        if not self.current_strategy:
            self.player.logger.debug(log.MELD_DEBUG, "We don't have active strategy. Abort melding.")
            return None, None

        closed_hand_34_previous = TilesConverter.to_34_array(closed_hand_136_previous)
        previous_shanten, _ = self.player.ai.hand_builder.calculate_shanten_and_decide_hand_structure(
            closed_hand_34_previous
        )

        # Never meld out of a completed (agari) hand unless the strategy
        # explicitly allows it.
        if previous_shanten == Shanten.AGARI_STATE and not self.current_strategy.can_meld_into_agari():
            return None, None

        meld, discard_option = self.current_strategy.try_to_call_meld(tile_136, is_kamicha_discard, tiles_136)
        if discard_option:
            self.last_discard_option = discard_option

            self.player.logger.debug(
                log.MELD_CALL,
                "We decided to open hand",
                context=[
                    f"Hand: {self.player.format_hand_for_print(tile_136)}",
                    f"Meld: {meld.serialize()}",
                    f"Discard after meld: {discard_option.serialize()}",
                ],
            )

        return meld, discard_option
6472004 | from keras import backend as K
from keras.layers import ( # noqa
Input, Dense, Activation,
Reshape, Lambda, Dropout,
Bidirectional, BatchNormalization
)
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.merge import add, concatenate
from keras.layers.recurrent import GRU
from keras.models import Model
import src.config as cf
from src.loss import ctc_lambda_func
def CRNN_model():
    """Build the CRNN (CNN + bidirectional GRU + CTC) OCR model.

    Returns
    -------
    model : keras.Model
        Training model whose output is the CTC loss (inputs: image,
        labels, input_length, label_length).
    y_func : callable
        Backend function mapping an input image batch to softmax
        character activations (for inference/decoding).
    """
    act = 'relu'
    input_data = Input(
        name='the_input', shape=cf.IMAGE_SIZE + (cf.NO_CHANNELS, ),
        dtype='float32'
    )
    inner = Conv2D(16, (3, 3), padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(2, 2), name='max1')(inner)
    inner = Conv2D(32, (3, 3), padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(2, 2), name='max2')(inner)
    # NOTE(review): this layer consumes `input_data`, not `inner`, which makes
    # conv1/conv2/max1/max2 above dead branches (their outputs are discarded).
    # This looks like a copy-paste bug, but the hard-coded reshape dims below
    # (256, 572) depend on the actual downsampling factor, so confirm the
    # intended topology before changing this to `inner`.
    inner = Conv2D(64, (3, 3), padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv3')(input_data)
    inner = MaxPooling2D(pool_size=(2, 2), name='max3')(inner)
    inner = Conv2D(128, (3, 3), padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv4')(inner)
    inner = MaxPooling2D(pool_size=(2, 2), name='max4')(inner)
    inner = Conv2D(256, (3, 3), padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv5')(inner)
    inner = MaxPooling2D(pool_size=(2, 2), name='max5')(inner)

    # Collapse the conv feature map into a (time-steps, features) sequence.
    # Hard-coded dims -- assumes a fixed cf.IMAGE_SIZE; TODO confirm.
    conv_to_rnn_dims = (256, 572)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)

    # cuts down input size going into RNN:
    inner = Dense(cf.TIME_DENSE_SIZE, activation=act, name='dense1')(inner)

    # Two stacked bidirectional GRU layers (forward + backward merged by add).
    gru_1 = GRU(cf.RNN_SIZE, return_sequences=True,
                kernel_initializer='he_normal', name='gru1')(inner)
    gru_1b = GRU(cf.RNN_SIZE, return_sequences=True, go_backwards=True,
                 kernel_initializer='he_normal', name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(cf.RNN_SIZE, return_sequences=True,
                kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(cf.RNN_SIZE, return_sequences=True, go_backwards=True,
                 kernel_initializer='he_normal', name='gru2_b')(gru1_merged)

    # transforms RNN output to character activations:
    inner = Dense(cf.NO_LABELS, kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()

    labels = Input(name='the_labels', shape=[cf.MAX_LEN_TEXT], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    # CTC loss computed inside the graph via a Lambda layer.
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
        [y_pred, labels, input_length, label_length]
    )

    model = Model(inputs=[input_data, labels,
                          input_length, label_length], outputs=loss_out)

    # input --> output
    y_func = K.function([input_data], [y_pred])

    return model, y_func
| StarcoderdataPython |
1747457 | <filename>Data/OpenNpy.py
import numpy as np
import matplotlib.pyplot as plt

# Load the drawings; assumes each row is a flattened 28x28 grayscale image
# (QuickDraw-style .npy export) -- TODO confirm the array layout.
img_array = np.load('car.npy')
# Un-flatten the first sample and display it.
rimg = np.reshape(img_array[0], (28, 28))
plt.imshow(rimg, cmap="gray")
plt.show()
print(type(rimg))
print(rimg)
| StarcoderdataPython |
6494161 | <gh_stars>1-10
""" Define the Client model."""
# Django imports
from django.db import models
from django.utils.translation import gettext_lazy as _
# Utils Abstract model
from hisitter.utils.abstract_users import HisitterModel
# Models
from .users import User
class Client(HisitterModel):
    """One-to-one extension of ``User`` for the client role.

    Separating client-specific data from ``User`` keeps role-specific
    information (e.g. the children registered by each user) out of the
    base user table.
    """
    # The backing user account; deleting the user cascades to this row.
    user_client = models.OneToOneField(
        User,
        verbose_name=_("Client"),
        related_name='user_client',
        on_delete=models.CASCADE
    )

    def __str__(self):
        # Delegate to the related user's string representation.
        return str(self.user_client)
| StarcoderdataPython |
11397367 | # -*- coding: utf-8 -*-
"""
This module implements API endpoint handlers to query the database and
return data for the connexion app.
copyright: © 2019 by <NAME>.
license: MIT, see LICENSE for more details.
"""
from flask import abort, json
from data_access import DBClient
# Sentinel distinguishing "argument omitted" from any real value (incl. None).
unspecified = object()
def read_parks():
    """Handler for /parks endpoint.

    Fetches every park record from the database and returns them as a
    list of dicts.

    Returns
    -------
    list of dicts

    Raises
    ------
    werkzeug.exceptions.NotFound
        If no park records are found.
    """
    with DBClient() as DB:
        parks_data = DB.read_parks()
    # Each stored value is a JSON string; decode them all.
    response = [json.loads(raw) for raw in parks_data.values()]
    if not response:
        abort(404, f"No park records found.")
    return response
def read_park(park_id):
    """Handler for /parks/{park_id} endpoint.

    Retrieves data for one park from the database.

    Parameters
    ----------
    park_id : str
        A park ID.

    Returns
    -------
    dict

    Raises
    ------
    werkzeug.exceptions.NotFound
        If no match is found for `park_id`.
    """
    with DBClient() as DB:
        park_data = DB.read_park(park_id=park_id)
    # Guard clause: unknown park short-circuits with a 404.
    if not park_data:
        abort(404, f"Park ID not found.")
    return json.loads(park_data)
def read_experiences(park_id, _type=unspecified):
    """Handler for /parks/{park_id}/experiences endpoint.

    Returns every experience belonging to the park; when ``_type`` is
    given, only experiences whose "type" matches case-insensitively.

    Parameters
    ----------
    park_id : str
        A park ID.
    _type : str, optional
        Experience type used for filtering.

    Returns
    -------
    list of dicts

    Raises
    ------
    werkzeug.exceptions.NotFound
        If no match is found for `park_id`, or if `_type` is specified
        but matches nothing.
    """
    with DBClient() as DB:
        experience_data = DB.read_experiences(park_id=park_id)
    if not experience_data:
        abort(404, f"Park ID not found.")
    matches = []
    for raw in experience_data.values():
        experience = json.loads(raw)
        # Keep everything when no filter was given; otherwise compare
        # the type case-insensitively.
        if _type is unspecified or _type.lower() == experience["type"].lower():
            matches.append(experience)
    if matches:
        return matches
    if _type is not unspecified:
        # park_id returned results but no match for _type.
        abort(404, f"Experience of type '{_type}' not found.")
def read_experience(park_id, experience_id):
    """Handler for /parks/{park_id}/experiences/{experience_id} endpoint.

    Retrieves one experience from the database.

    Parameters
    ----------
    park_id : str
        A park ID.
    experience_id : str
        An experience ID.

    Returns
    -------
    dict

    Raises
    ------
    werkzeug.exceptions.NotFound
        If no match is found for `park_id` and/or `experience_id`.
    """
    with DBClient() as DB:
        experience_data = DB.read_experience(
            park_id=park_id, experience_id=experience_id
        )
    # Guard clause: unknown park/experience short-circuits with a 404.
    if not experience_data:
        abort(404, f"Park and/or experience ID not found.")
    return json.loads(experience_data)
| StarcoderdataPython |
4891619 | '''
code by <NAME>(<NAME>) @graykode
'''
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
tf.reset_default_graph()

# 3 Words Sentence
sentences = [ "i like dog", "i like cat", "i like animal",
              "dog cat animal", "apple cat dog like", "dog fish milk like",
              "dog cat eyes like", "i like apple", "apple i hate",
              "apple i movie book music like", "cat dog hate", "cat dog like"]

# Full token sequence of the corpus, in order.
word_sequence = " ".join(sentences).split()
word_list = " ".join(sentences).split()
# Deduplicated vocabulary.
word_list = list(set(word_list))
# word -> integer index.
word_dict = {w: i for i, w in enumerate(word_list)}

# Word2Vec Parameter
batch_size = 20
embedding_size = 2  # To show 2 dim embedding graph
voc_size = len(word_list)
def random_batch(data, size):
    """Sample `size` (input, label) one-hot pairs from the skip-gram data.

    `data` is a sequence of [target_index, context_index] pairs; the
    one-hot dimension is the module-level ``voc_size``.
    """
    chosen = np.random.choice(range(len(data)), size, replace=False)
    identity = np.eye(voc_size)
    inputs = [identity[data[idx][0]] for idx in chosen]   # target words
    labels = [identity[data[idx][1]] for idx in chosen]   # context words
    return inputs, labels
# Make skip gram of one size window
skip_grams = []
for i in range(1, len(word_sequence) - 1):
    # Index of the center word.
    target = word_dict[word_sequence[i]]
    # Indices of the words one position to the left and right.
    context = [word_dict[word_sequence[i - 1]], word_dict[word_sequence[i + 1]]]

    for w in context:
        # One (center, context) training pair per neighbour.
        skip_grams.append([target, w])

# Model
inputs = tf.placeholder(tf.float32, shape=[None, voc_size])   # one-hot center words
labels = tf.placeholder(tf.float32, shape=[None, voc_size])   # one-hot context words

# W and WT are two independent weight matrices (not transposes of each other).
W = tf.Variable(tf.random_uniform([voc_size, embedding_size], -1.0, 1.0))
WT = tf.Variable(tf.random_uniform([embedding_size, voc_size], -1.0, 1.0))

hidden_layer = tf.matmul(inputs, W)  # [batch_size, embedding_size]
output_layer = tf.matmul(hidden_layer, WT)  # [batch_size, voc_size]

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output_layer, labels=labels))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)  # 0.001 is the learning rate

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

    for epoch in range(5000):
        batch_inputs, batch_labels = random_batch(skip_grams, batch_size)
        _, loss = sess.run([optimizer, cost], feed_dict={inputs: batch_inputs, labels: batch_labels})

        if (epoch + 1) % 1000 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))

    trained_embeddings = W.eval()

# Scatter-plot the learned 2-D embedding of every vocabulary word.
for i, label in enumerate(word_list):
    x, y = trained_embeddings[i]
    plt.scatter(x, y)
    plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.show()
1888566 | #!/usr/bin/env python3
# type: ignore
# Configuration file for the Sphinx documentation builder.
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute
import importlib
import os
import shutil
import sys
from datetime import datetime
# Directory containing this conf.py.
PATH2DOC = os.path.abspath(".")
# Repository root (two levels up from the doc source).
PATH2ROOT = os.path.abspath(os.path.join(PATH2DOC, "..", ".."))
# The datafold package directory itself.
PATH2SRC = os.path.abspath(os.path.join(PATH2ROOT, "datafold"))

try:
    sys.path.insert(0, PATH2DOC)
    sys.path.insert(0, PATH2ROOT)
    sys.path.insert(0, PATH2SRC)
    # Imported both to validate the paths and to read the version below.
    from datafold import __version__
except ImportError:
    raise ImportError(f"The path to datafold is not correct \npath:" f"{PATH2ROOT}")

# For details on Sphinx configuration see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------------------

project = "datafold"
copyright = f"2019-{datetime.now().year}, the datafold contributors"
author = "datafold contributors"

version = __version__
release = version  # no need to make it separate

today_fmt = "%d %B %Y"
# -- General configuration ---------------------------------------------------------------
# -- General configuration ---------------------------------------------------------------

needs_sphinx = "3.4.0"

# document name of the "master" document, that is, the document that contains the root
# toctree directive
master_doc = "index"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    # See build_full.sh file to execute sphinx-apidoc which fetches
    # the documentation from the Python source code automatically.
    "sphinx.ext.autodoc",
    # generates function/method/attribute summary lists
    "sphinx.ext.autosummary",
    # See below for configuration of the todo extension
    "sphinx.ext.todo",
    # See below for configuration
    "sphinx.ext.imgmath",
    # Include bibtex citations
    # see https://sphinxcontrib-bibtex.readthedocs.io/en/latest/quickstart.html#overview
    "sphinxcontrib.bibtex",
    # 'napoleon' allows NumPy and Google style documentation (no external Sphinx
    # package required)
    # -> https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
    # numpydoc docstring guide
    # -> https://numpydoc.readthedocs.io/en/latest/format.html
    "sphinx.ext.napoleon",
    # Provides automatic generation of API documentation pages for Python package
    # modules. https://sphinx-automodapi.readthedocs.io/en/latest/
    "sphinx_automodapi.automodapi",
    # Allows to use type-hinting for documenting acceptable argument types and return
    # value types of functions.
    # https://github.com/agronholm/sphinx-autodoc-typehints
    # NOTE: sphinx_autodoc_typehints must be AFTER the "sphinx.ext.napoleon" include!!
    # https://github.com/agronholm/sphinx-autodoc-typehints/issues/15#issuecomment\-298224484
    "sphinx_autodoc_typehints",
    # Tries to find the source files where the objects are contained. When found,
    # a separate HTML page will be output for each module with a highlighted version of
    # the source code.
    # https://www.sphinx-doc.org/en/master/usage/extensions/viewcode.html
    "sphinx.ext.viewcode",
    # Generate automatic links to the documentation of objects in other projects.
    # see options below
    # https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
    "sphinx.ext.intersphinx",
    # https://nbsphinx.readthedocs.io/en/0.8.5/
    # provides a source parser for *.ipynb files
    "nbsphinx",
    # Include notebook files from outside the sphinx source root.
    # https://github.com/vidartf/nbsphinx-link
    "nbsphinx_link",
    # Include panels in a grid layout or as drop-downs
    # https://sphinx-panels.readthedocs.io/en/latest/
    "sphinx_panels",
    # Include copy buttons in code blocks
    # https://sphinx-copybutton.readthedocs.io/en/latest/
    "sphinx_copybutton",
]

# If the API folder is not removed, classes that were renamed can produce errors
# because the old files are still around.
remove_api_folder = True
if remove_api_folder:
    try:
        shutil.rmtree(os.path.join(PATH2DOC, "api"))
    except FileNotFoundError:
        pass  # no worries, the folder is already not there anymore

# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# sphinx.ext.todo:
# See all options: http://www.sphinx-doc.org/en/master/ext/todo.html

# If this is True, todo and todolist produce output, else they produce nothing. The
# default is False.
todo_include_todos = True

# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = False

# ----------------------------------------------------------------------------------------
# sphinx.ext.imgmath -- only the image version allows to include full latex functionality
# MathJax has other advantages (such as copying the equations in latex format) but does
# only support basic functionality
# https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-sphinx.ext.imgmath

imgmath_image_format = "png"  # default "png", other option "svg"
imgmath_add_tooltips = (
    True  # add the LaTeX code as an "alt" attribute for math images --
)
# (like on Wikipedia equations)
imgmath_font_size = 12  # default=12

# command name with which to invoke LaTeX. The default is 'latex';
# you may need to set this to a full path if latex is not in the executable search path
imgmath_latex = "latex"
imgmath_latex_args = []  # TODO raise error if not found?
imgmath_latex_preamble = r"\usepackage{amsmath,amstext}"
# "sphinxcontrib.bibtex"
# Because exported BibTex files include file information to PDF -- remove in the
# following snippet.
filepath_literature_file = os.path.join(".", "_static", "literature.bib")
filepath_literature_file = os.path.abspath(filepath_literature_file)
# read content
with open(filepath_literature_file, "r") as file:
content = file.read()
# leave out 'file' keys out
new_content = []
for line in content.splitlines(keepends=True):
if not line.lstrip().startswith("file") and not line.lstrip().startswith("urldate"):
new_content.append(line)
# write content back to file
with open(filepath_literature_file, "w") as file:
file.write("".join(new_content))
bibtex_bibfiles = [filepath_literature_file]
# ----------------------------------------------------------------------------------------
# napoleon (see full list of available options:
# Full config explanations here:
# https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html#configuration

# datafold uses NumPy-style docstrings, not Google-style.
napoleon_google_docstring = False
napoleon_numpy_docstring = True

napoleon_include_init_with_doc = False

# include private members (like _membername)
napoleon_include_private_with_doc = False

# include special members (like __membername__)
napoleon_include_special_with_doc = True

napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False

# use the :ivar: role for instance variables
# shows the "Attributes" section
napoleon_use_ivar = True

# True -> :param: role for each function parameter.
# False -> use a single :parameters: role for all the parameters.
napoleon_use_param = True
napoleon_use_keyword = True

# True to use the :rtype: role for the return type. False to output the return type inline
# with the description.
napoleon_use_rtype = True

# ----------------------------------------------------------------------------------------
# sphinx_automodapi.automodapi (see full list of available options:
# Full config explanations here:
# https://sphinx-automodapi.readthedocs.io/en/latest/

# Do not include inherited members by default
automodsumm_inherited_members = False

# ----------------------------------------------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# generate automatic links to the documentation of objects in other projects.

intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "scikit-learn": ("https://scikit-learn.org/stable/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
}

# TODO: many pandas links are not resolved -- See:
#  https://github.com/agronholm/sphinx-autodoc-typehints/issues/47
#  in order to have not a mix between some links that work and many that don't
#  pandas is unfortunately excluded for now
#  "pandas": ("http://pandas.pydata.org/pandas-docs/dev", None)
#  ~
#  a solution would be to make an own .inv file, that replaces the short links to
#  deep-links (see github issue)
#  See also: https://sphobjinv.readthedocs.io/en/latest/customfile.html

# The maximum number of days to cache remote inventories.
intersphinx_cache_limit = 5  # default = 5

# The number of seconds for timeout.
intersphinx_timeout = 30

# ----------------------------------------------------------------------------------------
# nbsphinx - provides a source parser for *.ipynb files
# https://nbsphinx.readthedocs.io/en/0.6.0/usage.html#nbsphinx-Configuration-Values

# Fail the doc build when a tutorial notebook raises.
nbsphinx_allow_errors = False
# Allow the (expensive) tutorial execution to be toggled via an environment
# variable; it should be set when publishing the pages.
_nbsphinx_env = os.environ.get("DATAFOLD_NBSPHINX_EXECUTE")
if _nbsphinx_env is None:
    # default: do not execute tutorial notebooks
    print(
        "INFO: no environment variable DATAFOLD_NBSPHINX_EXECUTE. Defaulting to not "
        "execute tutorial files."
    )
    nbsphinx_execute = "never"
elif _nbsphinx_env not in ("auto", "always", "never"):
    # Explicit error instead of `assert` (asserts are stripped under `python -O`
    # and raise an uninformative AssertionError). Also removes the stray debug
    # print of the raw value and fixes the "DATFOLD" typo in the message.
    raise ValueError(
        "DATAFOLD_NBSPHINX_EXECUTE must be one of 'auto', 'always', 'never' "
        f"(got {_nbsphinx_env!r})."
    )
else:
    nbsphinx_execute = _nbsphinx_env
    print(
        f"INFO: found valid DATAFOLD_NBSPHINX_EXECUTE={nbsphinx_execute} environment "
        f"variable."
    )
# Arguments passed to the kernel when executing tutorial notebooks.
nbsphinx_execute_arguments = [
    "--InlineBackend.figure_formats={'svg', 'pdf'}",
    "--InlineBackend.rc={'figure.dpi': 96}",
]

# add datafold and tutorials folder to PYTHONPATH to run jupyter notebooks
os.environ["PYTHONPATH"] = f"{PATH2ROOT}:{os.path.join(PATH2ROOT, 'tutorials')}"

# Load and run generate_tutorials_page.py by file path (it is not an importable
# package module); code parts were taken from https://stackoverflow.com/a/67692
spec = importlib.util.spec_from_file_location(
    "tutorials_script", os.path.join(PATH2DOC, "generate_tutorials_page.py")
)
tutorials_script = importlib.util.module_from_spec(spec)
spec.loader.exec_module(tutorials_script)
tutorials_script.setup_tutorials()

# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["README.rst", "setup.py"]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = "sphinx_rtd_theme" # alternative theme
html_theme = "pydata_sphinx_theme"
html_logo = "_static/img/datafold_logo_pre.svg"

html_theme_options = {
    "icon_links": [
        {
            "name": "GitLab",
            "url": "https://gitlab.com/datafold-dev/datafold/",
            "icon": "fab fa-gitlab",
        },
        {
            "name": "PyPI",
            "url": "https://pypi.org/project/datafold/",
            "icon": "fab fa-python",
        },
    ],
    "icon_links_label": "Quick Links",
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| StarcoderdataPython |
11202268 | # -*- coding: utf-8 -*-
"""
Flow based cut algorithms
"""
# http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
# http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
import itertools
from operator import itemgetter
import networkx as nx
from networkx.algorithms.connectivity.connectivity import \
_aux_digraph_node_connectivity, _aux_digraph_edge_connectivity, \
dominating_set, node_connectivity
__author__ = '\n'.join(['<NAME> <<EMAIL>>'])
__all__ = [ 'minimum_st_node_cut',
'minimum_node_cut',
'minimum_st_edge_cut',
'minimum_edge_cut',
]
def minimum_st_edge_cut(G, s, t, capacity='capacity'):
    """Returns the edges of the cut-set of a minimum (s, t)-cut.

    We use the max-flow min-cut theorem, i.e., the capacity of a minimum
    capacity cut is equal to the flow value of a maximum flow.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity: string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    Returns
    -------
    cutset : set
        Set of edges that, if removed from the graph, will disconnect it

    Raises
    ------
    NetworkXUnbounded
        If the graph has a path of infinite capacity, all cuts have
        infinite capacity and the function raises a NetworkXError.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edge('x','a', capacity = 3.0)
    >>> G.add_edge('x','b', capacity = 1.0)
    >>> G.add_edge('a','c', capacity = 3.0)
    >>> G.add_edge('b','c', capacity = 5.0)
    >>> G.add_edge('b','d', capacity = 4.0)
    >>> G.add_edge('d','e', capacity = 2.0)
    >>> G.add_edge('c','y', capacity = 2.0)
    >>> G.add_edge('e','y', capacity = 3.0)
    >>> list(nx.minimum_edge_cut(G, 'x', 'y'))
    [('c', 'y'), ('x', 'b')]
    >>> nx.min_cut(G, 'x', 'y')
    3.0
    """
    try:
        # H is the residual network of a maximum s-t flow.
        flow, H = nx.ford_fulkerson_flow_and_auxiliary(G, s, t, capacity=capacity)
        cutset = set()
        # Compute reachable nodes from source in the residual network
        reachable = set(nx.single_source_shortest_path(H, s))
        # And unreachable nodes
        others = set(H) - reachable  # - set([s])
        # Any edge in the original network linking these two partitions
        # is part of the edge cutset
        for u, nbrs in ((n, G[n]) for n in reachable):
            cutset.update((u, v) for v in nbrs if v in others)
        return cutset
    except nx.NetworkXUnbounded:
        # Should we raise any other exception or just let ford_fulkerson
        # propagate nx.NetworkXUnbounded ?
        raise nx.NetworkXUnbounded("Infinite capacity path, no minimum cut.")
def minimum_st_node_cut(G, s, t, aux_digraph=None, mapping=None):
    r"""Returns a set of nodes of minimum cardinality that disconnect source
    from target in G.

    This function returns the set of nodes of minimum cardinality that,
    if removed, would destroy all paths among source and target in G.

    Parameters
    ----------
    G : NetworkX graph

    s : node
        Source node.

    t : node
        Target node.

    aux_digraph : NetworkX DiGraph, optional
        Pre-built auxiliary digraph from ``_aux_digraph_node_connectivity``
        (reused by callers that compute many st-cuts on the same graph).

    mapping : dict, optional
        Node mapping that belongs to ``aux_digraph``.

    Returns
    -------
    cutset : set
        Set of nodes that, if removed, would destroy all paths between
        source and target in G.

    Examples
    --------
    >>> # Platonic icosahedral graph has node connectivity 5
    >>> G = nx.icosahedral_graph()
    >>> len(nx.minimum_node_cut(G, 0, 6))
    5

    Notes
    -----
    This is a flow based implementation of minimum node cut. The algorithm
    is based in solving a number of max-flow problems (ie local st-node
    connectivity, see local_node_connectivity) to determine the capacity
    of the minimum cut on an auxiliary directed network that corresponds
    to the minimum node cut of G. It handles both directed and undirected
    graphs.

    This implementation is based on algorithm 11 in [1]_. We use the Ford
    and Fulkerson algorithm to compute max flow (see ford_fulkerson).

    See also
    --------
    node_connectivity
    edge_connectivity
    minimum_edge_cut
    max_flow
    ford_fulkerson

    References
    ----------
    .. [1] <NAME>. Connectivity Algorithms.
        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
    """
    if aux_digraph is None or mapping is None:
        H, mapping = _aux_digraph_node_connectivity(G)
    else:
        H = aux_digraph
    # A node cut in G corresponds to an edge cut between the "B"-copy of s
    # and the "A"-copy of t in the node-split auxiliary digraph.
    edge_cut = minimum_st_edge_cut(H, '%sB' % mapping[s], '%sA' % mapping[t])
    # Each node in the original graph maps to two nodes of the auxiliary graph
    node_cut = set(H.node[node]['id'] for edge in edge_cut for node in edge)
    return node_cut - set([s, t])
def minimum_node_cut(G, s=None, t=None):
    r"""Returns a set of nodes of minimum cardinality that disconnects G.

    If source and target nodes are provided, this function returns the
    set of nodes of minimum cardinality that, if removed, would destroy
    all paths among source and target in G. If not, it returns a set
    of nodes of minimum cardinality that disconnects G.

    Parameters
    ----------
    G : NetworkX graph

    s : node
        Source node. Optional (default=None)

    t : node
        Target node. Optional (default=None)

    Returns
    -------
    cutset : set
        Set of nodes that, if removed, would disconnect G. If source
        and target nodes are provided, the set contains the nodes that
        if removed, would destroy all paths between source and target.

    Examples
    --------
    >>> # Platonic icosahedral graph has node connectivity 5
    >>> G = nx.icosahedral_graph()
    >>> len(nx.minimum_node_cut(G))
    5
    >>> # this is the minimum over any pair of non adjacent nodes
    >>> from itertools import combinations
    >>> for u,v in combinations(G, 2):
    ...     if v not in G[u]:
    ...         assert(len(nx.minimum_node_cut(G,u,v)) == 5)
    ...

    Notes
    -----
    This is a flow based implementation of minimum node cut. The algorithm
    is based in solving a number of max-flow problems (ie local st-node
    connectivity, see local_node_connectivity) to determine the capacity
    of the minimum cut on an auxiliary directed network that corresponds
    to the minimum node cut of G. It handles both directed and undirected
    graphs.

    This implementation is based on algorithm 11 in [1]_. We use the Ford
    and Fulkerson algorithm to compute max flow (see ford_fulkerson).

    See also
    --------
    node_connectivity
    edge_connectivity
    minimum_edge_cut
    max_flow
    ford_fulkerson

    References
    ----------
    .. [1] <NAME>. Connectivity Algorithms.
        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
    """
    # Local minimum node cut
    if s is not None and t is not None:
        if s not in G:
            raise nx.NetworkXError('node %s not in graph' % s)
        if t not in G:
            raise nx.NetworkXError('node %s not in graph' % t)
        return minimum_st_node_cut(G, s, t)
    # Global minimum node cut
    # Analog to the algorithm 11 for global node connectivity in [1]
    if G.is_directed():
        if not nx.is_weakly_connected(G):
            raise nx.NetworkXError('Input graph is not connected')
        iter_func = itertools.permutations

        def neighbors(v):
            # For digraphs both in- and out-neighbors matter.
            return itertools.chain.from_iterable([G.predecessors_iter(v),
                                                  G.successors_iter(v)])
    else:
        if not nx.is_connected(G):
            raise nx.NetworkXError('Input graph is not connected')
        iter_func = itertools.combinations
        neighbors = G.neighbors_iter
    # Choose a node with minimum degree
    deg = G.degree()
    min_deg = min(deg.values())
    v = next(n for n, d in deg.items() if d == min_deg)
    # Initial node cutset is all neighbors of the node with minimum degree
    min_cut = set(G[v])
    # Reuse the auxiliary digraph
    H, mapping = _aux_digraph_node_connectivity(G)
    # compute st node cuts between v and all its non-neighbors nodes in G
    # and store the minimum
    for w in set(G) - set(neighbors(v)) - set([v]):
        this_cut = minimum_st_node_cut(G, v, w, aux_digraph=H, mapping=mapping)
        if len(min_cut) >= len(this_cut):
            min_cut = this_cut
    # Same for non adjacent pairs of neighbors of v
    for x, y in iter_func(neighbors(v), 2):
        if y in G[x]: continue
        this_cut = minimum_st_node_cut(G, x, y, aux_digraph=H, mapping=mapping)
        if len(min_cut) >= len(this_cut):
            min_cut = this_cut
    return min_cut
def minimum_edge_cut(G, s=None, t=None):
    r"""Returns a set of edges of minimum cardinality that disconnects G.

    If source and target nodes are provided, this function returns the
    set of edges of minimum cardinality that, if removed, would break
    all paths among source and target in G. If not, it returns a set of
    edges of minimum cardinality that disconnects G.

    Parameters
    ----------
    G : NetworkX graph

    s : node
        Source node. Optional (default=None)

    t : node
        Target node. Optional (default=None)

    Returns
    -------
    cutset : set
        Set of edges that, if removed, would disconnect G. If source
        and target nodes are provided, the set contains the edges that
        if removed, would destroy all paths between source and target.

    Examples
    --------
    >>> # Platonic icosahedral graph has edge connectivity 5
    >>> G = nx.icosahedral_graph()
    >>> len(nx.minimum_edge_cut(G))
    5
    >>> # this is the minimum over any pair of nodes
    >>> from itertools import combinations
    >>> for u,v in combinations(G, 2):
    ...     assert(len(nx.minimum_edge_cut(G,u,v)) == 5)
    ...

    Notes
    -----
    This is a flow based implementation of minimum edge cut. For
    undirected graphs the algorithm works by finding a 'small' dominating
    set of nodes of G (see algorithm 7 in [1]_) and computing the maximum
    flow between an arbitrary node in the dominating set and the rest of
    nodes in it. This is an implementation of algorithm 6 in [1]_.

    For directed graphs, the algorithm does n calls to the max flow function.
    This is an implementation of algorithm 8 in [1]_. We use the Ford and
    Fulkerson algorithm to compute max flow (see ford_fulkerson).

    See also
    --------
    node_connectivity
    edge_connectivity
    minimum_node_cut
    max_flow
    ford_fulkerson

    References
    ----------
    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
    """
    # NOTE(review): this code relies on the legacy NetworkX 1.x API
    # (dict-valued G.degree(), list-valued G.nodes()) -- confirm before
    # running on NetworkX 2.x, where both return views.
    # reuse auxiliary digraph
    H = _aux_digraph_edge_connectivity(G)
    # Local minimum edge cut if s and t are not None
    if s is not None and t is not None:
        if s not in G:
            raise nx.NetworkXError('node %s not in graph' % s)
        if t not in G:
            raise nx.NetworkXError('node %s not in graph' % t)
        return minimum_st_edge_cut(H, s, t)
    # Global minimum edge cut
    # Analog to the algorithm for global edge connectivity
    if G.is_directed():
        # Based on algorithm 8 in [1]
        if not nx.is_weakly_connected(G):
            raise nx.NetworkXError('Input graph is not connected')
        # Initial cutset is all edges of a node with minimum degree
        deg = G.degree()
        min_deg = min(deg.values())
        node = next(n for n, d in deg.items() if d == min_deg)
        min_cut = G.edges(node)
        # Compute st-cuts between consecutive nodes in an arbitrary ordering
        # (wrapping from the last node back to the first) and keep the
        # smallest; per algorithm 8 this covers a minimum cut of G.
        nodes = G.nodes()
        n = len(nodes)
        for i in range(n):
            try:
                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[i + 1])
                if len(this_cut) <= len(min_cut):
                    min_cut = this_cut
            except IndexError:  # Last node: wrap around to the first one.
                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[0])
                if len(this_cut) <= len(min_cut):
                    min_cut = this_cut
        return min_cut
    else:  # undirected
        # Based on algorithm 6 in [1]
        if not nx.is_connected(G):
            raise nx.NetworkXError('Input graph is not connected')
        # Initial cutset is all edges of a node with minimum degree
        deg = G.degree()
        min_deg = min(deg.values())
        node = next(n for n, d in deg.items() if d == min_deg)
        min_cut = G.edges(node)
        # A dominating set is \lambda-covering
        # We need a dominating set with at least two nodes
        for node in G:
            D = dominating_set(G, start_with=node)
            v = D.pop()
            if D:
                break
        else:
            # in complete graphs the dominating set will always be of one node
            # thus we return min_cut, which now contains the edges of a node
            # with minimum degree
            return min_cut
        # The minimum cut separates v from at least one other dominating
        # set member, so it suffices to check st-cuts from v to each w in D.
        for w in D:
            this_cut = minimum_st_edge_cut(H, v, w)
            if len(this_cut) <= len(min_cut):
                min_cut = this_cut
        return min_cut
| StarcoderdataPython |
6449863 | <reponame>mesoscope/cellpack
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#
# $Header: /opt/cvs/python/packages/share1.5/mglutil/math/transformation.py,v 1.45 2007/07/24 17:30:40 vareille Exp $
#
import numpy as np
from . import rotax
# from mglutil.math.VectorModule import Vector
Vector = None # sVectorModule.Vector
class Quaternion:
    """Base quaternion class.

    A quaternion is stored as a scalar ("real") part plus a 3-component
    vector ("pure") part.  Construction data is ``(c, (x, y, z))`` where
    ``c`` is the real part (float) and ``(x, y, z)`` is the pure part.
    """

    # NOTE: the mutable np.array default is shared across calls, but it is
    # only read (copied into a fresh array), never mutated.
    def __init__(self, data=(1.0, np.array((0.0, 0.0, 0.0), "f"))):
        """Initialize from data of the form (c, (x, y, z)), where c is the
        real part (float) and (x, y, z) is the pure part (numeric array
        of floats).

        Raises:
            ValueError: if data cannot be unpacked into that form.
        """
        try:
            self.real = float(data[0])
            self.pure = np.array((data[1][0], data[1][1], data[1][2]), "f")
        except Exception:
            raise ValueError("1Arguments must be (c,(x,y,z))")
        if len(self.pure) != 3:
            raise ValueError("2Arguments must be (c,(x,y,z))")

    def __repr__(self):
        """Representation of a general quaternion must be (real, pure),
        since not all quaternions are rotations.
        """
        result = "Quaternion (%g (%g %g %g))" % (
            self.real,
            self.pure[0],
            self.pure[1],
            self.pure[2],
        )
        return result

    def __add__(self, other):
        """Return the component-wise sum of two quaternions."""
        real = self.real + other.real
        pure = self.pure + other.pure
        return Quaternion((real, pure))

    def __mul__(self, other):
        """Multiply two quaternions together (Hamilton product).

        For unit quaternions, this is equivalent to concatenating rotations.
        """
        # Scalar part: r1*r2 - v1.v2
        real = self.real * other.real - np.inner(self.pure, other.pure)
        v1 = self.pure
        v2 = other.pure
        # Cross product v1 x v2, written out component by component.
        cofactor1 = v1[1] * v2[2] - v1[2] * v2[1]
        cofactor2 = v1[2] * v2[0] - v1[0] * v2[2]
        cofactor3 = v1[0] * v2[1] - v1[1] * v2[0]
        # Vector part: v1 x v2 + r1*v2 + r2*v1
        pure = (
            np.array([cofactor1, cofactor2, cofactor3])
            + self.real * other.pure
            + other.real * self.pure
        )
        return Quaternion((real, pure))

    def conjugate(self):
        """The conjugate of quaternion (c,(x,y,z)) is (c,(-x,-y,-z)).

        The product of a quaternion and its conjugate equals its
        (squared) magnitude.
        """
        pure = -self.pure
        real = self.real
        return Quaternion((real, pure))

    def magnitude(self):
        """Return the *squared* norm: real**2 + |pure|**2.

        NOTE: despite the name this is the squared magnitude (quicker than
        multiplying conjugates); callers that need the true norm take the
        square root, as normal() does below.
        """
        return self.real ** 2 + np.inner(self.pure, self.pure)

    def inverse(self):
        """Get the multiplicative inverse: conjugate divided by the
        squared magnitude."""
        real = self.real / self.magnitude()
        pure = -self.pure / self.magnitude()
        return Quaternion((real, pure))

    def normal(self):
        """Normalise this quaternion in place by dividing throughout by
        the (true, square-rooted) magnitude."""
        M = np.sqrt(self.magnitude())
        self.pure = self.pure / M
        self.real = self.real / M
class UnitQuaternion(Quaternion):
    """Special subclass of Quaternion with magnitude 1.0.

    Can be used to represent rotations, in which case
    ``real = cos(theta/2)`` and ``pure = sin(theta/2) * axis`` for a unit
    direction vector ``axis``.

    Input can also be given in the form ``(x, y, z, theta)``, where
    ``(x, y, z)`` is the rotation axis (not necessarily normalized) and
    ``theta`` is the rotation angle in degrees.
    """

    def __init__(self, data=(1.0, np.array((0.0, 0.0, 0.0), "f"))):
        """(real, (pure x, pure y, pure z)) or (x, y, z, theta), theta in degrees.

        Raises:
            ValueError: if data is neither a 2-tuple nor a 4-tuple.

        NOTE(review): np.arccos returns nan (with a runtime warning) for
        inputs outside [-1, 1] rather than raising, so the except below
        mainly catches a malformed pure part -- verify if strict range
        checking of the real part is required.
        """
        if len(data) == 2:
            self.real = data[0]
            try:
                theta = np.arccos(self.real)
                self.pure = np.array((data[1][0], data[1][1], data[1][2]), "f")
            except Exception:
                raise ValueError("The real part must be between -1.0 and 1.0")
        elif len(data) == 4:
            # Half-angle in radians: pi * degrees / 360 == radians(theta)/2.
            theta = np.pi * data[3] / 360.0
            self.real = np.cos(theta)
            self.pure = np.sin(theta) * np.array((data[0], data[1], data[2]), "f")
        else:
            raise ValueError("Args must be (x,y,z,theta) or (real,pure)")
        self.normal()

    def normal(self):
        """Renormalize in place so that the pure part encodes a unit axis
        scaled by sin(theta/2); the identity (real == 1.0) gets a zero
        pure part to avoid dividing by sin(0)."""
        if self.real != 1.0:
            theta = np.arccos(self.real)
            vector = self.pure / np.sin(theta)
            vector = vector / np.sqrt(np.inner(vector, vector))
            self.pure = np.sin(theta) * vector
        else:
            self.pure = np.zeros(3, "f")

    def __repr__(self):
        """Representation of a unit quaternion is as rx,ry,rz,theta,
        so we can see what it does.
        """
        if self.real != 1.0:
            # if it is not the identity
            theta = np.arccos(self.real)
            angle = 360 * theta / np.pi
            xyz = self.pure / np.sin(theta)
        else:
            # if it is the identity
            angle = 0.0
            xyz = self.pure
        return "Unit Quaternion %7.4f %7.4f %7.4f %7.3f" % (
            xyz[0],
            xyz[1],
            xyz[2],
            angle,
        )

    def __mul__(self, other):
        """Same as Quaternion.__mul__, except return another UnitQuaternion."""
        result = Quaternion.__mul__(self, other)
        return UnitQuaternion((result.real, result.pure))

    def conjugate(self):
        """Return the conjugate as a UnitQuaternion."""
        result = Quaternion.conjugate(self)
        return UnitQuaternion((result.real, result.pure))

    def inverse(self):
        """For a unit quaternion, the inverse is just the conjugate."""
        return self.conjugate()

    def getAxisAndAngleDegres(self):
        """Given a quaternion, compute (axis, angle-in-degrees).

        (sic: method name kept misspelled for backward compatibility.)
        NOTE(review): divides by sin(theta) -- the identity quaternion
        (real == 1.0) would divide by zero here; confirm callers guard it.
        """
        theta = np.arccos(self.real)
        angle = 360 * theta / np.pi
        xyz = self.pure / np.sin(theta)
        return xyz, angle

    def getRotMatrix(self, shape=(4, 4), transpose=None):
        """Return the rotation matrix as a numpy array of the given shape.

        shape must be (3,3), (4,4), (9,) or (16,); transpose selects
        between the matrix and its transpose (rotax returns the inverse
        convention, hence the default path transposes).
        """
        try:
            assert shape in [(3, 3), (4, 4), (9,), (16,)]
        except Exception:
            raise ValueError("shape must be (3,3), (4,4), (9,) or (16,)")
        # get the inverse 4x4 from rotax; rotation angle is 2*arccos(real)
        mtx = rotax.rotax(
            np.array([0.0, 0.0, 0.0], "f"), self.pure, 2 * np.arccos(self.real)
        )
        # strip down to 3x3 if a rotation-only shape was requested
        if shape in ((3, 3), (9,)):
            mtx = [x[:3] for x in mtx]
            mtx = mtx[:3]
        if not transpose:
            return np.reshape(np.transpose(mtx), shape)
        else:
            return np.reshape(mtx, shape)

    def apply(self, points):
        """Apply the rotational part alone to a point or list of points.

        Accepts homogeneous (4-component) or plain (3-component)
        coordinates; the return value matches the input convention.

        NOTE(review): np.concatenate with axis=1 on 1-D input only worked
        on very old numpy/Numeric -- confirm against the numpy version in
        use.
        """
        pshape = np.shape(points)
        homogeneous = 1
        if len(pshape) == 1:
            if pshape[0] == 3:
                points = np.array(np.concatenate((points, np.ones(1, "f")), 1))
                homogeneous = 0
        elif len(pshape) == 2:
            if pshape[1] == 3:
                points = np.array(
                    np.concatenate((np.array(points), np.ones((pshape[0], 1), "f")), 1)
                )
                homogeneous = 0
        mtx = self.getRotMatrix((4, 4), transpose=1)
        newpoints = np.dot(points, mtx)
        if homogeneous:
            return newpoints
        else:
            # strip the appended homogeneous coordinate back off
            if len(pshape) == 1:
                return newpoints[:3]
            else:
                newpoints = [x[:3] for x in newpoints]
                return newpoints
class Transformation(UnitQuaternion):
    """Base class for manipulating rigid transformations.

    A transformation is a rotation (inherited unit-quaternion state) plus
    a homogeneous translation vector stored in ``self.trans``.
    """

    # NOTE: the mutable np.array defaults are shared across calls but are
    # only read, never mutated in place.
    def __init__(
        self,
        trans=np.array([0.0, 0.0, 0.0, 1.0], "f"),
        quaternion=np.array([0.0, 0.0, 0.0, 0.0], "f"),
        scale=np.array([1.0, 1.0, 1.0, 1.0], "f"),
    ):
        """Build from a translation and a quaternion.

        scale is accepted for interface compatibility but is currently
        unused.
        """
        UnitQuaternion.__init__(self, quaternion)
        # make the translation homogeneous if it isn't
        if len(trans) == 3:
            trans = list(trans)
            trans.append(1.0)
        self.trans = np.array((trans[0], trans[1], trans[2], trans[3]), "f")

    def __repr__(self):
        """Representation is of the form tx,ty,tz,qx,qy,qz,theta."""
        # first check for identity quaternion to avoid nans
        if self.real != 1:
            theta = np.arccos(self.real)
            angle = 360 * theta / np.pi
            xyz = self.pure / np.sin(theta)
        else:
            angle = 0.0
            xyz = self.pure
        result = "Transformation: tx ty tz rx ry rz angle\n %g %g %g %g %g %g %g" % (
            self.trans[0],
            self.trans[1],
            self.trans[2],
            xyz[0],
            xyz[1],
            xyz[2],
            angle,
        )
        return result

    def output(self):
        """As __repr__ but without the explanation. For getting the numbers only."""
        if self.real != 1:
            theta = np.arccos(self.real)
            angle = 360 * theta / np.pi
            xyz = self.pure / np.sin(theta)
        else:
            angle = 0.0
            xyz = self.pure
        result = "%g %g %g %g %g %g %g" % (
            self.trans[0],
            self.trans[1],
            self.trans[2],
            xyz[0],
            xyz[1],
            xyz[2],
            angle,
        )
        return result

    def __mul__(self, other):
        """Concatenate two transformations: self*other (other performed first)."""
        # combined rotation is the product of the two rotations (Rself*Rother):
        v1 = self.pure
        v2 = other.pure
        real = self.real * other.real - np.inner(v1, v2)
        cofactor1 = v1[1] * v2[2] - v1[2] * v2[1]
        cofactor2 = v1[2] * v2[0] - v1[0] * v2[2]
        cofactor3 = v1[0] * v2[1] - v1[1] * v2[0]
        pure = (
            np.array([cofactor1, cofactor2, cofactor3])
            + self.real * other.pure
            + other.real * self.pure
        )
        # combined translation: rotate other's translation by self's
        # rotation, then add self's translation
        trans = self.getQuaternion().apply(other.trans) + self.trans
        trans[3] = 1.0  # keep the homogeneous coordinate exact
        return Transformation(trans=trans, quaternion=(real, pure))

    def reset(self):
        """Reset to the identity transformation (no rotation, no translation)."""
        self.real = 1.0
        self.pure = np.array((0.0, 0.0, 0.0))
        self.trans = np.array([0.0, 0.0, 0.0, 1.0])

    def getQuaternion(self):
        """Return the rotational part as a standalone UnitQuaternion."""
        return UnitQuaternion((self.real, self.pure))

    def getTranslation(self, shape=(4,)):
        """Get the translation vector with shape = (3,) or (4,)
        (default is (4,)).

        Raises:
            ValueError: for any other shape.
        """
        if shape == (3,):
            return self.trans[:3]
        elif shape == (4,):
            return self.trans
        else:
            raise ValueError("Shape must be (3,) or (4,)")

    def getMatrix(self, shape=(4, 4), transpose=None):
        """Return the full 4x4 transformation matrix (rotation plus
        translation), optionally transposed, reshaped to ``shape``."""
        mtx = self.getRotMatrix((4, 4), transpose=transpose)  # from Quaternion
        mtx[3] = self.getTranslation()  # translation goes in the last row
        if transpose:
            return np.reshape(mtx, shape)
        else:
            return np.reshape(np.transpose(mtx), shape)

    def getDejaVuMatrix(self):
        """Returns a 4x4 matrix usable as a DejaVu instance matrix:
        translation moved from the last row into the last column."""
        mtx = self.getRotMatrix((4, 4), transpose=None)  # from Quaternion
        mtx[3] = self.getTranslation()
        mtx[:3, 3] = mtx[3, :3]
        mtx[3, :3] = [0, 0, 0]
        return mtx

    def apply(self, points):
        """Apply the entire transformation (rotation + translation) to a
        point or a list of points.

        Accepts homogeneous (4-component) or plain (3-component)
        coordinates; the return value matches the input convention.

        NOTE(review): np.concatenate with axis=1 on 1-D input only worked
        on very old numpy/Numeric -- confirm against the numpy version in
        use.
        """
        pshape = np.shape(points)
        homogeneous = 1
        if len(pshape) == 1:
            if pshape[0] == 3:
                points = np.array(np.concatenate((points, np.ones(1, "f")), 1))
                homogeneous = 0
        elif len(pshape) == 2:
            if pshape[1] == 3:
                points = np.array(
                    np.concatenate((np.array(points), np.ones((pshape[0], 1), "f")), 1)
                )
                homogeneous = 0
        mtx = self.getMatrix((4, 4), transpose=1)
        newpoints = np.dot(points, mtx)
        if homogeneous:
            return newpoints
        else:
            # strip the appended homogeneous coordinate back off
            if len(pshape) == 1:
                return newpoints[:3]
            else:
                newpoints = [x[:3] for x in newpoints]
                return newpoints

    def inverse(self):
        """Return the inverse transformation."""
        # inverse rotation is the same as for a pure rotation (conjugate)
        real = self.real
        pure = -self.pure
        # inverse translation is the inverse rotation applied to -trans:
        # t' = -R^T * t
        transl = -np.dot(self.getRotMatrix(transpose=1, shape=(3, 3)), self.trans[:3])
        return Transformation(trans=transl, quaternion=(real, pure))

    def getScrewAxis(self, center=None, linelength=None):
        """Get the representation of a transformation in screw format.

        Returns two points on the screw axis and the translation component
        along the axis, or None if the transformation has no rotational
        component.

        Takes an optional center argument. The first point returned is
        then the point on the axis nearest to the center.

        The optional linelength argument defines the distance between the
        two points returned. The default is the translation component.

        NOTE(review): this method uses the module-level ``Vector``, which
        this file sets to None at import time (the VectorModule import is
        commented out) -- as written, any call with a rotational component
        will fail until Vector is restored.
        """
        # first check that there is a rotational component. If not, abort
        # (if there is a rotation, self.real != 1.0)
        if self.real <= 0.99999999:
            # need the direction to determine which way to draw the line
            trans = Vector(self.trans[:3])
            theta = np.arccos(self.real)
            axis = self.pure / np.sin(theta)
            axis = Vector(axis)
            # screw: translation component parallel to the axis
            screw = trans * axis
            tpar = screw * axis
            # tper: translation component perpendicular to the axis
            tper = trans - tpar
            cpt1 = tper / 2.0
            length = tper.length()
            height = length / (2 * np.tan(theta))
            cpt2 = height * (axis.cross(tper)).normal()
            point = cpt1 + cpt2
            if center:
                try:
                    center = Vector(center)
                except Exception:
                    raise ValueError("center must be a Numeric array of shape (3,)")
                # project (center - point) onto the axis to slide the
                # anchor point to the closest position to center
                m = (center - point) * axis
                point = point + m * axis
            if not linelength:
                return point, point + axis * screw, screw
            else:
                return point, point + linelength * np.sign(screw) * axis, screw
        else:
            return None
| StarcoderdataPython |
3219220 | from basis import node, Table, Stream, Context
@node(
    inputs=[
        Stream("source1_transactions", schema="common.Transaction"),
        Table("customer_summary", schema="stripe.Charge"),
    ],
    outputs=[
        Table("customer_sales"),
        Stream(
            "customer_sales_stream",
            schema={"field1": "Integer", "field2": "Text NotNull"},
        ),
    ],
    parameters=[],
)
def node1(ctx: Context):
    """Enrich each incoming transaction record with the customer's LTV
    looked up from the customer_summary table, then emit it downstream.

    NOTE(review): ``cust_ltv`` is assigned twice in a row -- the SQL
    lookup result is immediately overwritten by the dataframe lookup, so
    the ``execute_sql`` call is dead code (apparently WIP, see the TODO).
    NOTE(review): ``customer_id`` is interpolated directly into the SQL
    string -- an injection risk if the records are untrusted; parameterize
    the query if the dead SQL path is ever kept.
    """
    cust_table = ctx.get_table("customer_summary")
    cust_df = cust_table.as_dataframe()
    for record in ctx.get_records("source1_transactions"):
        # TODO:
        cust_ltv = cust_table.execute_sql(
            f"select ltv from customer_summary where customer_id = {record['customer_id']}"
        )
        cust_ltv = cust_df.find(record["customer_id"])["ltv"]
        record["ltv"] = cust_ltv
        ctx.emit_record(record)
    # checkpoint consumption of the input stream
    ctx.save_progress("source1_transactions")
@node(
    inputs=[
        Stream("source1_transactions", schema="common.Transaction"),
        Table("customer_summary", schema="stripe.Charge"),
    ],
    outputs=[
        Table("customer_sales"),
        Stream(
            "customer_sales_stream",
            schema={"field1": "Integer", "field2": "Text NotNull"},
        ),
    ],
    parameters=[],
)
def node1(ctx: Context):
    """Enrich each incoming transaction record with the customer's LTV.

    NOTE(review): this is a byte-for-byte duplicate of the ``node1``
    defined immediately above; at import time this second definition
    shadows the first. Either remove one copy or rename this one.
    NOTE(review): as above, the ``execute_sql`` result is dead code
    (immediately overwritten), and the f-string SQL is injection-prone.
    """
    cust_table = ctx.get_table("customer_summary")
    cust_df = cust_table.as_dataframe()
    for record in ctx.get_records("source1_transactions"):
        # TODO:
        cust_ltv = cust_table.execute_sql(
            f"select ltv from customer_summary where customer_id = {record['customer_id']}"
        )
        cust_ltv = cust_df.find(record["customer_id"])["ltv"]
        record["ltv"] = cust_ltv
        ctx.emit_record(record)
    # checkpoint consumption of the input stream
    ctx.save_progress("source1_transactions")
| StarcoderdataPython |
3290330 | # *******************************************************
# Copyright (c) VMware, Inc. 2020. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Tests for the Live Response API."""
import mox as pymox
import pytest
import copy
import io
import sys
from cbc_sdk.errors import ApiError, ObjectNotFoundError, ServerError, TimeoutError
from cbc_sdk.live_response_api import LiveResponseError, LiveResponseSessionManager
from cbc_sdk.winerror import HRESULT_FROM_WIN32, Win32Error
from cbc_sdk.rest_api import CBCloudAPI
from tests.unit.fixtures.CBCSDKMock import CBCSDKMock
from tests.unit.fixtures.live_response.mock_command import (DIRECTORY_LIST_START_RESP, DIRECTORY_LIST_END_RESP,
DELETE_FILE_START_RESP, DELETE_FILE_END_RESP,
DELETE_FILE_ERROR_RESP, PUT_FILE_START_RESP,
PUT_FILE_END_RESP, CREATE_DIRECTORY_START_RESP,
CREATE_DIRECTORY_END_RESP, WALK_RETURN_1, WALK_RETURN_2,
WALK_RETURN_3, KILL_PROC_START_RESP, KILL_PROC_END_RESP,
CREATE_PROC_START_RESP, CREATE_PROC_END_RESP,
RUN_PROC_START_RESP, RUN_PROC_END_RESP,
LIST_PROC_START_RESP, LIST_PROC_END_RESP,
REG_ENUM_START_RESP, REG_ENUM_END_RESP, REG_GET_START_RESP,
REG_GET_END_RESP, REG_SET_START_RESP, REG_SET_END_RESP,
REG_CREATE_KEY_START_RESP, REG_CREATE_KEY_END_RESP,
REG_DELETE_KEY_START_RESP, REG_DELETE_KEY_END_RESP,
REG_DELETE_START_RESP, REG_DELETE_END_RESP,
MEMDUMP_START_RESP, MEMDUMP_END_RESP,
MEMDUMP_DEL_START_RESP, MEMDUMP_DEL_END_RESP)
from tests.unit.fixtures.live_response.mock_device import DEVICE_RESPONSE, UDEVICE_RESPONSE
from tests.unit.fixtures.live_response.mock_session import (SESSION_INIT_RESP, SESSION_POLL_RESP,
SESSION_POLL_RESP_ERROR, SESSION_CLOSE_RESP,
USESSION_INIT_RESP, USESSION_POLL_RESP, USESSION_CLOSE_RESP)
@pytest.fixture(scope="function")
def cb():
"""Create CBCloudAPI singleton"""
return CBCloudAPI(url="https://example.com",
org_key="test",
token="<KEY>",
ssl_verify=False)
@pytest.fixture(scope="function")
def cbcsdk_mock(monkeypatch, cb):
"""Mocks CBC SDK for unit tests"""
return CBCSDKMock(monkeypatch, cb)
# Canned Live Response error payload carrying the Win32 HRESULT for
# ERROR_FILE_NOT_FOUND; reused by the error-path tests below.
FILE_NOT_FOUND_ERR = {'status': 'error', 'result_type': 'WinHresult',
                      'result_code': HRESULT_FROM_WIN32(Win32Error.ERROR_FILE_NOT_FOUND)}
# ==================================== UNIT TESTS BELOW ====================================
# Cases: (error details dict, expected message, expected decoded win32 name).
# Rows 3 and 4 exercise non-WinHresult result types and non-error statuses,
# which should yield empty message/decoding.
@pytest.mark.parametrize("details, message, decoded_win32", [
    (FILE_NOT_FOUND_ERR, "Win32 error code 0x-7FF8FFFE (ERROR_FILE_NOT_FOUND)", "ERROR_FILE_NOT_FOUND"),
    ({'status': 'error', 'result_type': 'WinHresult', 'result_code': HRESULT_FROM_WIN32(10203)},
     "Win32 error code 0x-7FF8D825", None),
    ({'status': 'error', 'result_type': 'int', 'result_code': HRESULT_FROM_WIN32(Win32Error.ERROR_FILE_NOT_FOUND)},
     "", ""),
    ({'status': 'warning', 'result_type': 'WinHResult',
      'result_code': HRESULT_FROM_WIN32(Win32Error.ERROR_FILE_NOT_FOUND)},
     "", "")
])
def test_live_response_error(details, message, decoded_win32):
    """Test the creation of a LiveResponseError."""
    err = LiveResponseError(details)
    assert err.message == message
    assert err.decoded_win32_error == decoded_win32
def test_create_manager(cbcsdk_mock):
    """Test creating the Live Response session manager."""
    manager = LiveResponseSessionManager(cbcsdk_mock.api, 35)
    # A freshly-built manager carries the requested timeout and neither
    # keepalive sessions nor a job scheduler.
    assert manager._timeout == 35
    assert not manager._keepalive_sessions
    assert manager._job_scheduler is None
def test_create_session(cbcsdk_mock):
    """Test creating a Live Response session."""
    # Register the full request sequence: session init, status poll,
    # device lookup, and session close.
    for method, url, resp in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(method, url, resp)
    mgr = LiveResponseSessionManager(cbcsdk_mock.api)
    with mgr.request_session(2468) as lr_session:
        assert lr_session.session_id == '1:2468'
        assert lr_session.device_id == 2468
        assert lr_session._cblr_manager is mgr
        assert lr_session._cb is cbcsdk_mock.api
        assert lr_session.os_type == 1
def test_create_session_with_poll_error(cbcsdk_mock):
    """Test creating a Live Response session with an error in the polling."""
    for method, url, resp in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP_ERROR),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(method, url, resp)
    mgr = LiveResponseSessionManager(cbcsdk_mock.api)
    # The error status from the poll should surface as a TimeoutError
    # pointing at the poll URL.
    with pytest.raises(TimeoutError) as excinfo:
        mgr.request_session(2468)
    assert excinfo.value.uri == '/integrationServices/v3/cblr/session/1:2468'
    assert excinfo.value.error_code == 404
def test_create_session_with_init_poll_timeout(cbcsdk_mock):
    """Test creating a Live Response session with a timeout in the initial polling."""
    for method, url, resp in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_INIT_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(method, url, resp)
    mgr = LiveResponseSessionManager(cbcsdk_mock.api)
    # Make the poll delay exceed the poll timeout so the very first
    # (still-pending) poll response blows the deadline.
    mgr._init_poll_delay = 1.25
    mgr._init_poll_timeout = 1
    with pytest.raises(TimeoutError) as excinfo:
        mgr.request_session(2468)
    assert excinfo.value.uri == '/integrationServices/v3/cblr/session/1:2468'
    assert excinfo.value.error_code == 404
def test_create_session_with_keepalive_option(cbcsdk_mock):
    """Test creating a Live Response session using the keepalive option."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    # keepalive_sessions=True: the manager should cache the session and
    # hand the same object back for a repeated request on the same device.
    manager = LiveResponseSessionManager(cbcsdk_mock.api, 100000, True)
    try:
        with manager.request_session(2468) as session1:
            assert session1.session_id == '1:2468'
            assert session1.device_id == 2468
            assert session1._cblr_manager is manager
            assert session1._cb is cbcsdk_mock.api
            assert session1.os_type == 1
            # Second request while the first is open must reuse the
            # cached session, not create a new one.
            with manager.request_session(2468) as session2:
                assert session2 is session1
        # Both context managers have exited: one maintenance pass should
        # now expire and drop the cached session.
        assert len(manager._sessions) == 1
        manager._maintain_sessions()
        assert len(manager._sessions) == 0
    finally:
        # Always stop the background keepalive thread the manager spawned.
        manager.stop_keepalive_thread()
# Each case is the exception the SECOND keepalive request will raise; both
# error types should be tolerated by the maintenance loop.
@pytest.mark.parametrize("thrown_exception", [
    (ObjectNotFoundError('/integrationServices/v3/cblr/session/1:2468'),),
    (ServerError(404, 'test error'),)
])
def test_session_maintenance_sends_keepalive(cbcsdk_mock, thrown_exception):
    """Test to ensure the session maintenance sends the keepalive messages as needed."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    # The keepalive URL is registered twice on purpose: the first
    # maintenance pass gets a clean response, the second raises the
    # parametrized exception. Registration order matters here.
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/keepalive', {})
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/keepalive', thrown_exception)
    manager = LiveResponseSessionManager(cbcsdk_mock.api, 100000, True)
    try:
        with manager.request_session(2468):
            # First pass: keepalive succeeds, session stays cached.
            manager._maintain_sessions()
            assert len(manager._sessions) == 1
            # Second pass: keepalive raises; maintenance must swallow it.
            manager._maintain_sessions()
    finally:
        manager.stop_keepalive_thread()
def test_list_directory(cbcsdk_mock):
    """Test the response to the 'list directory' command."""
    for method, url, resp in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('POST', '/integrationServices/v3/cblr/session/1:2468/command', DIRECTORY_LIST_START_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468/command/6', DIRECTORY_LIST_END_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(method, url, resp)
    mgr = LiveResponseSessionManager(cbcsdk_mock.api)
    with mgr.request_session(2468) as lr_session:
        files = lr_session.list_directory('C:\\\\TEMP\\\\')
    # Expect the two directory self-references followed by the one file.
    expected = [('.', 'DIRECTORY'), ('..', 'DIRECTORY'), ('test.txt', 'ARCHIVE')]
    for entry, (fname, attrib) in zip(files, expected):
        assert entry['filename'] == fname
        assert attrib in entry['attributes']
def test_delete_file(cbcsdk_mock):
    """Test the response to the 'delete file' command."""
    for method, url, resp in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('POST', '/integrationServices/v3/cblr/session/1:2468/command', DELETE_FILE_START_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468/command/3', DELETE_FILE_END_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(method, url, resp)
    mgr = LiveResponseSessionManager(cbcsdk_mock.api)
    # Success case: the call completes without raising.
    with mgr.request_session(2468) as lr_session:
        lr_session.delete_file('C:\\\\TEMP\\\\foo.txt')
def test_delete_file_with_error(cbcsdk_mock):
    """Test the response to the 'delete file' command when it returns an error."""
    for method, url, resp in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('POST', '/integrationServices/v3/cblr/session/1:2468/command', DELETE_FILE_START_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468/command/3', DELETE_FILE_ERROR_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(method, url, resp)
    mgr = LiveResponseSessionManager(cbcsdk_mock.api)
    with mgr.request_session(2468) as lr_session:
        # The error result must surface as a LiveResponseError carrying
        # the decoded Win32 error name.
        with pytest.raises(LiveResponseError) as excinfo:
            lr_session.delete_file('C:\\\\TEMP\\\\foo.txt')
        assert excinfo.value.decoded_win32_error == "ERROR_FILE_NOT_FOUND"
def test_put_file(cbcsdk_mock, mox):
    """Test the response to the 'put file' command.

    ``mox`` is presumably a fixture wrapping the pymox library imported at
    the top of this file (defined in conftest) -- TODO confirm.
    """
    def respond_to_post(url, body, **kwargs):
        # Validate the command body the session builds for 'put file';
        # file_id must be the value returned by the stubbed _upload_file.
        assert body['session_id'] == '1:2468'
        assert body['name'] == 'put file'
        assert body['file_id'] == 10203
        assert body['object'] == 'foobar.txt'
        return PUT_FILE_START_RESP
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    # The command POST is answered by the validating callback above.
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', respond_to_post)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/6', PUT_FILE_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    filep = io.StringIO('This is a test')
    with manager.request_session(2468) as session:
        # Stub the file upload so no real file transfer happens; it must
        # be called with our file object and reports file_id 10203.
        mox.StubOutWithMock(session, '_upload_file')
        session._upload_file(filep).AndReturn(10203)
        mox.ReplayAll()
        session.put_file(filep, 'foobar.txt')
        mox.VerifyAll()
def test_create_directory(cbcsdk_mock):
    """Test the response to the 'create directory' command."""
    for method, url, resp in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('POST', '/integrationServices/v3/cblr/session/1:2468/command', CREATE_DIRECTORY_START_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468/command/7', CREATE_DIRECTORY_END_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(method, url, resp)
    mgr = LiveResponseSessionManager(cbcsdk_mock.api)
    # Success case: the call completes without raising.
    with mgr.request_session(2468) as lr_session:
        lr_session.create_directory('C:\\\\TEMP\\\\TRASH')
def test_walk(cbcsdk_mock, mox):
    """Test the logic of the directory walking (default top-down order)."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # Stub list_directory so the walk sees a fixed tree:
        # C:\TEMP\ contains dirs FOO and BAR plus file test.txt.
        mox.StubOutWithMock(session, 'list_directory')
        session.list_directory('C:\\TEMP\\*').AndReturn(WALK_RETURN_1)
        session.list_directory('C:\\TEMP\\FOO\\*').AndReturn(WALK_RETURN_2)
        session.list_directory('C:\\TEMP\\BAR\\*').AndReturn(WALK_RETURN_3)
        mox.ReplayAll()
        # Top-down walk: the root is yielded first, then each subdirectory
        # in the order it was listed. Each entry is (dirpath, dirs, files).
        index = 1
        for entry in session.walk('C:\\TEMP\\'):
            if index == 1:
                assert entry[0] == 'C:\\TEMP\\'
                assert len(entry[1]) == 2
                assert 'FOO' in entry[1]
                assert 'BAR' in entry[1]
                assert len(entry[2]) == 1
                assert 'test.txt' in entry[2]
            elif index == 2:
                assert entry[0] == 'C:\\TEMP\\FOO\\'
                assert len(entry[1]) == 0
                assert len(entry[2]) == 2
                assert 'hoopy.doc' in entry[2]
                assert 'frood.doc' in entry[2]
            elif index == 3:
                assert entry[0] == 'C:\\TEMP\\BAR\\'
                assert len(entry[1]) == 0
                assert len(entry[2]) == 1
                assert 'evil.exe' in entry[2]
            else:
                pytest.fail("Index went out of range")
            index = index + 1
        mox.VerifyAll()
def test_walk_bottomup_with_error(cbcsdk_mock, mox):
    """Test bottom-up directory walking when one directory listing fails.

    The FOO listing raises; the error must be routed to the supplied
    callback and the walk must continue with the remaining directories.
    """
    called_error_response = 0

    def error_response(err):
        # Callback invoked by walk() for listing failures; record each call.
        assert err.decoded_win32_error == "ERROR_FILE_NOT_FOUND"
        nonlocal called_error_response
        called_error_response = called_error_response + 1
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # Same fixed tree as test_walk, but FOO's listing fails.
        mox.StubOutWithMock(session, 'list_directory')
        session.list_directory('C:\\TEMP\\*').AndReturn(WALK_RETURN_1)
        session.list_directory('C:\\TEMP\\FOO\\*').AndRaise(LiveResponseError(FILE_NOT_FOUND_ERR))
        session.list_directory('C:\\TEMP\\BAR\\*').AndReturn(WALK_RETURN_3)
        mox.ReplayAll()
        # Bottom-up (topdown=False): leaves first, the root last; the
        # failed FOO directory yields no entry of its own.
        index = 1
        for entry in session.walk('C:\\TEMP\\', False, error_response):
            if index == 1:
                assert entry[0] == 'C:\\TEMP\\BAR\\'
                assert len(entry[1]) == 0
                assert len(entry[2]) == 1
                assert 'evil.exe' in entry[2]
            elif index == 2:
                assert entry[0] == 'C:\\TEMP\\'
                assert len(entry[1]) == 2
                assert 'FOO' in entry[1]
                assert 'BAR' in entry[1]
                assert len(entry[2]) == 1
                assert 'test.txt' in entry[2]
            else:
                pytest.fail("Index went out of range")
            index = index + 1
        mox.VerifyAll()
    # Exactly one listing failed, so the callback fired exactly once.
    assert called_error_response == 1
def test_kill_process(cbcsdk_mock):
    """Test the response to the 'kill' command."""
    # Canned responses: session lifecycle plus one 'kill' command round trip
    # (command POST returns 'in progress', the follow-up GET returns 'complete').
    canned_responses = [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('POST', '/integrationServices/v3/cblr/session/1:2468/command', KILL_PROC_START_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468/command/13', KILL_PROC_END_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]
    for http_method, url, response in canned_responses:
        cbcsdk_mock.mock_request(http_method, url, response)
    lr_manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with lr_manager.request_session(2468) as lr_session:
        # kill_process reports success (truthy) once the command completes.
        assert lr_session.kill_process(601)
def test_kill_process_timeout(cbcsdk_mock):
    """Test the response to the 'kill' command when it times out."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', KILL_PROC_START_RESP)
    # The status poll keeps returning the START (in-progress) response, so the
    # command never reaches completion within the timeout.
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/13', KILL_PROC_START_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    # Second constructor argument: a 2-second timeout for the manager.
    manager = LiveResponseSessionManager(cbcsdk_mock.api, 2)
    with manager.request_session(2468) as session:
        # On timeout, kill_process must report failure rather than raise.
        assert not session.kill_process(601)
def test_create_process(cbcsdk_mock):
    """Test the response to the 'create process' command with wait for completion."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', CREATE_PROC_START_RESP)
    # Command 52 completes; create_process waits on this poll before returning.
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/52', CREATE_PROC_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # wait_for_output=False: no output is collected, so the result is None.
        assert session.create_process('start_daemon', False) is None
def test_spawn_process(cbcsdk_mock):
    """Test the response to the 'create process' command without wait for completion."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', CREATE_PROC_START_RESP)
    # Note: no command-status GET is registered — the fire-and-forget path
    # must never poll for completion.
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # Args: command, wait_for_output, remote_output_file_name,
        # working_directory, wait_timeout, wait_for_completion=False.
        assert session.create_process('start_daemon', False, None, None, 30, False) is None
# Each 1-tuple is unpacked by pytest into the single 'remotefile' argument:
# one run names the remote output file explicitly, one lets the session pick.
@pytest.mark.parametrize("remotefile", [('junk.txt',), (None,)])
def test_run_process_with_output(cbcsdk_mock, mox, remotefile):
    """Test the response to the 'create process' command with output that we retrieve."""
    def respond_to_post(url, body, **kwargs):
        # Dynamic responder: validates each command POST. 'create process'
        # starts the run; 'delete file' cleans up the remote output file.
        assert body['session_id'] == '1:2468'
        if body['name'] == 'create process':
            return RUN_PROC_START_RESP
        elif body['name'] == 'delete file':
            resp = copy.deepcopy(DELETE_FILE_START_RESP)
            resp['object'] = body['object']
            return resp
        else:
            pytest.fail(f"Invalid command name seen: {body['name']}")

    def validate_get_file(name):
        # pymox matcher for the stubbed get_file call: when a remote file name
        # was supplied it must be used verbatim; otherwise any non-None name
        # (generated by the session) is acceptable.
        if name is None:
            return False
        if remotefile is not None:
            return name == remotefile
        return True
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', respond_to_post)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/9', RUN_PROC_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        mox.StubOutWithMock(session, 'get_file')
        session.get_file(pymox.Func(validate_get_file)).AndReturn('I Got It')
        mox.ReplayAll()
        # create_process with wait_for_output=True returns the file contents.
        rc = session.create_process('gimme', True, remotefile)
        assert rc == 'I Got It'
        mox.VerifyAll()
def test_list_processes(cbcsdk_mock):
    """Test the response to the 'list processes' command."""
    # Canned responses: session lifecycle plus the 'list processes' round trip.
    for http_method, url, response in [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('POST', '/integrationServices/v3/cblr/session/1:2468/command', LIST_PROC_START_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468/command/10', LIST_PROC_END_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]:
        cbcsdk_mock.mock_request(http_method, url, response)
    lr_manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with lr_manager.request_session(2468) as lr_session:
        process_list = lr_session.list_processes()
        # Three processes come back, in the order the mock returned them.
        expected_paths = ['proc1', 'server', 'borg']
        assert len(process_list) == len(expected_paths)
        for process, expected_path in zip(process_list, expected_paths):
            assert process['path'] == expected_path
def test_registry_enum(cbcsdk_mock):
    """Test the response to the 'reg enum keys' command."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', REG_ENUM_START_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/56', REG_ENUM_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # Full enumeration returns both subkeys and values for the key.
        rc1 = session.list_registry_keys_and_values('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI')
        assert len(rc1['sub_keys']) == 2
        assert 'Parameters' in rc1['sub_keys']
        assert 'Enum' in rc1['sub_keys']
        value_names = ['Start', 'Type', 'ErrorControl', 'ImagePath', 'DisplayName', 'Group', 'DriverPackageId', 'Tag']
        assert len(rc1['values']) == len(value_names)
        for keyitem in rc1['values']:
            assert keyitem['value_name'] in value_names
        # list_registry_values returns just the values list for the same key.
        # NOTE: both calls share the single mocked command round trip above.
        rc2 = session.list_registry_values('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI')
        assert len(rc2) == len(value_names)
        for keyitem in rc2:
            assert keyitem['value_name'] in value_names
def test_registry_get(cbcsdk_mock):
    """Test the response to the 'reg get value' command."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', REG_GET_START_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/61', REG_GET_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # A single value comes back as a dict with data, name, and type.
        val = session.get_registry_value('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Start')
        assert val['value_data'] == 0
        assert val['value_name'] == 'Start'
        assert val['value_type'] == 'REG_DWORD'
# Parametrization: (value passed in, value expected on the wire, overwrite
# flag, explicit type or None for auto-detection, type expected on the wire).
# Covers auto-detected DWORD, MULTI_SZ (strings and stringified ints),
# SZ, and an explicitly forced QWORD.
@pytest.mark.parametrize("set_val,check_val,overwrite,set_type,check_type", [
    (42, 42, False, None, 'REG_DWORD'),
    (['a', 'b', 'c'], ['a', 'b', 'c'], True, None, 'REG_MULTI_SZ'),
    ([10, 20, 30], ['10', '20', '30'], False, None, 'REG_MULTI_SZ'),
    ('Quimby', 'Quimby', True, None, 'REG_SZ'),
    (80231, 80231, False, 'REG_QWORD', 'REG_QWORD')
])
def test_registry_set(cbcsdk_mock, set_val, check_val, overwrite, set_type, check_type):
    """Test the response to the 'reg set value' command."""
    def respond_to_post(url, body, **kwargs):
        # Dynamic responder: the real assertions live here — verify the
        # request body carries the expected type and (possibly converted) data.
        assert body['session_id'] == '1:2468'
        assert body['name'] == 'reg set value'
        assert body['object'] == 'HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\testvalue'
        assert body['overwrite'] == overwrite
        assert body['value_type'] == check_type
        assert body['value_data'] == check_val
        return REG_SET_START_RESP
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', respond_to_post)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/62', REG_SET_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        session.set_registry_value('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\testvalue', set_val,
                                   overwrite, set_type)
def test_registry_create_key(cbcsdk_mock):
    """Test the response to the 'reg create key' command."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', REG_CREATE_KEY_START_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/63', REG_CREATE_KEY_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # Success is the absence of an exception; there is no return value.
        session.create_registry_key('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Nonsense')
def test_registry_delete_key(cbcsdk_mock):
    """Test the response to the 'reg delete key' command."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', REG_DELETE_KEY_START_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/64', REG_DELETE_KEY_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # Success is the absence of an exception; there is no return value.
        session.delete_registry_key('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Nonsense')
def test_registry_delete(cbcsdk_mock):
    """Test the response to the 'reg delete value' command."""
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', REG_DELETE_START_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/65', REG_DELETE_END_RESP)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        # Success is the absence of an exception; there is no return value.
        session.delete_registry_value('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\testvalue')
def test_registry_unsupported_command(cbcsdk_mock):
    """Test the response to a command that we know isn't supported on the target node."""
    # The U*-prefixed fixtures describe device 7777, whose session/device
    # metadata advertises no support for registry commands.
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/7777', USESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:7777', USESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/7777', UDEVICE_RESPONSE)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', USESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(7777) as session:
        # The SDK should refuse locally with ApiError instead of POSTing
        # a command the sensor cannot execute.
        with pytest.raises(ApiError) as excinfo:
            session.create_registry_key('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Nonsense')
        assert excinfo.value.__str__().startswith("Command reg create key not supported")
def test_memdump(cbcsdk_mock):
    """Test the response to the 'memdump' command."""
    # Captured from the 'memdump' request so the later 'delete file' request
    # and the GET responders can echo back the session-generated names.
    generated_file_name = None
    target_file_name = None

    def respond_to_post(url, body, **kwargs):
        # Dynamic responder for both command POSTs: 'memdump' records the
        # generated remote file name (plus '.zip' when compression is on),
        # 'delete file' must target exactly that file.
        assert body['session_id'] == '1:2468'
        nonlocal generated_file_name, target_file_name
        if body['name'] == 'memdump':
            generated_file_name = body['object']
            target_file_name = generated_file_name
            if body['compress']:
                target_file_name += '.zip'
            retval = copy.deepcopy(MEMDUMP_START_RESP)
            retval['object'] = generated_file_name
            return retval
        elif body['name'] == 'delete file':
            assert body['object'] == target_file_name
            retval = copy.deepcopy(MEMDUMP_DEL_START_RESP)
            retval['object'] = target_file_name
            return retval
        else:
            pytest.fail(f"Invalid command name seen: {body['name']}")

    def respond_get1(url, query_parameters, default):
        # Completion poll for the memdump command (command id 101).
        retval = copy.deepcopy(MEMDUMP_END_RESP)
        retval['object'] = generated_file_name
        return retval

    def respond_get2(url, query_parameters, default):
        # Completion poll for the cleanup delete (command id 102).
        retval = copy.deepcopy(MEMDUMP_DEL_END_RESP)
        retval['object'] = target_file_name
        return retval
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE)
    cbcsdk_mock.mock_request('POST', '/integrationServices/v3/cblr/session/1:2468/command', respond_to_post)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/101', respond_get1)
    cbcsdk_mock.mock_request('GET', '/integrationServices/v3/cblr/session/1:2468/command/102', respond_get2)
    cbcsdk_mock.mock_request('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        memdump = session.start_memdump()
        assert memdump.lr_session is session
        assert memdump.remote_filename == target_file_name
        memdump.wait()
        memdump.delete()
| StarcoderdataPython |
1606549 | <filename>collectionPointEvent.py
"""
Common event to send out about activity from this collection point
author: DaViD bEnGe
date: 6/9/2017
TODO: define this object better
"""
import datetime
class CollectionPointEvent():
    """Event describing activity observed at a collection point.

    A simple value object: all state is exposed through properties backed
    by underscore-prefixed fields.
    """

    def __init__(self, cpid, cptype, topic, extendedData=None, localOnly=False):
        """Create a new event.

        :param cpid: id of the collection point that generated the event
        :param cptype: type of the collection point
        :param topic: event topic
        :param extendedData: optional dict of extra event payload
        :param localOnly: if True, the event is only echoed to the local
            websocket instead of being broadcast everywhere
        """
        self._cpid = cpid
        self._cptype = cptype
        self._topic = topic
        # Capture the creation time once, as a UTC timestamp string.
        self._eventTime = '{0}'.format(datetime.datetime.utcnow())
        # BUG FIX: the default used to be a mutable {} shared across every
        # event constructed without extendedData; give each event its own.
        self._extendedData = extendedData if extendedData is not None else {}
        # This flag triggers the scope of broadcast: some events we don't want
        # to send everywhere and just want to echo to the local websocket.
        self._localOnly = localOnly

    @property
    def cpid(self):
        """Get the id of the collection point that generated the event"""
        return self._cpid

    @cpid.setter
    def cpid(self, value):
        """Set the id of the collection point that generated the event"""
        self._cpid = value

    @property
    def cptype(self):
        """Get the Collection Point type"""
        # BUG FIX: previously returned self.cptype, which recursed forever.
        return self._cptype

    @cptype.setter
    def cptype(self, value):
        """Set the Collection Point type"""
        # BUG FIX: previously assigned self.cptype, which recursed forever.
        self._cptype = value

    @property
    def topic(self):
        """Get the event topic"""
        return self._topic

    @topic.setter
    def topic(self, value):
        """Set the event topic"""
        self._topic = value

    @property
    def eventTime(self):
        """Get the event creation time (read-only)"""
        return self._eventTime

    @property
    def localOnly(self):
        """Get the localOnly flag from the event"""
        return self._localOnly

    @localOnly.setter
    def localOnly(self, value):
        """Set the localOnly flag on the event"""
        self._localOnly = value

    @property
    def extendedData(self):
        """Get the extended data from the event"""
        return self._extendedData

    @extendedData.setter
    def extendedData(self, value):
        """Set the extended data on the event"""
        self._extendedData = value
| StarcoderdataPython |
4922561 | <gh_stars>0
# --------------
import numpy as np
from collections import Counter

# Not every data format will be in csv; there are other file formats also.
# This exercise helps with reading them. `path` is expected to be defined by
# the surrounding exercise harness and points at the IPL deliveries file.
# dtype='str' keeps every column as text; numeric columns are cast where needed.
data = np.genfromtxt(path, dtype='str', delimiter=',', skip_header=1)
print(data.shape)

# Number of unique matches.
# BUG FIX: data[0] is the first ROW of the file, not the match-id column;
# the match id lives in column 0 (see the Mumbai Indians section below),
# so the unique count must be taken over data[:, 0].
number_of_unique_matches = len(np.unique(data[:, 0]))
print("No.of unique matches played:- ", number_of_unique_matches)

# How many matches were held in total, so further statistics can be analyzed
# with that in mind.
# NOTE: this is actually the number of delivery rows in the file, kept as-is
# to preserve the exercise's expected output.
total_number_of_matches = data[:, 3].shape
print("Total Number of matches played:- ", total_number_of_matches[0])

# Number of unique teams: the union of the two team columns (3 and 4) gives
# every side that appeared in the tournament.
set_1_teams = set(data[:, 3])
set_2_teams = set(data[:, 4])
set_of_unique_teams = set_1_teams.union(set_2_teams)
print("Set of unique teams:- ", set_of_unique_teams)

# Sum of all extras (column 17), cast from text to int before summing.
sum_of_all_extras = data[:, 17].astype('int16').sum()
print("Sum of all extras:- ", sum_of_all_extras)

# Delivery numbers (column 11) on which a given player got out
# (column 20 holds the dismissed player's name).
wicket_filter = (data[:, 20] == 'SR Tendulkar')
wickets_arr = data[wicket_filter]
# Notebook-style bare expression: displays the deliveries in a notebook,
# has no effect when run as a script.
wickets_arr[:, 11]

# Number of times Mumbai Indians won the toss (column 5 is the toss winner);
# count distinct match ids (column 0) rather than raw delivery rows.
Mi_team_toss_win = data[data[:, 5] == 'Mumbai Indians']
unique_matches = len(set(Mi_team_toss_win[:, 0]))
print('Number of times Mumbai Indians won the toss: ', unique_matches)

# Player with the most sixes: filter deliveries where the batsman
# (column 13) scored six runs off the bat (column 16).
sixes = data[data[:, 16].astype(np.int16) == 6]
most_sixes = Counter(sixes[:, 13]).most_common(1)
print('Player with maximum sixes: ', most_sixes)
6685138 | <filename>sdg/inputs/InputMetaFiles.py
import os
import re
import git
import pandas as pd
from sdg.inputs import InputFiles
class InputMetaFiles(InputFiles):
    """Sources of SDG metadata that are local files."""

    def __init__(self, path_pattern='', git=True, git_data_dir='data',
                 git_data_filemask='indicator_*.csv', metadata_mapping=None,
                 logging=None, column_map=None, code_map=None):
        """Constructor for InputMetaFiles.

        Keyword arguments:
        path_pattern -- path (glob) pattern describing where the files are
        git -- whether to use git information for dates in the metadata
        git_data_dir -- location of folder containing git data for dates
        git_data_filemask -- a pattern for data filenames, where "*" is the
          indicator id. Alternatively, each indicator can contain a metadata
          field called "data_filename" with the name of the data file for
          that indicator.
        metadata_mapping -- a dict mapping human-readable labels to machine keys
          or a path to a CSV file
        logging -- passed through to InputFiles
        column_map -- passed through to InputFiles
        code_map -- passed through to InputFiles
        """
        InputFiles.__init__(self, path_pattern, logging=logging,
            column_map=column_map, code_map=code_map)
        self.git = git
        self.git_data_dir = git_data_dir
        self.git_data_filemask = git_data_filemask
        self.metadata_mapping = metadata_mapping

    def execute(self, indicator_options):
        """Get the metadata from the files and add an indicator for each."""
        InputFiles.execute(self, indicator_options)
        self.load_metadata_mapping()
        indicator_map = self.get_indicator_map()
        for inid in indicator_map:
            meta = self.read_meta(indicator_map[inid])
            self.apply_metadata_mapping(meta)
            self.fix_booleans(meta)
            name = meta['indicator_name'] if 'indicator_name' in meta else None
            self.add_indicator(inid, name=name, meta=meta, options=indicator_options)

    def read_meta(self, filepath):
        """Read one metadata file, merging in translations and git dates."""
        meta = self.read_meta_at_path(filepath)
        self.add_language_folders(meta, filepath)
        if self.git:
            self.add_git_dates(meta, filepath)
        return meta

    def read_meta_at_path(self, filepath):
        """This must be implemented by child classes."""
        raise NotImplementedError

    def add_language_folders(self, meta, filepath):
        """Merge translated metadata from per-language subfolders.

        Each immediate subfolder of the metadata folder is treated as a
        language code; a same-named file inside it, if present, is read and
        stored under meta[language].
        """
        meta_folder = os.path.dirname(filepath)
        filename = os.path.basename(filepath)
        # next(os.walk(...))[1] is the list of immediate subdirectories.
        languages = next(os.walk(meta_folder))[1]
        for language in languages:
            translated_filepath = os.path.join(meta_folder, language, filename)
            if os.path.isfile(translated_filepath):
                translated_meta = self.read_meta_at_path(translated_filepath)
                self.apply_metadata_mapping(translated_meta)
                self.fix_booleans(translated_meta)
                meta[language] = translated_meta

    def fix_booleans(self, meta):
        """Convert 'true'/'false' strings (any case) to real booleans, in place."""
        for key in meta:
            value = meta[key]
            if isinstance(value, str):
                if value.lower() == 'true':
                    meta[key] = True
                elif value.lower() == 'false':
                    meta[key] = False

    def add_git_dates(self, meta, filepath):
        """Add git-derived update dates (and deprecated link fields) to meta."""
        git_update = self.get_git_dates(meta, filepath)
        for k in git_update.keys():
            meta[k] = git_update[k]
        # @deprecated start
        # For now continue to populate the deprecated link fields:
        # * national_metadata_update_url / national_metadata_update_url_text
        # * national_data_update_url / national_data_update_url_text
        deprecated_fields = self.get_git_updates(meta, filepath)
        for k in deprecated_fields.keys():
            meta[k] = deprecated_fields[k]
        # @deprecated end

    def get_git_dates(self, meta, filepath):
        """Return last-commit dates for the metadata file and its data file.

        The data file location is either meta['data_filename'] or derived
        from git_data_filemask and the indicator id.
        """
        updates = {}
        updates['national_metadata_updated_date'] = self.get_git_date(filepath)
        if 'data_filename' in meta:
            data_filename = meta['data_filename']
        else:
            indicator_id = self.convert_path_to_indicator_id(filepath)
            data_filename = self.git_data_filemask.replace('*', indicator_id)
        src_dir = os.path.dirname(os.path.dirname(self.path_pattern))
        data_filepath = os.path.join(src_dir, self.git_data_dir, data_filename)
        if os.path.isfile(data_filepath):
            updates['national_data_updated_date'] = self.get_git_date(data_filepath)
        return updates

    def get_git_date(self, filepath):
        """Change into the working directory of the file (it might be a submodule)
        and get the latest git history"""
        folder = os.path.split(filepath)[0]
        repo = git.Repo(folder, search_parent_directories=True)
        # Need to translate relative to the repo root (this may be a submodule)
        repo_dir = os.path.relpath(repo.working_dir, os.getcwd())
        filepath = os.path.relpath(filepath, repo_dir)
        commit = next(repo.iter_commits(paths=filepath, max_count=1))
        return str(commit.committed_datetime.date())

    # @deprecated start
    def get_git_updates(self, meta, filepath):
        """Deprecated: build the legacy *_update_url/*_update_url_text fields."""
        meta_update = self.get_git_update(filepath)
        updates = {
            'national_metadata_update_url_text': meta_update['date'] + ': see changes on GitHub',
            'national_metadata_update_url': meta_update['commit_url']
        }
        indicator_id = self.convert_path_to_indicator_id(filepath)
        data_filename = self.git_data_filemask.replace('*', indicator_id)
        if 'data_filename' in meta:
            data_filename = meta['data_filename']
        src_dir = os.path.dirname(os.path.dirname(self.path_pattern))
        data_filepath = os.path.join(src_dir, self.git_data_dir, data_filename)
        if os.path.isfile(data_filepath):
            data_update = self.get_git_update(data_filepath)
            updates['national_data_update_url_text'] = data_update['date'] + ': see changes on GitHub'
            updates['national_data_update_url'] = data_update['commit_url']
        return updates
    # @deprecated end

    # @deprecated start
    def get_git_update(self, filepath):
        """Change into the working directory of the file (it might be a submodule)
        and get the latest git history"""
        folder = os.path.split(filepath)[0]
        repo = git.Repo(folder, search_parent_directories=True)
        # Need to translate relative to the repo root (this may be a submodule)
        repo_dir = os.path.relpath(repo.working_dir, os.getcwd())
        filepath = os.path.relpath(filepath, repo_dir)
        commit = next(repo.iter_commits(paths=filepath, max_count=1))
        git_date = str(commit.committed_datetime.date())
        git_sha = commit.hexsha
        # Turn the remote URL into a commit URL
        remote = repo.remote().url
        remote_bare = re.sub('^.*github\.com(:|\/)', '', remote).replace('.git','')
        commit_url = 'https://github.com/'+remote_bare+'/commit/'+git_sha
        return {
            'date': git_date,
            'commit_url': commit_url
        }
    # @deprecated end

    def load_metadata_mapping(self):
        """Normalize self.metadata_mapping into a dict (possibly from a CSV file)."""
        mapping = None
        if self.metadata_mapping is None:
            mapping = {}
        elif isinstance(self.metadata_mapping, dict):
            mapping = self.metadata_mapping
        # Otherwise assume it is a path to a file.
        else:
            extension = os.path.splitext(self.metadata_mapping)[1]
            if extension.lower() == '.csv':
                # NOTE(review): pandas removed read_csv's 'squeeze' parameter
                # in 2.0 — use .squeeze("columns") instead when upgrading.
                mapping = pd.read_csv(self.metadata_mapping, header=None, index_col=0, squeeze=True).to_dict()
        if mapping is None:
            raise Exception('Format of metadata_mapping should be a dict or a path to a CSV file.')
        self.metadata_mapping = mapping

    def apply_metadata_mapping(self, metadata):
        """Rename human-readable metadata keys to machine keys, in place."""
        for human_key in self.metadata_mapping:
            machine_key = self.metadata_mapping[human_key]
            if human_key in metadata and human_key != machine_key:
                metadata[machine_key] = metadata[human_key]
                del metadata[human_key]
| StarcoderdataPython |
13681 | <reponame>alekratz/jayk
"""Common utilities used through this codebase."""
import logging
import logging.config
class LogMixin:
    """Mixin that equips a class with standard log-emitting helpers.

    Each helper forwards its arguments straight to an internal
    ``logging.Logger`` created from the name supplied at construction time.
    Delegation is kept direct (no shared dispatch helper) so that the
    function/line information recorded by the logging module is unchanged.
    """

    def __init__(self, logger_name: str):
        """Create the internal logger.

        :param logger_name: the name for this logger. When in doubt, use
            ``MyType.__name__``.
        """
        self.__logger = logging.getLogger(logger_name)

    def critical(self, message, *args, **kwargs):
        """Forward *message* to the internal logger at CRITICAL severity."""
        self.__logger.critical(message, *args, **kwargs)

    def error(self, message, *args, **kwargs):
        """Forward *message* to the internal logger at ERROR severity."""
        self.__logger.error(message, *args, **kwargs)

    def warning(self, message, *args, **kwargs):
        """Forward *message* to the internal logger at WARNING severity."""
        self.__logger.warning(message, *args, **kwargs)

    def info(self, message, *args, **kwargs):
        """Forward *message* to the internal logger at INFO severity."""
        self.__logger.info(message, *args, **kwargs)

    def debug(self, message, *args, **kwargs):
        """Forward *message* to the internal logger at DEBUG severity."""
        self.__logger.debug(message, *args, **kwargs)

    def exception(self, message, *args, **kwargs):
        """Forward *message* with exception info to the internal logger.

        Only call this from within the ``except`` clause of an exception
        handler, as required by ``logging.Logger.exception``.
        """
        self.__logger.exception(message, *args, **kwargs)
| StarcoderdataPython |
1691171 | <gh_stars>1-10
"""
@package mi.dataset.parser
@file /mi/dataset/parser/vel3d_cd_dcl.py
@author <NAME>
@brief Parser for the vel3d instrument series c,d through dcl dataset driver
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
import struct
import re
import os
import binascii
import base64
import ntplib
import pandas as pd
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.exceptions import UnexpectedDataException, SampleException, DataSourceLocationException
from mi.dataset.dataset_parser import SimpleParser
from mi.core.instrument.dataset_data_particle import DataParticle
from mi.dataset.parser.common_regexes import DATE_YYYY_MM_DD_REGEX, TIME_HR_MIN_SEC_MSEC_REGEX
import vel3d_velpt_common
# DCL timestamp prefix on each logged line: a date, a time with milliseconds,
# and a trailing space.
DATE_TIME_REGEX = DATE_YYYY_MM_DD_REGEX + r' ' + TIME_HR_MIN_SEC_MSEC_REGEX + r' '
DATE_TIME_MATCHER = re.compile(DATE_TIME_REGEX)

# Record ID bytes identifying the type of each binary instrument record.
VELOCITY_ID = b'\x10'
SYSTEM_ID = b'\x11'
HEADER_DATA_ID = b'\x12'

# some records do not contain size, store their sizes here
# (keyed by record ID byte; NOTE(review): values presumably byte counts —
# confirm against the record-parsing code)
RECORD_SIZE_DICT = {
    VELOCITY_ID: 24,
    b'\x36': 24,
    b'\x51': 22
}

# map bit index to hex bit mask (mask for bit i equals 1 << i)
BIT_MASK_DICT = {
    0: 0x0001,
    1: 0x0002,
    2: 0x0004,
    3: 0x0008,
    4: 0x0010,
    5: 0x0020,
    6: 0x0040,
    7: 0x0080,
    8: 0x0100,
    9: 0x0200
}
class Vel3dCdDclDataParticleType(BaseEnum):
    """Output stream names for the vel3d_cd_dcl particles, with a
    telemetered and a '_recovered' variant for each particle kind."""
    USER_CONFIG = 'vel3d_cd_dcl_user_configuration'
    USER_CONFIG_RECOV = 'vel3d_cd_dcl_user_configuration_recovered'
    HARDWARE_CONFIG = 'vel3d_cd_dcl_hardware_configuration'
    HARDWARE_CONFIG_RECOV = 'vel3d_cd_dcl_hardware_configuration_recovered'
    HEAD_CONFIG = 'vel3d_cd_dcl_head_configuration'
    HEAD_CONFIG_RECOV = 'vel3d_cd_dcl_head_configuration_recovered'
    DATA_HEADER = 'vel3d_cd_dcl_data_header'
    DATA_HEADER_RECOV = 'vel3d_cd_dcl_data_header_recovered'
    VELOCITY = 'vel3d_cd_dcl_velocity_data'
    VELOCITY_RECOV = 'vel3d_cd_dcl_velocity_data_recovered'
    SYSTEM = 'vel3d_cd_dcl_system_data'
    SYSTEM_RECOV = 'vel3d_cd_dcl_system_data_recovered'
class Vel3dCdDclUserConfigCommonParticle(DataParticle):
    """Base particle for the instrument's user-configuration record.

    The raw record is decoded with a single struct.unpack_from call; the
    two class-level maps below translate positions in the unpacked tuple
    into named output parameters.
    """
    # dictionary for unpacking ints that directly map to a parameter
    # (parameter name -> index into the unpacked tuple)
    UNPACK_DICT = {
        'transmit_pulse_length': 0,
        'blanking_distance': 1,
        'receive_length': 2,
        'time_between_pings': 3,
        'time_between_bursts': 4,
        'number_pings': 5,
        'average_interval': 6,
        'number_beams': 7,
        'compass_update_rate': 16,
        'coordinate_system': 17,
        'number_cells': 18,
        'cell_size': 19,
        'measurement_interval': 20,
        'wrap_mode': 22,
        'diagnostics_interval': 29,
        'sound_speed_adjust_factor': 31,
        'number_diagnostics_samples': 32,
        'number_beams_per_cell': 33,
        'number_pings_diagnostic': 34,
        'analog_input_address': 36,
        'software_version': 37,
        'percent_wave_cell_position': 42,
        'wave_transmit_pulse': 43,
        'fixed_wave_blanking_distance': 44,
        'wave_measurement_cell_size': 45,
        'number_diagnostics_per_wave': 46,
        'number_samples_per_burst': 49,
        'analog_scale_factor': 51,
        'correlation_threshold': 52,
        'transmit_pulse_length_2nd': 54
    }
    # map for unpacking bits, contains name, index of unpacked byte, bit index within that byte
    UNPACK_BIT_MAP = (
        ('profile_type', 8, 1),
        ('mode_type', 8, 2),
        ('power_level_tcm1', 8, 5),
        ('power_level_tcm2', 8, 6),
        ('sync_out_position', 8, 7),
        ('sample_on_sync', 8, 8),
        ('start_on_sync', 8, 9),
        ('power_level_pcr1', 9, 5),
        ('power_level_pcr2', 9, 6),
        ('use_specified_sound_speed', 30, 0),
        ('diagnostics_mode_enable', 30, 1),
        ('analog_output_enable', 30, 2),
        ('output_format_nortek', 30, 3),
        ('scaling', 30, 4),
        ('serial_output_enable', 30, 5),
        ('stage_enable', 30, 7),
        ('analog_power_output', 30, 8),
        ('use_dsp_filter', 35, 0),
        ('filter_data_output', 35, 1),
        ('wave_data_rate', 41, 0),
        ('wave_cell_position', 41, 1),
        ('dynamic_position_type', 41, 2),
    )

    def _build_parsed_values(self):
        """
        Return an array of dictionaries containing parameters for the user config data particle
        :return: array of dictionary of parameters
        """
        parameters = []
        # unpack binary raw data string into an array of individual values,
        # little-endian, skipping the 4-byte record header (offset 4)
        unpacked_data = struct.unpack_from('<10H 6B 5H 6s H 6B I 9H 180s 180s 14H 30B 16s', self.raw_data, 4)
        # indices 23-28 hold the six deployment start time fields
        deployment_start = unpacked_data[23:29]
        parameters.append(self._encode_value('deployment_start_time', deployment_start, list))
        # string encoding based on nortek instrument driver
        # these strings may have extra nulls at the end, remove them
        parameters.append(self._encode_value('deployment_name', unpacked_data[21].split('\x00', 1)[0], str))
        parameters.append(self._encode_value('file_comments', unpacked_data[40].split('\x00', 1)[0], str))
        # encode as base 64
        parameters.append(self._encode_value('velocity_adjustment_factor', base64.b64encode(unpacked_data[39]), str))
        parameters.append(self._encode_value('filter_constants', base64.b64encode(unpacked_data[85]), str))
        # unpack dict contains all ints
        # NOTE(review): dict.iteritems() is Python 2 only; use items() if
        # porting this driver to Python 3.
        for name, index in self.UNPACK_DICT.iteritems():
            parameters.append(self._encode_value(name, unpacked_data[index], int))
        # isolate each flag bit and shift it down to 0/1
        for name, index, bit_index in self.UNPACK_BIT_MAP:
            parameters.append(self._encode_value(name, (unpacked_data[index] & BIT_MASK_DICT[bit_index]) >> bit_index,
                                                 int))
        return parameters
class Vel3dCdDclUserConfigTelemeteredParticle(Vel3dCdDclUserConfigCommonParticle):
    # stream name for the telemetered user configuration particle
    _data_particle_type = Vel3dCdDclDataParticleType.USER_CONFIG
class Vel3dCdDclUserConfigRecoveredParticle(Vel3dCdDclUserConfigCommonParticle):
    # stream name for the recovered user configuration particle
    _data_particle_type = Vel3dCdDclDataParticleType.USER_CONFIG_RECOV
class Vel3dCdDclHardwareConfigCommonParticle(DataParticle):
    """
    Base class building the vel3d_cd_dcl hardware configuration data particle.
    The telemetered / recovered subclasses differ only in _data_particle_type.
    """
    # map for unpacking ints and strings that directly map to a parameter:
    # (parameter name, index into the unpacked tuple, encoding type)
    UNPACK_MAP = [
        ('board_frequency', 2, int),
        ('pic_version', 3, int),
        ('hardware_revision', 4, int),
        ('recorder_size', 5, int),
        ('firmware_version', 19, str)
    ]
    # map for unpacking bits, contains name, index of unpacked byte, bit index within that byte
    UNPACK_BIT_MAP = [
        ('recorder_installed', 1, 0),
        ('compass_installed', 1, 1),
        ('velocity_range', 6, 0)
    ]
    def _build_parsed_values(self):
        """
        Return an array of dictionaries containing parameters for the hardware configuration particle
        :return: array of dictionary of parameters
        """
        parameters = []
        # unpack binary raw data string into an array of individual values, starting at byte 4
        unpacked_data = struct.unpack_from('<14s 6H 12B 4s', self.raw_data, 4)
        # this string may have extra nulls at the end, remove them
        parameters.append(self._encode_value('instrmt_type_serial_number', unpacked_data[0].split('\x00', 1)[0], str))
        # directly-mapped int and string fields
        for name, index, data_type in self.UNPACK_MAP:
            parameters.append(self._encode_value(name, unpacked_data[index], data_type))
        # unpack bit fields: mask the bit, then shift it down to 0/1
        for name, index, bit_index in self.UNPACK_BIT_MAP:
            parameters.append(self._encode_value(name,
                                                 (unpacked_data[index] & BIT_MASK_DICT.get(bit_index)) >> bit_index,
                                                 int))
        return parameters
class Vel3dCdDclHardwareConfigTelemeteredParticle(Vel3dCdDclHardwareConfigCommonParticle):
    # stream name for the telemetered hardware configuration particle
    _data_particle_type = Vel3dCdDclDataParticleType.HARDWARE_CONFIG
class Vel3dCdDclHardwareConfigRecoveredParticle(Vel3dCdDclHardwareConfigCommonParticle):
    # stream name for the recovered hardware configuration particle
    _data_particle_type = Vel3dCdDclDataParticleType.HARDWARE_CONFIG_RECOV
class Vel3dCdDclHeadConfigCommonParticle(DataParticle):
    """
    Base class building the vel3d_cd_dcl head configuration data particle.
    The telemetered / recovered subclasses differ only in _data_particle_type.
    """
    # dictionary for unpacking ints that directly map to a parameter
    UNPACK_DICT = {
        'head_frequency': 1,
        'number_beams': 27
    }
    # map for unpacking bits, contains name, index of unpacked byte, bit index within that byte
    UNPACK_BIT_MAP = [
        ('pressure_sensor', 0, 0),
        ('magnetometer_sensor', 0, 1),
        ('tilt_sensor', 0, 2),
        ('tilt_sensor_mounting', 0, 3),
    ]
    def _build_parsed_values(self):
        """
        Return an array of dictionaries containing parameters for the head configuration particle
        :return: array of dictionary of parameters
        """
        parameters = []
        # unpack binary raw data string into an array of individual values, starting at byte 4
        unpacked_data = struct.unpack_from('<2H 2s 12s 176s 22B 2H', self.raw_data, 4)
        # string encoding based on nortek instrument driver
        # these strings may have extra nulls at the end, remove them
        parameters.append(self._encode_value('head_type', unpacked_data[2].split('\x00', 1)[0], str))
        parameters.append(self._encode_value('head_serial_number', unpacked_data[3].split('\x00', 1)[0], str))
        # opaque 176-byte system data block is passed through base64 encoded
        parameters.append(self._encode_value('system_data', base64.b64encode(unpacked_data[4]), str))
        # pull out bits from head config, the first unpacked byte
        for name, index, bit_index in self.UNPACK_BIT_MAP:
            parameters.append(self._encode_value(name,
                                                 (unpacked_data[index] & BIT_MASK_DICT.get(bit_index)) >> bit_index,
                                                 int))
        for name, index in self.UNPACK_DICT.iteritems():
            parameters.append(self._encode_value(name, unpacked_data[index], int))
        return parameters
class Vel3dCdDclHeadConfigTelemeteredParticle(Vel3dCdDclHeadConfigCommonParticle):
    # stream name for the telemetered head configuration particle
    _data_particle_type = Vel3dCdDclDataParticleType.HEAD_CONFIG
class Vel3dCdDclHeadConfigRecoveredParticle(Vel3dCdDclHeadConfigCommonParticle):
    # stream name for the recovered head configuration particle
    _data_particle_type = Vel3dCdDclDataParticleType.HEAD_CONFIG_RECOV
class Vel3dCdDclDataHeaderCommonParticle(DataParticle):
    """
    Base class building the vel3d_cd_dcl data header particle.
    The telemetered / recovered subclasses differ only in _data_particle_type.
    """
    # store index into unpacked raw data by parameter name, all are ints
    # (unpacking starts at struct offset 10 of the record -- NOTE(review):
    # the original comment said "byte 9", which did not match the code)
    UNPACK_DICT = {
        'number_velocity_records': 0,
        'noise_amp_beam1': 1,
        'noise_amp_beam2': 2,
        'noise_amp_beam3': 3,
        # index 4 is spare
        'noise_correlation_beam1': 5,
        'noise_correlation_beam2': 6,
        'noise_correlation_beam3': 7,
    }
    def _build_parsed_values(self):
        """
        Return an array of dictionaries containing parameters for the data header particle
        :return: array of dictionary of parameters
        """
        # unpack the raw data starting at byte offset 10
        unpacked_data = struct.unpack_from('<H 7B', self.raw_data, 10)
        # get the date time string
        date_time_string = vel3d_velpt_common.get_date_time_string(self.raw_data)
        parameters = [self._encode_value('date_time_string', date_time_string, str)]
        for name, index in self.UNPACK_DICT.iteritems():
            parameters.append(self._encode_value(name, unpacked_data[index], int))
        return parameters
class Vel3dCdDclDataHeaderTelemeteredParticle(Vel3dCdDclDataHeaderCommonParticle):
    # stream name for the telemetered data header particle
    _data_particle_type = Vel3dCdDclDataParticleType.DATA_HEADER
class Vel3dCdDclDataHeaderRecoveredParticle(Vel3dCdDclDataHeaderCommonParticle):
    # stream name for the recovered data header particle
    _data_particle_type = Vel3dCdDclDataParticleType.DATA_HEADER_RECOV
class Vel3dCdDclVelocityCommonParticle(DataParticle):
    """
    Base class building the vel3d_cd_dcl velocity data particle.
    The telemetered / recovered subclasses differ only in _data_particle_type.
    """
    # store index into unpacked raw data by parameter name, all are ints
    UNPACK_DICT = {
        'ensemble_counter': 3,
        'analog_input_1': 7,
        'turbulent_velocity_east': 8,
        'turbulent_velocity_north': 9,
        'turbulent_velocity_vertical': 10,
        'amplitude_beam_1': 11,
        'amplitude_beam_2': 12,
        'amplitude_beam_3': 13,
        'correlation_beam_1': 14,
        'correlation_beam_2': 15,
        'correlation_beam_3': 16,
    }
    def _build_parsed_values(self):
        """
        Return an array of dictionaries containing parameters for the velocity particle
        :return: array of dictionary of parameters
        """
        parameters = []
        # unpack binary raw data string into an array of individual values
        unpacked_data = struct.unpack('<6B 2H 3h 6B H', self.raw_data)
        # unpack the data into parameters and values using the dictionary
        for name, index in self.UNPACK_DICT.iteritems():
            parameters.append(self._encode_value(name, unpacked_data[index], int))
        # some parameters need extra calculations
        analog_2_lsb = unpacked_data[2]
        analog_2_msb = unpacked_data[5]
        # combine least and most significant byte
        analog_2 = (analog_2_msb << 8) + analog_2_lsb
        parameters.append(self._encode_value('analog_input_2', analog_2, int))
        pressure_msb = unpacked_data[4]
        pressure_lsw = unpacked_data[6]
        # combine least significant word and most significant byte
        pressure = (pressure_msb << 16) + pressure_lsw
        parameters.append(self._encode_value('seawater_pressure_mbar', pressure, int))
        return parameters
class Vel3dCdDclVelocityTelemeteredParticle(Vel3dCdDclVelocityCommonParticle):
    # stream name for the telemetered velocity particle
    _data_particle_type = Vel3dCdDclDataParticleType.VELOCITY
class Vel3dCdDclVelocityRecoveredParticle(Vel3dCdDclVelocityCommonParticle):
    # stream name for the recovered velocity particle
    _data_particle_type = Vel3dCdDclDataParticleType.VELOCITY_RECOV
class Vel3dCdDclSystemCommonParticle(DataParticle):
    """
    Base class building the vel3d_cd_dcl system data particle.
    The telemetered / recovered subclasses differ only in _data_particle_type.
    """
    def _build_parsed_values(self):
        """
        Return an array of dictionaries containing parameters for the system particle
        :return: array of dictionary of parameters
        """
        # unpack the fields that follow the record header / time (byte offset 10)
        unpacked_data = struct.unpack_from('<2H 4h 2B H', self.raw_data, 10)
        # get the date time string from the raw data
        date_time_string = vel3d_velpt_common.get_date_time_string(self.raw_data)
        parameters = [self._encode_value('date_time_string', date_time_string, str),
                      self._encode_value('battery_voltage_dV', unpacked_data[0], int),
                      self._encode_value('sound_speed_dms', unpacked_data[1], int),
                      self._encode_value('heading_decidegree', unpacked_data[2], int),
                      self._encode_value('pitch_decidegree', unpacked_data[3] , int),
                      self._encode_value('roll_decidegree', unpacked_data[4], int),
                      self._encode_value('temperature_centidegree', unpacked_data[5], int),
                      self._encode_value('error_code', unpacked_data[6], int),
                      self._encode_value('status_code', unpacked_data[7], int),
                      self._encode_value('analog_input', unpacked_data[8], int)]
        return parameters
class Vel3dCdDclSystemTelemeteredParticle(Vel3dCdDclSystemCommonParticle):
    # stream name for the telemetered system particle
    _data_particle_type = Vel3dCdDclDataParticleType.SYSTEM
class Vel3dCdDclSystemRecoveredParticle(Vel3dCdDclSystemCommonParticle):
    # stream name for the recovered system particle
    _data_particle_type = Vel3dCdDclDataParticleType.SYSTEM_RECOV
class Vel3dCdDclParser(SimpleParser):
    """
    Class used to parse the vel3d_cd_dcl data set.

    Velocity records carry no timestamp of their own: they are buffered and
    timestamped by interpolating between surrounding system records.  The
    instrument clock resets on every power cycle, so all record timestamps are
    shifted by an offset derived from the DCL file name (see adjust_timestamp).
    """
    def __init__(self,
                 file_handle,
                 exception_callback,
                 source_file_path,
                 is_telemetered):
        """
        :param file_handle: open file-like object containing the raw data
        :param exception_callback: callable invoked with recoverable exceptions
        :param source_file_path: path of the source file; its name encodes the
            DCL logging start time used for timestamp adjustment
        :param is_telemetered: True to emit telemetered particle classes,
            False to emit recovered ones
        """
        self._file_handle = file_handle
        self.source_file_path = source_file_path
        # velocity records buffered until their timestamps can be computed
        self.stored_velocity_records = []
        # number of velocity records seen in the previous system-record interval
        self.stored_n_velocity_records = 0
        self.previous_system_timestamp = None
        self.first_timestamp = None
        self.stored_hardware_config = None
        self.stored_head_config = None
        self.instrument_timestamp_adjustment = None
        if is_telemetered:
            # use telemetered classes
            self.user_config_class = Vel3dCdDclUserConfigTelemeteredParticle
            self.hardware_config_class = Vel3dCdDclHardwareConfigTelemeteredParticle
            self.head_config_class = Vel3dCdDclHeadConfigTelemeteredParticle
            self.data_header_class = Vel3dCdDclDataHeaderTelemeteredParticle
            self.velocity_class = Vel3dCdDclVelocityTelemeteredParticle
            self.system_class = Vel3dCdDclSystemTelemeteredParticle
        else:
            # use recovered classes
            self.user_config_class = Vel3dCdDclUserConfigRecoveredParticle
            self.hardware_config_class = Vel3dCdDclHardwareConfigRecoveredParticle
            self.head_config_class = Vel3dCdDclHeadConfigRecoveredParticle
            self.data_header_class = Vel3dCdDclDataHeaderRecoveredParticle
            self.velocity_class = Vel3dCdDclVelocityRecoveredParticle
            self.system_class = Vel3dCdDclSystemRecoveredParticle
        # no config for this parser, pass in empty dict
        super(Vel3dCdDclParser, self).__init__({}, file_handle, exception_callback)
    def parse_file(self):
        """
        Main parsing function which loops through the file and interprets it by building particles
        """
        end_of_file = False
        # loop until the entire file is read
        while not end_of_file:
            # read up to the start of a record by finding the sync marker
            end_of_file = self.find_record_start()
            if end_of_file:
                # make sure we break out of this loop if there are no more bytes in the file
                continue
            # now that the sync marker has been found, get the record type which follows
            record_type = self._file_handle.read(1)
            if record_type in RECORD_SIZE_DICT.keys():
                # this record type does not contain the record size, get it from the dictionary
                record_size_bytes = RECORD_SIZE_DICT.get(record_type)
                full_record = vel3d_velpt_common.SYNC_MARKER + record_type
            else:
                # this record type does contain the record size, read it from the file
                record_size_words = self._file_handle.read(2)
                # unpack and convert from words to bytes
                record_size_bytes = struct.unpack('<H', record_size_words)[0] * 2
                full_record = vel3d_velpt_common.SYNC_MARKER + record_type + record_size_words
            # based on the obtained record size, read the rest of the record
            remain_bytes = record_size_bytes - len(full_record)
            remain_record = self._file_handle.read(remain_bytes)
            # store the full record
            full_record += remain_record
            if len(remain_record) < remain_bytes:
                # if we did not read as many bytes as were requested, we ran into the end of the file
                msg = 'Incomplete record 0x%s' % binascii.hexlify(full_record)
                log.warning(msg)
                self._exception_callback(SampleException(msg))
                end_of_file = True
                continue
            # compare checksums
            if not vel3d_velpt_common.match_checksum(full_record):
                # checksums did not match, do not process this record further
                msg = 'Checksums do not match for record type 0x%s' % binascii.hexlify(record_type)
                log.warn(msg)
                self._exception_callback(SampleException(msg))
                continue
            # process record based on the type
            self.process_records(record_type, full_record)
        if self.stored_velocity_records:
            # If stored velocity records are present here, we only got a partial set at the end of the file
            # without a terminating system record. Use the previous number of samples.
            if self.stored_n_velocity_records != 0:
                time_offset = 1.0/float(self.stored_n_velocity_records)
                self.extract_velocities(time_offset)
            else:
                # NOTE(review): message grammar -- should read "Unable to calculate"
                msg = 'Unable to calculating timestamp for last set of velocity records'
                log.warn(msg)
                self._exception_callback(SampleException(msg))
    def find_record_start(self):
        """
        Find the start of the next record by looking for the sync marker
        :return: True if the end of the file was found, False if it was not
        """
        end_of_file = False
        read_buffer = ''
        # read one byte at a time until the sync marker is found
        one_byte = self._file_handle.read(1)
        while one_byte != vel3d_velpt_common.SYNC_MARKER:
            # store anything we find before the sync marker in the read buffer
            read_buffer += one_byte
            one_byte = self._file_handle.read(1)
            if one_byte == '':
                # no more bytes to read, break out of this loop
                end_of_file = True
                break
        if len(read_buffer) > 1 and not DATE_TIME_MATCHER.match(read_buffer):
            # we expect a version of the file to have ascii date time strings prior to each record, if this
            # is something other than that call the exception
            msg = 'Found unexpected data 0x%s' % binascii.hexlify(read_buffer)
            log.warning(msg)
            self._exception_callback(UnexpectedDataException(msg))
        return end_of_file
    def process_records(self, record_type, full_record):
        """
        based on the record type process the data, if the record type is not mentioned here it is ignored
        :param record_type: the record type associated with this record
        :param full_record: the full data string associated with this record
        """
        if record_type == vel3d_velpt_common.USER_CONFIGURATION_ID:
            self.process_user_config(full_record)
        elif record_type == vel3d_velpt_common.HARDWARE_CONFIGURATION_ID:
            self.process_hardware_config(full_record)
        elif record_type == vel3d_velpt_common.HEAD_CONFIGURATION_ID:
            self.process_head_config(full_record)
        elif record_type == VELOCITY_ID:
            # append velocity record to buffer, these are collected until the timestamp can be calculated
            self.stored_velocity_records.append(full_record)
        elif record_type == SYSTEM_ID:
            self.process_system(full_record)
        elif record_type == HEADER_DATA_ID:
            self.process_header_data(full_record)
    def process_user_config(self, full_record):
        """
        Extract the user config particle, and set the first timestamp if it has not been set yet
        :param full_record: The raw data string of the user config particle
        """
        # get the timestamp for this particle
        timestamp = vel3d_velpt_common.get_timestamp(full_record, start_byte=48)
        timestamp = self.adjust_timestamp(timestamp)
        # if the first timestamp has not been set, set it here
        if self.first_timestamp is None:
            self.first_timestamp = timestamp
            # check if head or hardware messages have been received and not sent yet
            self.extract_h_config()
        self.simple_extract(self.user_config_class, full_record, timestamp)
    def process_hardware_config(self, full_record):
        """
        If the first timestamp has been set, use this as the timestamp of this particle and extract it,
        otherwise store it until the first timestamp has been set
        :param full_record: The raw data string to pass into the hardware configuration particle
        """
        # first_timestamp is used as the timestamp of this particle, if it is not set yet wait until it is
        if self.first_timestamp:
            self.simple_extract(self.hardware_config_class, full_record, self.first_timestamp)
        else:
            self.stored_hardware_config = full_record
    def process_head_config(self, full_record):
        """
        If the first timestamp has been set, use this as the timestamp of this particle and extract it,
        otherwise store it until the first timestamp has been set
        :param full_record: The raw data string to pass into the head configuration particle
        """
        # first_timestamp is used as the timestamp of this particle, if it is not set yet wait until it is
        if self.first_timestamp:
            self.simple_extract(self.head_config_class, full_record, self.first_timestamp)
        else:
            self.stored_head_config = full_record
    def process_system(self, full_record):
        """
        Extract a system record, and if there is a pair of system records with velocities in between determine
        the time offset between velocity timestamps and extract the velocity records. Also if the first timestamp
        has not been set yet, set it
        :param full_record: The raw data string to pass into the system particle
        """
        if self.previous_system_timestamp is not None and self.stored_velocity_records != []:
            # there has been a pair of system records and with velocity records in between
            n_vel_records = len(self.stored_velocity_records)
            time_offset = 1.0/float(n_vel_records)
            # calculate the timestamps and extract velocity records
            self.extract_velocities(time_offset)
            self.stored_n_velocity_records = n_vel_records
        # get the timestamp associated with this system record
        timestamp = vel3d_velpt_common.get_timestamp(full_record)
        timestamp = self.adjust_timestamp(timestamp)
        # extract the system record
        self.simple_extract(self.system_class, full_record, timestamp)
        self.previous_system_timestamp = float(timestamp)
        if self.first_timestamp is None:
            self.first_timestamp = timestamp
            # check if head or hardware messages have been received and not sent yet
            self.extract_h_config()
    def extract_velocities(self, time_offset):
        """
        loop calculating timestamp and extracting stored velocity records
        :param time_offset: The time offset (in seconds) between velocity records to use in calculating the timestamp
        """
        for i in range(0, len(self.stored_velocity_records)):
            # each velocity record is offset from the previous system record's timestamp
            timestamp = self.previous_system_timestamp + (i * time_offset)
            self.simple_extract(self.velocity_class, self.stored_velocity_records[i], timestamp)
        # now that they have been extracted, clear the velocity record buffer
        self.stored_velocity_records = []
    def extract_h_config(self):
        """
        If hardware config or head config messages have been received and not extracted yet, extract them here
        """
        if self.stored_hardware_config:
            self.simple_extract(self.hardware_config_class, self.stored_hardware_config, self.first_timestamp)
            self.stored_hardware_config = None
        if self.stored_head_config:
            self.simple_extract(self.head_config_class, self.stored_head_config, self.first_timestamp)
            self.stored_head_config = None
    def process_header_data(self, full_record):
        """
        Extract the header data particle, and set the first timestamp if it has not been set
        :param full_record: The raw data string to pass into the header data particle
        """
        # get the timestamp for this particle
        timestamp = vel3d_velpt_common.get_timestamp(full_record)
        timestamp = self.adjust_timestamp(timestamp)
        # check if the first timestamp has been set, if not set it
        if self.first_timestamp is None:
            self.first_timestamp = timestamp
            # check if head or hardware messages have been received and not sent yet
            self.extract_h_config()
        # extract the data header particle
        self.simple_extract(self.data_header_class, full_record, timestamp)
    def simple_extract(self, class_type, data, timestamp):
        """
        Extract the particle and appending it to the record buffer
        :param class_type: The class of the particle to extract
        :param data: The raw data to pass into the particle
        :param timestamp: The timestamp to pass into the particle
        """
        particle = self._extract_sample(class_type, None, data, internal_timestamp=timestamp)
        self._record_buffer.append(particle)
    def adjust_timestamp(self, timestamp):
        """
        The instrument runs every half hour. It is powered down between runs so its internal clock is reset
        to 19700101 before every run. All timestamps in the file are therefor incorrect. To correct the sample
        times, extract the DCL logging start time from filename that it generates and round to nearest 30 minutes
        to approximate when the instrument started up. Subtract the first timestamp reported in the file from
        this value to get the adjustment offset to be used to correct all the other timestamps in the file.
        :param timestamp: The raw (incorrect) NTP timestamp from a record in file
        :return: The corrected NTP timestamp
        """
        if self.instrument_timestamp_adjustment is None:
            # This is the first timestamp in the file which was generated shortly after the instrument
            # started up. Compare this timestamp to date extracted from the file name generated by the
            # DCL to determine how much the timestamps in the file need to be adjusted by.
            instrument_startup_timestamp = self.get_instrument_startup_time()
            self.instrument_timestamp_adjustment = instrument_startup_timestamp - timestamp
            return instrument_startup_timestamp
        # Is not the first record so apply the adjustment
        return timestamp + self.instrument_timestamp_adjustment
    def get_instrument_startup_time(self):
        """
        Derive instrument startup time from the datetime extracted from file name rounded to the nearest half hour.
        :return: Approximate NTP time of instrument startup
        """
        source_file_dir, source_file_name = os.path.split(self.source_file_path)
        # file names are expected to begin with a YYYYMMDD_HHMMSS prefix
        m = re.match('^[0-9]{8}_[0-9]{6}', source_file_name)
        if not m:
            raise DataSourceLocationException("Invalid file name: %s" % self.source_file_path)
        pd_datetime = pd.to_datetime(m.group(0), format='%Y%m%d_%H%M%S').round(freq='30T')
        return float(ntplib.system_to_ntp_time(pd_datetime.timestamp()))
| StarcoderdataPython |
9702052 | <reponame>NinjasCL-labs/masonite-i18n<filename>lang/helpers/filesystem/openers.py
# coding: utf-8
# See https://docs.pyfilesystem.org/en/latest/openers.html
# PyFilesystem opener URL prefixes used when constructing filesystems
OPERATING_SYSTEM = "osfs://"  # local operating-system filesystem
MEMORY = "mem://"  # transient in-memory filesystem
| StarcoderdataPython |
270099 | <reponame>cerevo/-listnr-server-sample-py
# -*- coding: utf-8 -*-
# reference
# http://stackoverflow.com/questions/680305/using-multipartposthandler-to-post-form-data-with-python
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2
import json
import sys
from ConfigParser import SafeConfigParser
config = SafeConfigParser()
config.read('config.ini')
def recognize(wav_filename, language=None):
    """
    Upload a WAV file to the docomo amiVoice speech recognition API and
    print / return the recognized text.

    :param wav_filename: path to the WAV file to send
    :param language: unused -- TODO(review): either forward to the API or drop
    :return: recognized text from the API's JSON response
    """
    # Register the streaming http handlers with urllib2
    register_openers()
    # NOTE(review): the file handle passed here is never explicitly closed
    datagen, headers = multipart_encode({"a": open(wav_filename)})
    # Create the Request object
    apikey = config.get("docomo_developer", "apikey")
    apiurl = "https://api.apigw.smt.docomo.ne.jp/amiVoice/v1/recognize?APIKEY={0}".format(apikey)
    request = urllib2.Request(apiurl, datagen, headers)
    # Actually do the request, and get the response
    result = urllib2.urlopen(request).read()
    # print(result.decode('unicode_escape'))
    result_json = json.loads(result)
    print(result_json["text"])
    return result_json["text"]
if __name__ == '__main__':
    # Python 2 script entry point: expects exactly one argument, the WAV path
    argvs = sys.argv
    argc = len(argvs)
    if (argc != 2):
        print 'Usage: # python %s wav_filename' % argvs[0]
        quit()
    wav_filename = argvs[1]
    recognize(wav_filename)
| StarcoderdataPython |
20684 | from sklearn.cluster import KMeans
import cv2
import PIL
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import image as img1
import pandas as pd
from scipy.cluster.vq import whiten
import os
class DominantColors:
    """
    Extract and visualize the dominant colors of an image via k-means
    clustering of its pixels.
    NOTE(review): dominantColors() converts pixels to CIE Luv before
    clustering, so COLORS holds Luv triplets even though the plotting
    helpers format them as RGB -- confirm intent.
    """
    CLUSTERS = None     # number of k-means clusters
    IMAGEPATH = None    # path of the image to analyse
    IMAGE = None        # flattened pixel array after preprocessing
    COLORS = None       # cluster centers (dominant colors)
    LABELS = None       # cluster label per pixel
    BASEWIDTH = 256     # width images are resized to before clustering
    def __init__(self, image, clusters=3):
        """
        :param image: path to the image file
        :param clusters: number of dominant colors to extract
        """
        self.CLUSTERS = clusters
        self.IMAGEPATH = image
    def dominantColors(self):
        """
        Cluster the image pixels and return the cluster centers as ints.
        Also stores IMAGE, COLORS and LABELS on the instance.
        """
        # read image
        img = cv2.imread(self.IMAGEPATH)
        # resize image, preserving aspect ratio at BASEWIDTH pixels wide
        imgh, imgw, _ = img.shape
        wpercent = (self.BASEWIDTH / float(imgw))
        hsize = int((float(imgh) * float(wpercent)))
        img = cv2.resize(img, (self.BASEWIDTH, hsize), PIL.Image.ANTIALIAS)
        # convert to CIE Luv -- NOTE(review): the original comment said
        # "convert to rgb from bgr", but the code uses COLOR_RGB2Luv on
        # cv2's BGR-ordered data; confirm the intended color space
        img = cv2.cvtColor(img, cv2.COLOR_RGB2Luv)
        # reshaping to a list of pixels
        img = img.reshape((img.shape[0] * img.shape[1], 3))
        # save image after operations
        self.IMAGE = img
        # using k-means to cluster pixels
        kmeans = KMeans(n_clusters=self.CLUSTERS)
        kmeans.fit(img)
        # the cluster centers are our dominant colors.
        self.COLORS = kmeans.cluster_centers_
        # save labels
        self.LABELS = kmeans.labels_
        # returning after converting to integer from float
        return self.COLORS.astype(int)
    def rgb_to_hex(self, rgb):
        """Format a 3-component color as a '#rrggbb' hex string."""
        return '#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2]))
    def analyseRGB(self):
        """
        Scatter-plot the image's channel values in 3D, then run a 2-cluster
        k-means on whitened channels and display the resulting colors.
        NOTE(review): opens blocking matplotlib windows.
        """
        r = []
        g = []
        b = []
        image = img1.imread(self.IMAGEPATH)
        for line in image:
            for pixel in line:
                # print(pixel)
                temp_r, temp_g, temp_b = pixel
                r.append(temp_r)
                g.append(temp_g)
                b.append(temp_b)
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.scatter(r, g, b)
        plt.show()
        df = pd.DataFrame({'red': r, 'blue': b, 'green': g})
        # whiten normalizes each channel by its standard deviation
        df['scaled_red'] = whiten(df['red'])
        df['scaled_blue'] = whiten(df['blue'])
        df['scaled_green'] = whiten(df['green'])
        df.sample(n=10)
        from scipy.cluster.vq import kmeans
        cluster_centers, distortion = kmeans(df[['scaled_red', 'scaled_green', 'scaled_blue']], 2)
        print(cluster_centers)
        colors = []
        r_std, g_std, b_std = df[['red', 'green', 'blue']].std()
        # un-whiten the centers and scale into [0, 1] for imshow
        for cluster_center in cluster_centers:
            scaled_r, scaled_g, scaled_b = cluster_center
            colors.append((scaled_r * r_std / 255, scaled_g * g_std / 255, scaled_b * b_std / 255))
        plt.imshow([colors])
        plt.show()
    def plotClusters(self):
        """Scatter-plot each stored pixel colored by its cluster's center."""
        # plotting
        fig = plt.figure()
        ax = Axes3D(fig)
        for label, pix in zip(self.LABELS, self.IMAGE):
            ax.scatter(pix[0], pix[1], pix[2], color=self.rgb_to_hex(self.COLORS[label]))
        plt.show()
    def plotHistogram(self):
        """Show a horizontal bar of the dominant colors sized by frequency."""
        # labels form 0 to no. of clusters
        numLabels = np.arange(0, self.CLUSTERS + 1)
        # create frequency count tables
        (hist, _) = np.histogram(self.LABELS, bins=numLabels)
        hist = hist.astype("float")
        hist /= hist.sum()
        # appending frequencies to cluster centers
        colors = self.COLORS
        # descending order sorting as per frequency count
        colors = colors[(-hist).argsort()]
        hist = hist[(-hist).argsort()]
        # creating empty chart
        chart = np.zeros((50, 500, 3), np.uint8)
        start = 0
        # creating color rectangles
        for i in range(self.CLUSTERS):
            end = start + hist[i] * 500
            # getting rgb values
            r = colors[i][0]
            g = colors[i][1]
            b = colors[i][2]
            # using cv2.rectangle to plot colors
            cv2.rectangle(chart, (int(start), 0), (int(end), 50), (r, g, b), -1)
            start = end
        # display chart
        plt.figure()
        plt.axis("off")
        plt.imshow(chart)
        plt.show()
def _main_():
    """Run dominant-color extraction for every image in the predicted-output directory."""
    clusters = 8
    # NOTE(review): the directory listed here differs from the path formatted
    # below ('output\\predicted\\' vs '..\\..\\data\\output\\predicted\\');
    # confirm both resolve to the same directory at runtime
    for img in sorted(os.listdir('output\\predicted\\')):
        print(img)
        dc = DominantColors('..\\..\\data\\output\\predicted\\{0}'.format(img), clusters)
        colors = dc.dominantColors()
        dc.analyseRGB()
if __name__ == '__main__':
    _main_()
| StarcoderdataPython |
11384119 | """
Generated by CHARMM-GUI (http://www.charmm-gui.org)
omm_readparams.py
This module is for reading coordinates and parameters in OpenMM.
Correspondance: <EMAIL> or <EMAIL>
Last update: March 29, 2017
"""
import os
from simtk.unit import *
from simtk.openmm import *
from simtk.openmm.app import *
def read_psf(filename):
    """Read a CHARMM PSF topology file and return the CharmmPsfFile object."""
    psf = CharmmPsfFile(filename)
    return psf
def read_crd(filename):
    """Read a CHARMM coordinate (CRD) file and return the CharmmCrdFile object."""
    crd = CharmmCrdFile(filename)
    return crd
def read_charmm_rst(filename):
    """
    Read a CHARMM restart file, extract the crystal (box) dimensions from its
    header, and shift the old coordinates by half the box in each dimension.

    :param filename: path to the CHARMM restart file
    :return: CharmmRstFile with .box and recentered .positions (angstroms) set
    """
    charmm_rst = CharmmRstFile(filename)
    # scan the header for the CRYSTAL section; the two lines that follow it
    # hold the box lengths in Fortran 'D' exponent notation
    for i, line in enumerate(charmm_rst.header):
        line = line.strip()
        words = line.split()
        if len(line) != 0:
            if words[0] == 'CRYSTAL' or words[0] == '!CRYSTAL':
                line1 = charmm_rst.header[i+1]
                line2 = charmm_rst.header[i+2]
                boxlx = Vec3(float(line1.split()[0].replace("D", "E")), 0.0, 0.0)
                boxly = Vec3(0.0, float(line1.split()[2].replace("D", "E")), 0.0)
                boxlz = Vec3(0.0, 0.0, float(line2.split()[2].replace("D", "E")))
                box = (boxlx, boxly, boxlz)
                break
    positions = charmm_rst.positionsold
    new_positions = []
    # translate each coordinate by half the box so coordinates are positive
    for position in positions:
        oldx = position[0]/angstrom
        oldy = position[1]/angstrom
        oldz = position[2]/angstrom
        newx = oldx + boxlx[0]/2.0
        newy = oldy + boxly[1]/2.0
        newz = oldz + boxlz[2]/2.0
        new_positions.append(Vec3(newx, newy, newz))
    charmm_rst.box = Quantity(box, angstroms)
    charmm_rst.positions = Quantity(new_positions, angstroms)
    return charmm_rst
def read_params(filename):
    """
    Read a "toppar" stream file listing CHARMM parameter file names (one per
    line, '!' starts a comment) and load every rtf/prm/str file it references.

    :param filename: path to the toppar list file
    :return: CharmmParameterSet built from all referenced parameter files
    """
    extlist = ['rtf', 'prm', 'str']
    parFiles = ()
    # use a context manager so the list file is closed promptly
    # (the original iterated a bare open() and leaked the handle)
    with open(filename, 'r') as toppar:
        for line in toppar:
            # strip trailing '!' comments
            if '!' in line:
                line = line.split('!')[0]
            parfile = line.strip()
            if len(parfile) == 0:
                continue
            # only accept recognized CHARMM parameter file extensions
            ext = parfile.lower().split('.')[-1]
            if ext not in extlist:
                continue
            parFiles += (parfile, )
    params = CharmmParameterSet(*parFiles)
    return params
def read_box(psf, filename):
    """
    Read the periodic box dimensions (BOXLX/BOXLY/BOXLZ, in angstroms) from a
    CHARMM-GUI sysinfo file of 'KEY = value' lines and apply them to the psf.

    :param psf: CharmmPsfFile whose box is set (modified in place)
    :param filename: path to the sysinfo file
    :return: the same psf object, with its box set
    """
    # close the file deterministically (the original leaked the handle)
    with open(filename, 'r') as sysinfo:
        for line in sysinfo:
            segments = line.split('=')
            if segments[0].strip() == "BOXLX": boxlx = float(segments[1])
            if segments[0].strip() == "BOXLY": boxly = float(segments[1])
            if segments[0].strip() == "BOXLZ": boxlz = float(segments[1])
    # a NameError here means one of the BOXL* keys was missing from the file
    psf.setBox(boxlx*angstroms, boxly*angstroms, boxlz*angstroms)
    return psf
def gen_box(psf, crd):
    """
    Derive box dimensions from the coordinate extents and set them on the psf.

    :param psf: CharmmPsfFile whose box is set (modified in place)
    :param crd: coordinate object exposing a ``positions`` sequence
    :return: the same psf object, with its box set
    """
    coords = crd.positions
    # collect each Cartesian component, then take its extent (max - min)
    xs = [coord[0] for coord in coords]
    ys = [coord[1] for coord in coords]
    zs = [coord[2] for coord in coords]
    boxlx = max(xs) - min(xs)
    boxly = max(ys) - min(ys)
    boxlz = max(zs) - min(zs)
    psf.setBox(boxlx, boxly, boxlz)
    return psf
| StarcoderdataPython |
1806886 | # Copyright (c) 2020, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Authors: <NAME>, <NAME>
import logging
import os
import shutil
from git import Repo
from git.exc import GitCommandError
from skilletlib.exceptions import SkilletLoaderException
logger = logging.getLogger(__name__)
class Git:
    """
    Git remote

    This class provides an interface to Github repositories containing Skillets or XML snippets.
    """

    def __init__(self, repo_url, store=os.getcwd()):
        """
        Initialize a new Git repo object

        :param repo_url: URL path to repository.
        :param store: Directory to store repository in. Defaults to the current directory.
        :raises SkilletLoaderException: if no git client is installed on this host
        """
        if not self.check_git_exists():
            raise SkilletLoaderException('A git client must be installed to use this remote!')

        self.repo_url = repo_url
        self.store = store
        self.Repo = None
        self.name = ''
        self.path = ''
        self.update = ''

    def clone(self, name: str) -> str:
        """
        Clone a remote directory into the store.  If the repository already
        exists locally with the same remote URL it is pulled instead; if the
        local copy points at a different remote it is removed and re-cloned.

        :param name: Name of repository
        :return: (string): Path to cloned repository
        :raises ValueError: if name is empty
        :raises SkilletLoaderException: on any git failure
        """
        if not name:
            raise ValueError("Missing or bad name passed to Clone command.")

        self.name = name
        path = self.store + os.sep + name
        self.path = path

        if not os.path.exists(path):
            # fresh checkout
            logger.debug("Cloning into {}".format(path))
            self.Repo = Repo.clone_from(self.repo_url, path)
            return path

        self.Repo = Repo(path)
        # FIX for #56
        if self.repo_url in self.Repo.remotes.origin.urls:
            # same remote: just update the existing clone
            logger.debug("Updating repository...")
            try:
                self.Repo.remotes.origin.pull()
            except GitCommandError as gce:
                # BUG FIX: log/exception previously said "clone" for a pull failure
                logger.error('Could not pull repository!')
                raise SkilletLoaderException(f'Error updating repository {gce}')
            return path

        # the local directory tracks a different remote: remove it and re-clone
        logger.info('Found new remote URL for this named repo')
        try:
            # only recourse is to remove the .git directory
            if os.path.exists(os.path.join(path, '.git')):
                shutil.rmtree(path)
            else:
                raise SkilletLoaderException('Refusing to remove non-git directory')
        except OSError:
            raise SkilletLoaderException('Repo directory exists!')

        logger.debug("Cloning into {}".format(path))
        try:
            self.Repo = Repo.clone_from(self.repo_url, path)
        except GitCommandError as gce:
            raise SkilletLoaderException(f'Could not clone repository {gce}')

        # BUG FIX: the original fell off the end of the method here and
        # returned None after re-cloning a repo whose remote URL had changed
        return path

    def branch(self, branch_name: str) -> None:
        """
        Checkout the specified branch.

        :param branch_name: Branch to checkout.
        :return: None
        """
        logger.debug("Checking out: " + branch_name)
        if self.update:
            logger.debug("Updating branch.")
            self.Repo.remotes.origin.pull()
        self.Repo.git.checkout(branch_name)

    @staticmethod
    def check_git_exists():
        """Return the path of the git executable, or None when git is not installed."""
        return shutil.which("git")
| StarcoderdataPython |
1738334 | <reponame>bgeisberger/gnsq
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import logging
from collections import defaultdict, deque
import blinker
import gevent
from gevent.event import AsyncResult
from gevent.pool import Group
from gevent.queue import Queue, Empty
from . import protocol as nsq
from .backofftimer import BackoffTimer
from .decorators import cached_property
from .errors import NSQException, NSQNoConnections
from .nsqd import NsqdTCPClient
from .states import INIT, RUNNING, CLOSED
from .util import parse_nsqds
class Producer(object):
    """High level NSQ producer.
    A Producer will connect to the nsqd tcp addresses and support async
    publishing (``PUB`` & ``MPUB`` & ``DPUB``) of messages to `nsqd` over the
    TCP protocol.
    Example publishing a message::
        from gnsq import Producer
        producer = Producer('localhost:4150')
        producer.start()
        producer.publish('topic', b'hello world')
    :param nsqd_tcp_addresses: a sequence of string addresses of the nsqd
        instances this producer should connect to
    :param max_backoff_duration: the maximum time we will allow a backoff state
        to last in seconds. If zero, backoff will not occur
    :param **kwargs: passed to :class:`~gnsq.NsqdTCPClient` initialization
    """
    def __init__(self, nsqd_tcp_addresses=[], max_backoff_duration=128,
                 **kwargs):
        if not nsqd_tcp_addresses:
            raise ValueError('must specify at least one nsqd or lookupd')
        # (the mutable default above is only read, never mutated)
        self.nsqd_tcp_addresses = parse_nsqds(nsqd_tcp_addresses)
        self.max_backoff_duration = max_backoff_duration
        self.conn_kwargs = kwargs
        self.logger = logging.getLogger(__name__)
        # Lifecycle: INIT -> RUNNING (start()) -> CLOSED (close()).
        self._state = INIT
        # Pool of ready connections; checked out per publish, then returned.
        self._connections = Queue()
        # Per-connection reconnect backoff timers, created lazily on first use.
        self._connection_backoffs = defaultdict(self._create_backoff)
        # Per-connection FIFO of AsyncResults awaiting an nsqd response.
        self._response_queues = {}
        # Greenlets running _listen() for each live connection.
        self._workers = Group()
    @cached_property
    def on_response(self):
        """Emitted when a response is received.
        The signal sender is the producer and the ``response`` is sent as an
        argument.
        """
        return blinker.Signal(doc='Emitted when a response is received.')
    @cached_property
    def on_error(self):
        """Emitted when an error is received.
        The signal sender is the producer and the ``error`` is sent as an
        argument.
        """
        return blinker.Signal(doc='Emitted when a error is received.')
    @cached_property
    def on_auth(self):
        """Emitted after a connection is successfully authenticated.
        The signal sender is the producer and the ``conn`` and parsed
        ``response`` are sent as arguments.
        """
        return blinker.Signal(doc='Emitted when a response is received.')
    @cached_property
    def on_close(self):
        """Emitted after :meth:`close`.
        The signal sender is the producer.
        """
        return blinker.Signal(doc='Emitted after the consumer is closed.')
    def start(self):
        """Start discovering and listing to connections."""
        if self._state == CLOSED:
            raise NSQException('producer already closed')
        if self.is_running:
            self.logger.warning('producer already started')
            return
        self.logger.debug('starting producer...')
        self._state = RUNNING
        # Open one TCP connection per configured nsqd address.
        for address in self.nsqd_tcp_addresses:
            address, port = address.split(':')
            self.connect_to_nsqd(address, int(port))
    def close(self):
        """Immediately close all connections and stop workers."""
        if not self.is_running:
            return
        self._state = CLOSED
        self.logger.debug('closing connection(s)')
        # Drain the pool and close each stream; setting CLOSED first also
        # stops handle_connection_failure from scheduling reconnects.
        while True:
            try:
                conn = self._connections.get(block=False)
            except Empty:
                break
            conn.close_stream()
        self.on_close.send(self)
    def join(self, timeout=None, raise_error=False):
        """Block until all connections have closed and workers stopped."""
        self._workers.join(timeout, raise_error)
    @property
    def is_running(self):
        """Check if the producer is currently running."""
        return self._state == RUNNING
    def connect_to_nsqd(self, address, port):
        # Connect, identify and (on success) add the connection to the pool.
        if not self.is_running:
            return
        conn = NsqdTCPClient(address, port, **self.conn_kwargs)
        self.logger.debug('[%s] connecting...', conn)
        conn.on_response.connect(self.handle_response)
        conn.on_error.connect(self.handle_error)
        conn.on_auth.connect(self.handle_auth)
        try:
            conn.connect()
            conn.identify()
        except NSQException as error:
            self.logger.warning('[%s] connection failed (%r)', conn, error)
            self.handle_connection_failure(conn)
            return
        # Check if we've closed since we started
        if not self.is_running:
            self.handle_connection_failure(conn)
            return
        self.logger.info('[%s] connection successful', conn)
        self.handle_connection_success(conn)
    def _listen(self, conn):
        # Worker greenlet: pump the connection's read loop until it dies.
        try:
            conn.listen()
        except NSQException as error:
            self.logger.warning('[%s] connection lost (%r)', conn, error)
            self.handle_connection_failure(conn)
    def handle_connection_success(self, conn):
        self._response_queues[conn] = deque()
        self._put_connection(conn)
        self._workers.spawn(self._listen, conn)
        # Reset the backoff interval after a successful connect.
        self._connection_backoffs[conn].success()
    def handle_connection_failure(self, conn):
        conn.close_stream()
        # Fail any publishes still waiting on this connection.
        self._clear_responses(conn, NSQException('connection closed'))
        if not self.is_running:
            return
        # Schedule a reconnect with exponential backoff.
        seconds = self._connection_backoffs[conn].failure().get_interval()
        self.logger.debug('[%s] retrying in %ss', conn, seconds)
        gevent.spawn_later(
            seconds, self.connect_to_nsqd, conn.address, conn.port)
    def handle_auth(self, conn, response):
        metadata = []
        if response.get('identity'):
            metadata.append("Identity: %r" % response['identity'])
        if response.get('permission_count'):
            metadata.append("Permissions: %d" % response['permission_count'])
        if response.get('identity_url'):
            metadata.append(response['identity_url'])
        self.logger.info('[%s] AUTH accepted %s', conn, ' '.join(metadata))
        self.on_auth.send(self, conn=conn, response=response)
    def handle_response(self, conn, response):
        self.logger.debug('[%s] response: %s', conn, response)
        # An OK resolves the oldest pending publish on this connection;
        # error responses are handled via handle_error -> _clear_responses.
        if response == nsq.OK:
            if conn in self._response_queues:
                result = self._response_queues[conn].popleft()
                result.set(response)
        self.on_response.send(self, response=response)
    def handle_error(self, conn, error):
        self.logger.debug('[%s] error: %s', conn, error)
        self._clear_responses(conn, error)
        self.on_error.send(self, error=error)
    def _create_backoff(self):
        return BackoffTimer(max_interval=self.max_backoff_duration)
    def _clear_responses(self, conn, error):
        # All relevant errors are fatal
        for result in self._response_queues.pop(conn, []):
            result.set_exception(error)
    def _get_connection(self, block=True, timeout=None):
        # Check a live connection out of the pool, discarding dead ones.
        if not self.is_running:
            raise NSQException('producer not running')
        while True:
            try:
                conn = self._connections.get(block=block, timeout=timeout)
            except Empty:
                raise NSQNoConnections
            if conn.is_connected:
                return conn
            # Discard closed connections
    def _put_connection(self, conn):
        if not self.is_running:
            return
        self._connections.put(conn)
    def publish(self, topic, data, defer=None, block=True, timeout=None,
                raise_error=True):
        """Publish a message to the given topic.
        :param topic: the topic to publish to
        :param data: bytestring data to publish
        :param defer: duration in milliseconds to defer before publishing
            (requires nsq 0.3.6)
        :param block: wait for a connection to become available before
            publishing the message. If block is `False` and no connections
            are available, :class:`~gnsq.errors.NSQNoConnections` is raised
        :param timeout: if timeout is a positive number, it blocks at most
            ``timeout`` seconds before raising
            :class:`~gnsq.errors.NSQNoConnections`
        :param raise_error: if ``True``, it blocks until a response is received
            from the nsqd server, and any error response is raised. Otherwise
            an :class:`~gevent.event.AsyncResult` is returned
        """
        result = AsyncResult()
        conn = self._get_connection(block=block, timeout=timeout)
        try:
            # Enqueue the result *before* sending so handle_response can
            # match the eventual OK to this publish in FIFO order.
            self._response_queues[conn].append(result)
            conn.publish(topic, data, defer=defer)
        finally:
            self._put_connection(conn)
        if raise_error:
            return result.get()
        return result
    def multipublish(self, topic, messages, block=True, timeout=None,
                     raise_error=True):
        """Publish an iterable of messages to the given topic.
        :param topic: the topic to publish to
        :param messages: iterable of bytestrings to publish
        :param block: wait for a connection to become available before
            publishing the message. If block is `False` and no connections
            are available, :class:`~gnsq.errors.NSQNoConnections` is raised
        :param timeout: if timeout is a positive number, it blocks at most
            ``timeout`` seconds before raising
            :class:`~gnsq.errors.NSQNoConnections`
        :param raise_error: if ``True``, it blocks until a response is received
            from the nsqd server, and any error response is raised. Otherwise
            an :class:`~gevent.event.AsyncResult` is returned
        """
        result = AsyncResult()
        conn = self._get_connection(block=block, timeout=timeout)
        try:
            self._response_queues[conn].append(result)
            conn.multipublish(topic, messages)
        finally:
            self._put_connection(conn)
        if raise_error:
            return result.get()
        return result
| StarcoderdataPython |
3598579 | <filename>10_telephone/telephone.py
#!/usr/bin/env python3
"""
Author : hongm <<EMAIL>>
Date : 2022-04-02
Purpose: Rock the Casbah
"""
import argparse
import random
import os
import string
import sys
# --------------------------------------------------
def get_args():
    """Parse, normalize and validate the command-line arguments.

    If the positional ``text`` argument names an existing file, its
    contents (trailing whitespace stripped) replace the argument value.
    Exits with a parser error when --mutations is outside [0, 1).
    """
    parser = argparse.ArgumentParser(
        description='Telephone',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text', metavar='str', help='Input text')
    parser.add_argument('-s', '--seed',
                        metavar='seed',
                        type=int,
                        default=None,
                        help='Random seed value')
    parser.add_argument('-m', '--mutations',
                        metavar='mutations',
                        type=float,
                        default=0.0,
                        help='Percent mutations')
    args = parser.parse_args()
    # The positional argument may be a filename rather than literal text.
    if os.path.isfile(args.text):
        args.text = open(args.text).read().rstrip()
    if not 0 <= args.mutations < 1:
        parser.error(f'--mutations "{args.mutations}" must be between 0 and 1')
    return args
# --------------------------------------------------
def main():
    """Mutate a fraction of the input characters and print both versions.

    The number of mutated positions is ``round(len(text) * mutations)``;
    each chosen position is replaced with a random letter or punctuation
    character (which may, by chance, equal the original character).
    """
    args = get_args()
    random.seed(args.seed)
    text = args.text
    num_mutations = round(len(text) * args.mutations)
    alpha = string.ascii_letters + string.punctuation
    new_text = text
    # Pick distinct positions to mutate, then splice in a random character.
    for index in random.sample(range(len(text)), num_mutations):
        new_char = random.choice(alpha)
        new_text = new_text[:index] + new_char + new_text[index + 1:]
    print(f'You said: "{text}"')
    print(f'I heard : "{new_text}"')
def random_string():
    """Return a random 5-character string of lowercase letters and digits."""
    pool = string.ascii_lowercase + string.digits
    return ''.join(random.choices(pool, k=5))
# --------------------------------------------------
# Standard entry-point guard: run main() only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4849329 | <gh_stars>0
from itertools import product

# Read two space-separated integer lists from stdin and print their
# Cartesian product as space-separated tuples.
a = list(map(int, input().split()))
b = list(map(int, input().split()))
# product() is already iterable: no need to materialize it in a list
# before unpacking (the old list() wrapper was a needless copy).
print(*product(a, b))
6494554 | import bpy
import time
from bpy.types import Operator, Panel, PropertyGroup
from bpy.props import PointerProperty, StringProperty, FloatProperty
class VideoSMaskSettings(PropertyGroup):
    """Add-on settings; register() attaches an instance to each Scene."""
    # Blender 2.80+ style: annotated class attributes declare bpy properties.
    url: StringProperty(
        name="URL",
        description="Server address",
        default="localhost:9999",
    )
    # FIX: corrected the misspelled UI label "Treshold" -> "Threshold".
    threshold: FloatProperty(
        name="Threshold",
        description="The min likeness with template",
        default=0.50,
        min=0.0,
        max=1.0,
    )
# VIDEO_OT_SMask
# VIDEO_OT_Color
# VIDEO_OT_Clean
# VIDEO_OT_Zoomx
# VIDEO_OT_Slowx
class VIDEO_OT_SMask(Operator):
    """Operator stub: currently only logs the configured settings."""
    bl_idname = "video.smask"
    bl_label = "SMask"
    bl_description = "Search Mask with AI"
    def execute(self, context):
        # Settings instance attached to the scene by register().
        settings = context.scene.video_smask_settings
        # NOTE(review): the actual mask-search logic is not implemented;
        # these prints look like development scaffolding.
        print(settings.url)
        print("bl_idname = ", self.bl_idname)
        print("bl_label = ", self.bl_label)
        print("bl_description = ", self.bl_description)
        return {"FINISHED"}
class VIDEO_PT_SMask(Panel):
    """Sidebar panel (sequencer UI region) exposing the SMask settings."""
    bl_label = "SMask"
    bl_category = "SMask"
    # bl_space_type = "CLIP_EDITOR"
    bl_space_type = "SEQUENCE_EDITOR"
    bl_region_type = "UI"
    @classmethod
    def poll(cls, context):
        # NOTE(review): this effectively always returns True — the real
        # mode/view check is commented out below and the bare except
        # silently swallows any error.
        try:
            sc = context.space_data
            print("sc.mode, view = ", sc.mode, sc.view)
            return True
            # return sc.mode == "MASK" and sc.view == "CLIP"
        except:
            pass
        return True
    def draw(self, context):
        # Render the two settings fields and the operator button.
        layout = self.layout
        settings = context.scene.video_smask_settings
        layout.prop(settings, "url")
        layout.prop(settings, "threshold")
        layout.operator("video.smask", icon="MOD_MASK")
# Every class this add-on registers/unregisters, in registration order.
classes = (VIDEO_OT_SMask, VIDEO_PT_SMask, VideoSMaskSettings)
def register():
    """Register the add-on classes and attach the settings to Scene."""
    from bpy.utils import register_class
    for klass in classes:
        register_class(klass)
    bpy.types.Scene.video_smask_settings = PointerProperty(type=VideoSMaskSettings)
def unregister():
    """Remove the Scene settings pointer and unregister classes in reverse."""
    from bpy.utils import unregister_class
    for klass in reversed(classes):
        unregister_class(klass)
    del bpy.types.Scene.video_smask_settings
if __name__ == "__main__":
register()
| StarcoderdataPython |
12820738 | <gh_stars>0
#!/usr/bin/env python3
import numpy as np
import struct
import sys
import nibabel as nib
import pdb
def readNii(fname):
    """Read a NIFTI file and return its header fields plus voxel data.

    :param fname: path to a NIFTI-1 or NIFTI-2 file
    :return: dict with one entry per header field, and the voxel data
        (as stored on disk) under the key ``'data'``
    :raises ValueError: if the file is not a NIFTI-1/NIFTI-2 image
    """
    img = nib.load(fname)
    # FIX: use isinstance against the top-level nibabel image classes.
    # The old code compared type() against nib.nifti1.Nifti2Image, but
    # Nifti2Image is defined in the nifti2 module, so that attribute
    # lookup itself was fragile.
    if not isinstance(img, (nib.Nifti1Image, nib.Nifti2Image)):
        raise ValueError(fname + " doesn't look like a NIFTI image.")
    retdict = {}
    # Copy header information into new dict
    for key in img.header:
        retdict[key] = img.header[key]
    # FIX: img.get_data() is deprecated (removed in nibabel 5). The
    # documented drop-in replacement that keeps the on-disk dtype is
    # np.asanyarray(img.dataobj); np.array(...) still forces a full
    # in-memory copy instead of a memmap view.
    retdict['data'] = np.array(np.asanyarray(img.dataobj))
    return retdict
def writeNii(data, fname, spacings):
    """Write a 3D numpy array to *fname* as a NIFTI-1 image.

    :param data: 3D voxel array
    :param fname: output filename
    :param spacings: (x, y, z) voxel spacings, encoded on the diagonal of
        the affine transform
    """
    assert np.ndim(data) == 3, "Need to have 3D data for writeNii"
    dx, dy, dz = spacings
    # Diagonal affine stretches each axis by its voxel spacing.
    affine = np.diag([dx, dy, dz, 1])
    nib.Nifti1Image(data, affine).to_filename(fname)
    return
def isNii(fname):
    """Determine if the file is a valid NIFTI file.

    :param fname: path of the file to inspect
    :return: 0 if not a NIFTI, 1 for NIFTI-1, 2 for NIFTI-2
    """
    # Idea from https://brainder.org/2015/04/03/the-nifti-2-file-format
    # under section titled "NIFTI-1 or NIFTI-2?"
    niftiType = 0  # Encode "failure" as 0, and Nifti 1/2 as 1/2
    with open(fname, 'rb') as infile:
        # Read first 4 bytes: value must be 348 for Nifti1 or 540 for Nifti2
        hdrSz = infile.read(4)
        # FIX: a file shorter than 4 bytes used to crash struct.unpack with
        # struct.error; treat it as "not a NIFTI" instead.
        if len(hdrSz) < 4:
            return 0
        # Try a little-endian read. Need either 348 or 540
        littleVal = struct.unpack("<i", hdrSz)[0]
        if littleVal == 348:
            niftiType = 1
        elif littleVal == 540:
            niftiType = 2
        else:
            # Try a big-endian read
            bigVal = struct.unpack(">i", hdrSz)[0]
            if bigVal == 348:
                niftiType = 1
            elif bigVal == 540:
                niftiType = 2
            else:
                niftiType = 0
        # If neither number matched, this is not a NIFTI
        if niftiType == 0:
            return niftiType
        # Check magic location to make sure this is a NIFTI and not a lucky hit
        if niftiType == 1:
            infile.seek(344)  # NIFTI-1 magic lives at byte offset 344
            magicStr = infile.read(4)
            if magicStr == b"n+1\x00" or magicStr == b"ni1\x00":
                return niftiType  # It matches!
            else:
                return 0  # This ain't no NIFTI
        if niftiType == 2:
            # NIFTI-2 magic follows the header size, at byte offset 4.
            magicStr = infile.read(8)
            if magicStr == b"n+2\x00\x0d\x0a\x1a\x0a":
                return niftiType  # Matches known NIFTI2 string
            else:
                return 0  # Not a NIFTI
| StarcoderdataPython |
12846938 | # Generated by Django 3.1 on 2020-09-09 19:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1, 2020-09-09)."""
    dependencies = [
        ('patient_app', '0005_auto_20200817_1713'),
    ]
    operations = [
        # NOTE(review): the datetime defaults below are fixed timestamps
        # frozen at makemigrations time — typical when the model default is
        # the *value* datetime.now() instead of the callable timezone.now.
        # Confirm against the model; do not edit an already-applied migration.
        migrations.AlterField(
            model_name='booking_patient',
            name='date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 9, 19, 21, 45, 74281)),
        ),
        migrations.AlterField(
            model_name='patient_register',
            name='date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 9, 19, 21, 45, 73284)),
        ),
        migrations.AlterField(
            model_name='patient_register',
            name='image',
            field=models.ImageField(default='log.png', upload_to='patient_app/images'),
        ),
    ]
| StarcoderdataPython |
3250243 | import rlp
from rlp.sedes import big_endian_int, binary
from ethereum import utils
from plasma.utils.utils import get_sender, sign
from enum import IntEnum
from web3 import Web3
class Transaction(rlp.Serializable):
    """A Plasma-chain transaction with up to two inputs and four outputs.

    Each output is either a plain ``transfer`` or a ``make_order``; the
    ``tokenprice*`` fields are only meaningful for ``make_order`` outputs.
    ``sig1``/``sig2`` authorize spending the two referenced inputs and sign
    the hash of the signature-less RLP encoding (see :attr:`hash`).
    """
    TxnType = IntEnum('TxnType', 'transfer make_order take_order')
    UTXOType = IntEnum('UTXOType', 'transfer make_order')
    SigType = IntEnum('SigType', 'txn utxo')
    fields = [
        ('txntype', big_endian_int),
        ('sigtype', big_endian_int),
        ('blknum1', big_endian_int),
        ('txindex1', big_endian_int),
        ('oindex1', big_endian_int),
        ('blknum2', big_endian_int),
        ('txindex2', big_endian_int),
        ('oindex2', big_endian_int),
        ('utxotype1', big_endian_int),
        ('newowner1', utils.address),
        ('amount1', big_endian_int),
        ('tokenprice1', big_endian_int),
        ('cur1', utils.address),
        ('utxotype2', big_endian_int),
        ('newowner2', utils.address),
        ('amount2', big_endian_int),
        ('tokenprice2', big_endian_int),
        ('cur2', utils.address),
        ('utxotype3', big_endian_int),
        ('newowner3', utils.address),
        ('amount3', big_endian_int),
        ('tokenprice3', big_endian_int),
        ('cur3', utils.address),
        ('utxotype4', big_endian_int),
        ('newowner4', utils.address),
        ('amount4', big_endian_int),
        ('tokenprice4', big_endian_int),
        ('cur4', utils.address),
        ('sig1', binary),
        ('sig2', binary),
        ('txnsig', binary)
    ]

    def __init__(self,
                 txntype,
                 blknum1, txindex1, oindex1,
                 blknum2, txindex2, oindex2,
                 utxotype1, newowner1, amount1, tokenprice1, cur1,
                 utxotype2, newowner2, amount2, tokenprice2, cur2,
                 utxotype3, newowner3, amount3, tokenprice3, cur3,
                 utxotype4, newowner4, amount4, tokenprice4, cur4,
                 sigtype=SigType.utxo,
                 sig1=b'\x00' * 65,
                 sig2=b'\x00' * 65,
                 txnsig=b'\x00' * 65):
        """Build a transaction from two input references and four outputs.

        Input *n* is identified by (blknum_n, txindex_n, oindex_n); output
        *n* by its utxotype, new owner, amount, token price and currency
        address. Signatures default to 65 zero bytes (unsigned).
        """
        # Transaction Type
        self.txntype = txntype
        # Signature Type
        self.sigtype = sigtype
        self.txnsig = txnsig
        # Input 1
        self.blknum1 = blknum1
        self.txindex1 = txindex1
        self.oindex1 = oindex1
        self.sig1 = sig1
        # Input 2
        self.blknum2 = blknum2
        self.txindex2 = txindex2
        self.oindex2 = oindex2
        self.sig2 = sig2
        # Outputs
        self.utxotype1 = utxotype1
        self.newowner1 = utils.normalize_address(newowner1)
        self.amount1 = amount1
        self.tokenprice1 = tokenprice1  # This field is only used if utxotype1 == make_order
        self.cur1 = utils.normalize_address(cur1)
        self.utxotype2 = utxotype2
        self.newowner2 = utils.normalize_address(newowner2)
        self.amount2 = amount2
        self.tokenprice2 = tokenprice2  # This field is only used if utxotype2 == make_order
        self.cur2 = utils.normalize_address(cur2)
        self.utxotype3 = utxotype3
        self.newowner3 = utils.normalize_address(newowner3)
        self.amount3 = amount3
        self.tokenprice3 = tokenprice3  # This field is only used if utxotype3 == make_order
        self.cur3 = utils.normalize_address(cur3)
        self.utxotype4 = utxotype4
        self.newowner4 = utils.normalize_address(newowner4)
        self.amount4 = amount4
        self.tokenprice4 = tokenprice4  # This field is only used if utxotype4 == make_order
        self.cur4 = utils.normalize_address(cur4)
        # Confirmation signatures and per-output spent flags (not serialized).
        self.confirmation1 = None
        self.confirmation2 = None
        self.spent1 = False
        self.spent2 = False
        self.spent3 = False
        self.spent4 = False

    @property
    def hash(self):
        # Hash of the RLP encoding *without* the signature fields — the
        # message that sig1/sig2 sign.
        # FIX: removed a stray duplicated copy of the "output 1" assignment
        # block that had been pasted after this property.
        return utils.sha3(rlp.encode(self, UnsignedTransaction))

    @property
    def readable_str(self):
        """Hex-encoded, human-readable summary of inputs and outputs."""
        output_str = ""
        output_str += "input_utxos:\n"
        output_str += "\tutxo1 - blknum: %d\ttxindex: %d\toindex: %d\n" % (self.blknum1, self.txindex1, self.oindex1) if self.blknum1 else ""
        output_str += "\tutxo2 - blknum: %d\ttxindex: %d\toindex: %d\n" % (self.blknum2, self.txindex2, self.oindex2) if self.blknum2 else ""
        output_str += "output_utxos:\n"
        output_str += "\tutxo1 - utxotype: %s\tnewowner: %s\tamount: %f\ttokenprice: %f\ttoken address: %s\n" % (self.UTXOType(self.utxotype1).name,
                                                                                                                 self.newowner1.hex(),
                                                                                                                 Web3.fromWei(self.amount1, 'ether'),
                                                                                                                 Web3.fromWei(self.tokenprice1, 'ether'),
                                                                                                                 self.cur1.hex()) if self.utxotype1 else ""
        output_str += "\tutxo2 - utxotype: %s\tnewowner: %s\tamount: %f\ttokenprice: %f\ttoken address: %s\n" % (self.UTXOType(self.utxotype2).name,
                                                                                                                 self.newowner2.hex(),
                                                                                                                 Web3.fromWei(self.amount2, 'ether'),
                                                                                                                 Web3.fromWei(self.tokenprice2, 'ether'),
                                                                                                                 self.cur2.hex()) if self.utxotype2 else ""
        output_str += "\tutxo3 - utxotype: %s\tnewowner: %s\tamount: %f\ttokenprice: %f\ttoken address: %s\n" % (self.UTXOType(self.utxotype3).name,
                                                                                                                 self.newowner3.hex(),
                                                                                                                 Web3.fromWei(self.amount3, 'ether'),
                                                                                                                 Web3.fromWei(self.tokenprice3, 'ether'),
                                                                                                                 self.cur3.hex()) if self.utxotype3 else ""
        output_str += "\tutxo4 - utxotype: %s\tnewowner: %s\tamount: %f\ttokenprice: %f\ttoken address: %s\n" % (self.UTXOType(self.utxotype4).name,
                                                                                                                 self.newowner4.hex(),
                                                                                                                 Web3.fromWei(self.amount4, 'ether'),
                                                                                                                 Web3.fromWei(self.tokenprice4, 'ether'),
                                                                                                                 self.cur4.hex()) if self.utxotype4 else ""
        return Web3.toHex(text=output_str)

    @property
    def merkle_hash(self):
        # Leaf hash used in the block's Merkle tree: txn hash plus both sigs.
        return utils.sha3(self.hash + self.sig1 + self.sig2)

    def sign1(self, key):
        """Sign input 1 with the given private key."""
        self.sig1 = sign(self.hash, key)

    def sign2(self, key):
        """Sign input 2 with the given private key."""
        self.sig2 = sign(self.hash, key)

    @property
    def is_single_utxo(self):
        # True when the second input slot is unused (blknum2 == 0).
        return self.blknum2 == 0

    @property
    def sender1(self):
        """Address recovered from sig1 over the unsigned-transaction hash."""
        return get_sender(self.hash, self.sig1)

    @property
    def sender2(self):
        """Address recovered from sig2 over the unsigned-transaction hash."""
        return get_sender(self.hash, self.sig2)

    def __repr__(self):
        inputs = "inputs: [blknum1: %d; txindex1: %d; oindex1: %d\n\t blknum2: %d; txindex2: %d; oindex2: %d]" % \
            (self.blknum1,
             self.txindex1,
             self.oindex1,
             self.blknum2,
             self.txindex2,
             self.oindex2)
        # FIX: the fourth segment used to be labelled "utxotype3/newowner3/..."
        # while printing the 4th output's values (copy-paste error).
        outputs = "ouputs: [utxotype1: %s, newowner1: 0x%s..., amount1: %d, tokenprice1: %d, cur1: 0x%s...]\n" % \
            (None if self.utxotype1 == 0 else self.UTXOType(self.utxotype1).name,
             self.newowner1.hex()[0:5],
             self.amount1,
             self.tokenprice1,
             self.cur1.hex()[0:5]) + \
            "\t [utxotype2: %s, newowner2: 0x%s..., amount2: %d, tokenprice2: %d, cur2: 0x%s...]\n" % \
            (None if self.utxotype2 == 0 else self.UTXOType(self.utxotype2).name,
             self.newowner2.hex()[0:5],
             self.amount2,
             self.tokenprice2,
             self.cur2.hex()[0:5]) + \
            "\t [utxotype3: %s, newowner3: 0x%s..., amount3: %d, tokenprice3: %d, cur3: 0x%s...]\n" % \
            (None if self.utxotype3 == 0 else self.UTXOType(self.utxotype3).name,
             self.newowner3.hex()[0:5],
             self.amount3,
             self.tokenprice3,
             self.cur3.hex()[0:5]) + \
            "\t [utxotype4: %s, newowner4: 0x%s..., amount4: %d, tokenprice4: %d, cur4: 0x%s...]\n" % \
            (None if self.utxotype4 == 0 else self.UTXOType(self.utxotype4).name,
             self.newowner4.hex()[0:5],
             self.amount4,
             self.tokenprice4,
             self.cur4.hex()[0:5])
        return "[%s\n %s]" % (inputs, outputs)
# Serialization class omitting the two input signatures — its RLP encoding
# is the message that gets hashed and signed (see Transaction.hash).
UnsignedTransaction = Transaction.exclude(['sig1', 'sig2'])
| StarcoderdataPython |
1884113 | from grtoolkit.Math import solveEqs
def kinematicsEq(find, printEq=False, **kwargs):
    """Solve the constant-acceleration kinematics equations for *find*.

    Variables:
        d  -- distance        d0 -- initial distance
        v  -- velocity        v0 -- initial velocity
        a  -- acceleration    t  -- time
    Known values are passed as keyword arguments.
    """
    equations = [
        "Eq(d, v*t)",
        "Eq(v, v0 + a*t)",                # constant acceleration only
        "Eq(d, d0 + v0*t + 0.5*a*t**2)",  # constant acceleration only
        "Eq(d, d0 + v*t - 0.5*a*t**2)",
        "Eq(v**2, v0**2 + 2*a*d)",
    ]
    return solveEqs(equations, find, printEq=printEq, **kwargs)
# Bottom-up (tabulated) dynamic-programming Fibonacci.
d = [0] * 100          # d[i] will hold the i-th Fibonacci number
d[1] = d[2] = 1        # base cases: F(1) = F(2) = 1
n = 99
# Fill the table iteratively instead of recursing.
for idx in range(3, n + 1):
    d[idx] = d[idx - 1] + d[idx - 2]
print(d[n])            # prints 218922995834555169026
6651210 | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .engine import Engine
from ..patterns import rectangular, cuboid, circular
from ...log import PCG_ROOT_LOGGER
from ...simulation import Light
from ...simulation.properties import Pose
from ...utils import is_array
class PatternEngine(Engine):
    """Engine that places copies of one model following a geometric pattern
    (rectangular grid, circle, or cuboid grid of poses)."""
    _LABEL = 'pattern'
    # Supported pattern modes; each maps to a generator in ..patterns.
    _PATTERNS = ['rectangular', 'circular', 'cuboid']
    def __init__(
            self,
            assets_manager,
            constraints_manager=None,
            models=None,
            constraints=None,
            pose=[0, 0, 0, 0, 0, 0],
            pattern=None,
            collision_checker=None,
            **kwargs):
        # NOTE(review): ``pose`` is accepted but never used in this class —
        # confirm whether the base Engine or callers rely on it. Remaining
        # kwargs are treated as pattern parameters and validated below.
        Engine.__init__(
            self,
            assets_manager=assets_manager,
            constraints_manager=constraints_manager,
            models=models,
            constraints=constraints,
            collision_checker=collision_checker)
        if pattern not in self._PATTERNS:
            msg = 'Invalid pattern mode, options={}'.format(
                self._PATTERNS)
            PCG_ROOT_LOGGER.error(msg)
            raise ValueError(msg)
        self._pattern = pattern
        self._params = dict()
        self.set_pattern_parameters(**kwargs)
    @property
    def pattern(self):
        # Current pattern mode (one of _PATTERNS).
        return self._pattern
    @pattern.setter
    def pattern(self, value):
        assert value in self._PATTERNS, \
            'Invalid pattern mode, options={}'.format(
                self._PATTERNS)
        self._pattern = value
    def set_pattern_parameters(self, **kwargs):
        """Validate and store the keyword parameters for the current pattern.

        Required keys depend on ``self._pattern``; sizes must come with
        either step sizes (``step_*``) or counts (``n_*``). Raises
        AssertionError on missing or out-of-range parameters.
        """
        if self._pattern == 'rectangular':
            assert 'x_length' in kwargs and 'y_length' in kwargs
            assert ('step_x' in kwargs and 'step_y' in kwargs) or \
                ('n_x' in kwargs and 'n_y' in kwargs)
            if 'center' in kwargs:
                assert isinstance(kwargs['center'], bool)
        elif self._pattern == 'circular':
            assert 'radius' in kwargs
            assert kwargs['radius'] > 0, 'Radius must be greater than 0'
            assert 'max_theta' in kwargs
            assert kwargs['max_theta'] >= 0 and \
                kwargs['max_theta'] <= 2 * np.pi
            assert 'step_theta' in kwargs or \
                'n_theta' in kwargs
            if 'n_radius' in kwargs:
                assert kwargs['n_radius'] > 0
            if 'step_radius' in kwargs:
                assert kwargs['step_radius'] > 0
        elif self._pattern == 'cuboid':
            assert 'x_length' in kwargs and \
                'y_length' in kwargs and \
                'z_length' in kwargs
            assert ('step_x' in kwargs and
                    'step_y' in kwargs and
                    'step_z' in kwargs) or \
                ('n_x' in kwargs and
                 'n_y' in kwargs and
                 'n_z' in kwargs)
            if 'center' in kwargs:
                assert isinstance(kwargs['center'], bool)
        # Optional rigid-body offset applied by the pattern generators.
        if 'pose_offset' in kwargs:
            assert isinstance(kwargs['pose_offset'], Pose) or \
                is_array(kwargs['pose_offset'])
            if is_array(kwargs['pose_offset']):
                assert len(kwargs['pose_offset']) in [6, 7]
        self._params = kwargs
    def run(self):
        """Generate one model instance per pattern pose and return them.

        Non-light models are also registered as fixed obstacles in the
        collision checker.
        """
        assert len(self._models) == 1
        models = list()
        # Compute the list of poses for the selected pattern.
        if self._pattern == 'rectangular':
            poses = rectangular(**self._params)
        elif self._pattern == 'circular':
            poses = circular(**self._params)
        elif self._pattern == 'cuboid':
            poses = cuboid(**self._params)
        else:
            raise ValueError(
                'Invalid posioning pattern, '
                'provided={}'.format(self._pattern))
        for pose in poses:
            model = self._get_model(self._models[0])
            model.pose = pose
            # Enforce local constraints
            model = self.apply_local_constraints(model)
            models.append(model)
            self._logger.info('Adding model {}'.format(model.name))
            self._logger.info('\t {}'.format(model.pose))
        # Add models to collision checker
        for model in models:
            if not isinstance(model, Light):
                self._collision_checker.add_fixed_model(model)
                self._logger.info(
                    'Adding model <{}> as fixed model '
                    'in the collision checker'.format(
                        model.name))
        return models
| StarcoderdataPython |
1680217 | <gh_stars>1-10
import os
import sys
import requests
# Per-environment Globus Auth "client template" IDs used when creating clients.
APP_TEMPLATE_IDS = {
    'Production' : 'f340796d-d2b8-4957-a544-0eaa3716c5f7',
    'Preview' : 'bdbaaf2c-2925-401e-8ce8-c2b3fe6491e0',
    'Staging' : 'd5e6a0c7-2540-4421-a961-eaf454e8f6b5',
    'Test' : 'a8398609-32fa-4f3f-bad5-b9b9717ff64f',
    'Integration' : '9e995b15-be2f-444a-b242-e8d2e10c15ec',
    'Sandbox' : '556229e7-4823-41d8-8203-83a12ae4ff10'
}
# Base URI of the Globus Auth service in each environment.
AUTH_URIS = {
    'Production' : 'https://auth.globus.org',
    'Preview' : 'https://auth.preview.globus.org',
    'Staging' : 'https://auth.staging.globuscs.info',
    'Test' : 'https://auth.test.globuscs.info',
    'Integration' : 'https://auth.integration.globuscs.info',
    'Sandbox' : 'https://auth.sandbox.globuscs.info'
}
def _get_long_option(args, option_name, requires_value):
found = False
value = None
for i in range(len(args)):
if args[i] == option_name:
if requires_value:
if i == (len(args)-1):
raise SystemError(option_name + ' requires a value')
value = args.pop(i+1)
found = True
del args[i]
break
return (args, found, value)
###################################################################
#
# CREATE CLIENT FUNCTIONS
#
###################################################################
def create_client(environment, client_name=None, template_id=None):
    """POST a new Auth client in *environment*; returns the JSON response.

    Defaults: an auto-generated name and the environment's template ID.
    """
    if client_name is None:
        client_name = 'Auto generated Globus Auth client'
    if template_id is None:
        template_id = APP_TEMPLATE_IDS[environment]
    payload = {
        'client': {
            'template_id': template_id,
            'name': client_name,
        }
    }
    response = requests.post(AUTH_URIS[environment] + '/v2/api/clients',
                             json=payload)
    return response.json()
def _create_main(args0, cmd, args):
    """Handle the ``create`` sub-command: validate args and create a client."""
    usage = (
        "Usage: %s %s environment [client_name] [--template-id <id>]"
        % (args0, '|'.join(APP_TEMPLATE_IDS.keys()))
    )
    (args, found, template_id) = _get_long_option(args, '--template-id', True)
    bad_count = len(args) < 1 or len(args) > 2
    if bad_count or args[0] not in APP_TEMPLATE_IDS.keys():
        raise SystemExit(usage)
    environment = args[0]
    client_name = args[1] if len(args) == 2 else None
    resp = create_client(environment, client_name, template_id)
    print("=" * 60)
    print(resp)
    print("=" * 60)
###################################################################
#
# DELETE CLIENT FUNCTIONS
#
###################################################################
def delete_client(auth_client_id,
                  auth_client_secret,
                  environment,
                  target_client_id
                  ):
    """DELETE *target_client_id* using basic-auth credentials; returns JSON."""
    endpoint = '%s/v2/api/clients/%s' % (AUTH_URIS[environment],
                                         target_client_id)
    response = requests.delete(endpoint,
                               auth=(auth_client_id, auth_client_secret))
    return response.json()
def _delete_main(args0, cmd, args):
    """Handle the ``delete`` sub-command.

    Credentials come from the CLIENT_ID / CLIENT_SECRET environment
    variables; args must be exactly (environment, target_client_id).
    """
    usage = "Some useful usage message"
    auth_client_id = os.environ.get('CLIENT_ID')
    if auth_client_id is None:
        raise SystemExit(usage)
    auth_client_secret = os.environ.get('CLIENT_SECRET')
    if auth_client_secret is None:
        raise SystemExit(usage)
    if len(args) != 2:
        raise SystemExit(usage)
    environment = args[0]
    # BUG FIX: the old code joined the keys into one string and did a
    # substring test (so e.g. "Prod" passed); test dict membership instead.
    if environment not in AUTH_URIS:
        raise SystemExit(usage)
    target_client_id = args[1]
    resp = delete_client(
        auth_client_id,
        auth_client_secret,
        environment,
        target_client_id)
    print("=" * 60)
    print(resp)
    print("=" * 60)
###################################################################
#
# GET CLIENT FUNCTIONS
#
###################################################################
def get_client(auth_client_id,
               auth_client_secret,
               environment,
               target_client_id
               ):
    """GET the record for *target_client_id*; returns the JSON response."""
    endpoint = '%s/v2/api/clients/%s' % (AUTH_URIS[environment],
                                         target_client_id)
    response = requests.get(endpoint,
                            auth=(auth_client_id, auth_client_secret))
    return response.json()
def _get_main(args0, cmd, args):
    """Handle the ``get`` sub-command.

    Credentials come from the CLIENT_ID / CLIENT_SECRET environment
    variables; args must be exactly (environment, target_client_id).
    """
    usage = "Some useful usage message"
    auth_client_id = os.environ.get('CLIENT_ID')
    if auth_client_id is None:
        raise SystemExit(usage)
    auth_client_secret = os.environ.get('CLIENT_SECRET')
    if auth_client_secret is None:
        raise SystemExit(usage)
    if len(args) != 2:
        raise SystemExit(usage)
    environment = args[0]
    # BUG FIX: replaced the substring test against '|'.join(keys) (which
    # accepted bogus values like "Prod") with real dict membership.
    if environment not in AUTH_URIS:
        raise SystemExit(usage)
    target_client_id = args[1]
    resp = get_client(
        auth_client_id,
        auth_client_secret,
        environment,
        target_client_id)
    print("=" * 60)
    print(resp)
    print("=" * 60)
###################################################################
#
# UPDATE CLIENT FUNCTIONS
#
###################################################################
def update_client(auth_client_id,
                  auth_client_secret,
                  environment,
                  client_id,
                  client_name,
                  redirect_uris
                  ):
    """PUT updated fields (name and/or redirect URIs) for *client_id*.

    Fields passed as None are left unchanged; returns the JSON response.
    """
    data = {'client': {}}
    if client_name is not None:
        # BUG FIX: previously assigned the undefined name ``name``, which
        # raised NameError whenever a client_name was supplied.
        data['client']['name'] = client_name
    if redirect_uris is not None:
        data['client']['redirect_uris'] = redirect_uris
    print('PAYLOAD: ' + str(data))
    auth_uri = AUTH_URIS[environment]
    r = requests.put(auth_uri + '/v2/api/clients/' + client_id, json=data,
                     auth=(auth_client_id, auth_client_secret))
    return r.json()
def _update_main(args0, cmd, args):
    """Entry point for the ``update`` sub-command.

    Reads CLIENT_ID/CLIENT_SECRET from the environment, extracts the optional
    --client-name and --redirect-uris long options, validates the environment,
    and PUTs the update for the given client id.
    """
    usage = (
        "Usage: %s %s environment [client_id] [--name <name>] "
        "[--redirect-uris \"space-separated list\"] "
        % (args0, '|'.join(APP_TEMPLATE_IDS.keys()))
    )
    # NOTE(review): the usage text advertises --name but the option parsed
    # below is --client-name; confirm which spelling is intended.
    auth_client_id = os.environ.get('CLIENT_ID')
    if auth_client_id is None:
        raise SystemExit(usage)
    auth_client_secret = os.environ.get('CLIENT_SECRET')
    if auth_client_secret is None:
        raise SystemExit(usage)
    if len(args) < 2:
        raise SystemExit(usage)
    (args, found, client_name) = _get_long_option(args, '--client-name', True)
    (args, found, uris) = _get_long_option(args, '--redirect-uris', True)
    if uris is not None:
        uris = uris.split(' ')
    environment = args[0]
    client_id = args[1]
    # BUG FIX: removed the duplicated `environment = args[0]` assignment and
    # replaced the substring test against "env1|env2|..." (which accepted any
    # substring of the joined string) with a direct mapping-membership check.
    if environment not in AUTH_URIS:
        raise SystemExit(usage)
    resp = update_client(auth_client_id,
                         auth_client_secret,
                         environment,
                         client_id,
                         client_name,
                         uris)
    print("=" * 60)
    print(resp)
    print("=" * 60)
###################################################################
#
# MAIN FUNCTIONS
#
###################################################################
def _parse_args(args):
cmds = ['create', 'delete', 'get', 'update']
usage = ( "Usage: %s [%s]") % (args[0], '|'.join(cmds))
if len(args) == 1 or args[1] not in cmds:
raise SystemExit(usage)
return (args[0], args[1], args[2:])
def main():
    """Dispatch sys.argv to the ``_<cmd>_main`` handler for the sub-command."""
    prog, cmd, rest = _parse_args(sys.argv)
    # Handlers follow the naming convention _<cmd>_main; _parse_args already
    # validated cmd, so the lookup must succeed.
    handler = globals().get('_%s_main' % cmd)
    assert handler
    return handler(prog, cmd, rest)
if __name__ == "__main__":
main()
| StarcoderdataPython |
11217705 | <gh_stars>0
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing Cloud CDN cache invalidations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.url_maps import flags
from googlecloudsdk.command_lib.compute.url_maps import url_maps_utils
from googlecloudsdk.core import properties
from googlecloudsdk.core.resource import resource_projector
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class ListCacheInvalidations(base.ListCommand):
  """List Cloud CDN cache invalidations for a URL map."""

  detailed_help = {
      'DESCRIPTION': """\
List Cloud CDN cache invalidations for a URL map. A cache invalidation instructs
Cloud CDN to stop using cached content. You can list invalidations to check
which have completed.
""",
  }

  @staticmethod
  def _Flags(parser):
    # --limit caps how many invalidation operations are returned.
    parser.add_argument(
        '--limit',
        type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
        help='The maximum number of invalidations to list.')

  @staticmethod
  def Args(parser):
    # Output columns rendered for each invalidation operation.
    parser.display_info.AddFormat("""\
        table(
          description,
          operation_http_status():label=HTTP_STATUS,
          status,
          insertTime:label=TIMESTAMP
        )""")
    parser.add_argument('urlmap', help='The name of the URL map.')

  def GetUrlMapGetRequest(self, client, args):
    # Returns the (service, verb, request) triple consumed by MakeRequests.
    return (
        client.apitools_client.urlMaps,
        'Get',
        client.messages.ComputeUrlMapsGetRequest(
            project=properties.VALUES.core.project.GetOrFail(),
            urlMap=args.urlmap))

  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    args.uri = None
    # Resolve the URL map first: invalidation operations are filtered by the
    # map's numeric id (targetId), not by its name.
    get_request = self.GetUrlMapGetRequest(client, args)
    objects = client.MakeRequests([get_request])
    urlmap_id = objects[0].id
    filter_expr = ('(operationType eq invalidateCache) (targetId eq '
                   '{urlmap_id})').format(urlmap_id=urlmap_id)
    max_results = args.limit or constants.MAX_RESULTS_PER_PAGE
    project = properties.VALUES.core.project.GetOrFail()
    requests = [(client.apitools_client.globalOperations, 'AggregatedList',
                 client.apitools_client.globalOperations.GetRequestType(
                     'AggregatedList')(
                         filter=filter_expr,
                         maxResults=max_results,
                         orderBy='creationTimestamp desc',
                         project=project))]
    return resource_projector.MakeSerializable(
        client.MakeRequests(requests=requests))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class ListCacheInvalidationsAlpha(base.ListCommand):
  """List Cloud CDN cache invalidations for a URL map."""

  detailed_help = {
      'DESCRIPTION':
          """\
List Cloud CDN cache invalidations for a URL map. A cache invalidation instructs
Cloud CDN to stop using cached content. You can list invalidations to check
which have completed.
""",
  }

  # Populated in Args(); resolves the command-line input to a URL map
  # resource reference (global or regional).
  URL_MAP_ARG = None

  @classmethod
  def Args(cls, parser):
    cls.URL_MAP_ARG = flags.UrlMapArgument(include_alpha=True)
    cls.URL_MAP_ARG.AddArgument(parser, operation_type='describe')
    parser.display_info.AddFormat("""\
        table(
          description,
          operation_http_status():label=HTTP_STATUS,
          status,
          insertTime:label=TIMESTAMP
        )""")

  def GetUrlMapGetRequest(self, args, url_map_ref, client):
    # Global and regional URL maps live on different API services, so the
    # request type depends on the resolved reference.
    if url_maps_utils.IsGlobalUrlMapRef(url_map_ref):
      return (client.apitools_client.urlMaps, 'Get',
              client.messages.ComputeUrlMapsGetRequest(
                  project=properties.VALUES.core.project.GetOrFail(),
                  urlMap=url_map_ref.Name()))
    else:
      return (client.apitools_client.regionUrlMaps, 'Get',
              client.messages.ComputeRegionUrlMapsGetRequest(
                  project=properties.VALUES.core.project.GetOrFail(),
                  urlMap=url_map_ref.Name(),
                  region=url_map_ref.region))

  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    url_map_ref = self.URL_MAP_ARG.ResolveAsResource(args, holder.resources)
    get_request = self.GetUrlMapGetRequest(args, url_map_ref, client)
    objects = client.MakeRequests([get_request])
    # Invalidation operations are filtered by the URL map's numeric id.
    urlmap_id = objects[0].id
    filter_expr = ('(operationType eq invalidateCache) (targetId eq '
                   '{urlmap_id})').format(urlmap_id=urlmap_id)
    max_results = args.limit or constants.MAX_RESULTS_PER_PAGE
    project = properties.VALUES.core.project.GetOrFail()
    requests = [(client.apitools_client.globalOperations, 'AggregatedList',
                 client.apitools_client.globalOperations.GetRequestType(
                     'AggregatedList')(
                         filter=filter_expr,
                         maxResults=max_results,
                         orderBy='creationTimestamp desc',
                         project=project))]
    return resource_projector.MakeSerializable(
        client.MakeRequests(requests=requests))
| StarcoderdataPython |
345376 | <gh_stars>1-10
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The basis for this file before inclusion and extension here is
# Copyright (c) 2017, The PyPy Project
#
# The MIT License
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
def raises(exc, func, *args):
    """Assert that calling ``func(*args)`` raises `exc`.

    Returns silently when the expected exception is raised; fails with
    AssertionError when no exception occurs.  Unexpected exception types
    propagate to the caller.
    """
    try:
        func(*args)
    except exc:
        return
    assert False
class ExecTests:
    """Tests for the semantics of ``exec``/``eval``/``compile``.

    Covers globals/locals dictionaries, custom mapping objects used as
    namespaces, scoping corner cases, unicode/bytes sources, and error
    reporting.  The exact statements are the behavior under test.
    """
    def test_string(self):
        g = {}
        l = {}
        exec("a = 3", g, l)
        assert l['a'] == 3

    def test_localfill(self):
        # With only a globals dict, assignments land in that dict.
        g = {}
        exec("a = 3", g)
        assert g['a'] == 3

    def test_builtinsupply(self):
        # exec() injects __builtins__ into a bare globals dict.
        g = {}
        exec("pass", g)
        assert '__builtins__' in g

    def test_invalidglobal(self):
        # globals must be a real dict.
        def f():
            exec('pass', 1)
        raises(TypeError, f)

    def test_invalidlocal(self):
        # locals must be a mapping.
        def f():
            exec('pass', {}, 2)
        raises(TypeError, f)

    def test_codeobject(self):
        # exec() accepts a pre-compiled code object.
        co = compile("a = 3", '<string>', 'exec')
        g = {}
        l = {}
        exec(co, g, l)
        assert l['a'] == 3

    def test_implicit(self):
        # exec() inside a function cannot rebind the function's locals.
        a = 4
        exec("a = 3")
        assert a == 4

    def test_tuplelocals(self):
        g = {}
        l = {}
        exec("a = 3", g, l)
        assert l['a'] == 3

    def test_tupleglobals(self):
        g = {}
        exec("a = 3", g)
        assert g['a'] == 3

    def test_exceptionfallthrough(self):
        # Exceptions raised by the executed code propagate to the caller.
        def f():
            exec('raise TypeError', {})
        raises(TypeError, f)

    def test_global_stmt(self):
        # A ``global`` declaration routes the assignment to the globals dict,
        # leaving the locals dict untouched.
        g = {}
        l = {}
        co = compile("global a; a=5", '', 'exec')
        #import dis
        #dis.dis(co)
        exec(co, g, l)
        assert l == {}
        assert g['a'] == 5

    def test_specialcase_free_load(self):
        # exec inside the nested f() cannot create a local `a` visible to
        # the following load, so NameError is expected.
        exec("""if 1:
            def f():
                exec('a=3')
                return a
            raises(NameError, f)\n""")

    def test_specialcase_free_load2(self):
        # A real parameter `a` shadows whatever exec tries to assign.
        exec("""if 1:
            def f(a):
                exec('a=3')
                return a
            x = f(4)\n""")
        assert eval("x") == 4

    def test_nested_names_are_not_confused(self):
        def get_nested_class():
            method_and_var = "var"
            class Test(object):
                def method_and_var(self):
                    return "method"
                def test(self):
                    return method_and_var
                def actual_global(self):
                    return str("global")
                def str(self):
                    return str(self)
            return Test()
        t = get_nested_class()
        assert t.actual_global() == "global"
        assert t.test() == 'var'
        assert t.method_and_var() == 'var'  # NOTE(review): original asserted 'method'; see below
        assert t.method_and_var() == 'method'

    def test_exec_load_name(self):
        d = {'x': 2}
        exec("""if 1:
            def f():
                save = x
                exec("x=3")
                return x,save
            \n""", d)
        res = d['f']()
        assert res == (2, 2)

    def test_space_bug(self):
        # Trailing whitespace in the source must not break parsing.
        d = {}
        exec("x=5 ", d)
        assert d['x'] == 5

    def test_synerr(self):
        def x():
            exec("1 2")
        raises(SyntaxError, x)

    def test_mapping_as_locals(self):
        # A non-dict mapping is accepted as locals for exec, but not for eval.
        class M(object):
            def __getitem__(self, key):
                return key
            def __setitem__(self, key, value):
                self.result[key] = value
            def setdefault(self, key, value):
                assert key == '__builtins__'
        m = M()
        m.result = {}
        exec("x=m", {}, m)
        assert m.result == {'x': 'm'}
        try:
            exec("y=n", m)
        except TypeError:
            pass
        else:
            assert False, 'Expected TypeError'
        raises(TypeError, eval, "m", m)

    def test_filename(self):
        # Syntax errors from string sources report filename '<string>'.
        try:
            exec("'unmatched_quote")
        except SyntaxError as msg:
            assert msg.filename == '<string>', msg.filename
        try:
            eval("'unmatched_quote")
        except SyntaxError as msg:
            assert msg.filename == '<string>', msg.filename

    def test_exec_and_name_lookups(self):
        ns = {}
        exec("""def f():
            exec('x=1', globals())
            return x\n""", ns)
        f = ns['f']
        try:
            res = f()
        except NameError as e: # keep py.test from exploding confused
            raise e
        assert res == 1

    def test_exec_unicode(self):
        # 's' is a bytes string
        s = b"x = '\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
        # 'u' is a unicode
        u = s.decode('utf-8')
        ns = {}
        exec(u, ns)
        x = ns['x']
        assert len(x) == 6
        assert ord(x[0]) == 0x0439
        assert ord(x[1]) == 0x0446
        assert ord(x[2]) == 0x0443
        assert ord(x[3]) == 0x043a
        assert ord(x[4]) == 0x0435
        assert ord(x[5]) == 0x043d

    def test_compile_bytes(self):
        # compile() decodes bytes sources as UTF-8 by default.
        s = b"x = '\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
        c = compile(s, '<input>', 'exec')
        ns = {}
        exec(c, ns)
        x = ns['x']
        assert len(x) == 6
        assert ord(x[0]) == 0x0439

    def test_issue3297(self):
        # An astral-plane char literal and its escaped spelling compare equal.
        c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec")
        d = {}
        exec(c, d)
        assert d['a'] == d['b']
        assert len(d['a']) == len(d['b'])
        assert d['a'] == d['b']

    def test_locals_call(self):
        # exec with no namespaces shares the caller's locals() snapshot.
        l = locals()
        exec("""if 1:
            assert locals() is l
            def f(a):
                exec('a=3')
                return a
            x = f(4)\n""")
        assert eval("locals() is l")
        assert l["x"] == 4

    def test_custom_locals(self):
        # A custom mapping passed as locals is used verbatim by locals().
        class M(object):
            def __getitem__(self, key):
                return self.result[key]
            def __setitem__(self, key, value):
                self.result[key] = value
        m = M()
        m.result = {"m": m, "M": M}
        exec("""if 1:
            assert locals() is m
            def f(a):
                exec('a=3')
                return a
            x = f(4)
            assert locals()["x"] == 4
            x = 12
            assert isinstance(locals(), M)
            assert locals()["x"] == 12\n""", None, m)
        assert eval("locals() is m", None, m)
        assert m["x"] == 12

    def test_locals_is_globals(self):
        exec("assert locals() is globals()", globals())

    def test_custom_locals2(self):
        # `global x` writes to the globals dict; plain reads consult the
        # custom locals mapping, which echoes the key back.
        class M(object):
            def __getitem__(self, key):
                return key
        m = M()
        ns = {}
        exec("global x; x = y", ns, m)
        assert ns["x"] == "y";
        assert eval("x", None, m) == "x"
| StarcoderdataPython |
9673125 | import numpy as np
import pandas as pd
from pprint import pprint
import argparse
from pytorch_pretrained_bert.tokenization import (BasicTokenizer,
BertTokenizer, whitespace_tokenize)
import collections
import torch
from torch.utils.data import TensorDataset
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering, BertConfig
import math
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
# from tqdm import tqdm
from parafinder import ParaFinder
torch.manual_seed(123)
class SquadExample(object):
    """A single question/paragraph pair for SQuAD-style prediction.

    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 example_id,
                 para_text,
                 qas_id,
                 question_text,
                 doc_tokens,
                 unique_id):
        # All fields are stored verbatim; doc_tokens is the whitespace-split
        # paragraph produced by read_squad_examples.
        self.example_id = example_id
        self.para_text = para_text
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.unique_id = unique_id

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        fields = [
            "qas_id: %s" % self.qas_id,
            "question_text: %s" % self.question_text,
            "doc_tokens: [%s]" % " ".join(self.doc_tokens),
        ]
        return ", ".join(fields)
### Convert paragraph to tokens and returns question_text
def read_squad_examples(input_data):
"""Read a SQuAD json file into a list of SquadExample."""
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
i = 0
examples = []
for entry in input_data:
example_id = entry['id']
paragraph_text = entry['text']
doc_tokens = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
for qa in entry['ques']:
qas_id = i
question_text = qa
example = SquadExample(example_id=example_id,
qas_id=qas_id,
para_text=paragraph_text,
question_text=question_text,
doc_tokens=doc_tokens,
unique_id=i)
i += 1
examples.append(example)
return examples
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class InputFeatures(object):
    """Feature set for a single (example, document-span) pair.

    Plain value container: every constructor argument is stored verbatim as
    an attribute of the same name.
    """

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_is_max_context,
                 token_to_orig_map,
                 input_ids,
                 input_mask,
                 segment_ids):
        # Identity of this feature window.
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        # Token bookkeeping used to map predictions back to the source text.
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        # Model inputs.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length):
    """Loads a data file into a list of `InputBatch`s."""
    features = []
    unique_id = 1
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)
        ### Truncate the query if query length > max_query_length..
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        # Map between whitespace tokens and their WordPiece sub-tokens.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        # Reserve room for [CLS], [SEP] and the final [SEP].
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Layout: [CLS] query [SEP] doc-span [SEP], with segment id 0 for
            # the query part and 1 for the document part.
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            features.append(InputFeatures(unique_id=unique_id,
                                          example_index=example_index,
                                          doc_span_index=doc_span_index,
                                          tokens=tokens,
                                          token_is_max_context=token_is_max_context,
                                          token_to_orig_map=token_to_orig_map,
                                          input_ids=input_ids,
                                          input_mask=input_mask,
                                          segment_ids=segment_ids))
            unique_id += 1
    return features
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text."""
    # NOTE(review): `logger` is not defined anywhere in this module; the
    # verbose_logging=True branches below would raise NameError — confirm
    # whether a module-level logger was meant to be configured.
    def _strip_spaces(text):
        # Returns text without spaces plus a map from stripped-index to
        # original index.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info(
                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    # Alignment only works when both variants have identical non-space text.
    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
                        orig_ns_text, tok_ns_text)
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
# Candidate answer span inside one feature window, scored by its start and
# end logits; consumed by predict().
_PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
    "PrelimPrediction",
    ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
# One de-tokenized candidate answer with its logits; ranked in predict().
_NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
    "NbestPrediction", ["text", "start_logit", "end_logit"])
def predict(examples, all_features, all_results, max_answer_length):
    """For each example, pick the best answer span from its feature windows.

    Returns an OrderedDict mapping each SquadExample object to a
    (answer_text, probability) tuple.  Examples with no features in
    `all_features` fall through to the "No result found" placeholder.
    """
    n_best_size = 10
    ### Adding index to feature ###
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    all_predictions = collections.OrderedDict()
    for example in examples:
        index = 0
        # NOTE(review): the dict is keyed by feature.example_index but looked
        # up with example.unique_id — these coincide only because both count
        # examples from 0 in this pipeline; confirm this is intentional.
        features = example_index_to_features[example.unique_id]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    #### we remove the indexes which are invalid @
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        # Best candidates first: rank by summed start/end logits.
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = " ".join(tok_tokens)
                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")
                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)
                final_text = get_final_text(tok_text, orig_text, True)
                if final_text in seen_predictions:
                    continue
                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # Guarantee at least one entry so the softmax below never sees [].
        if not nbest:
            nbest.append(
                _NbestPrediction(text="No result found", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        all_predictions[example] = (nbest_json[0]["text"], nbest_json[0]["probability"])
        # NOTE(review): `index = +1` assigns the constant 1 (likely meant
        # `index += 1`); `index` is never read, so this is dead code either way.
        index = +1
    return all_predictions
# Per-feature model output: the raw start/end logit lists for one window.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])
def main():
    """CLI entry point: answer each question from a paragraph file with BERT.

    Reads the paragraph and question files named on the command line, finds
    the closest paragraph per question via ParaFinder, runs a pretrained
    BertForQuestionAnswering model on CUDA, and prints each question followed
    by its predicted answer (or "No result found" below the 0.35 probability
    threshold).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--paragraph", default=None, type=str)
    parser.add_argument("--question", default=None, type=str)
    parser.add_argument("--model", default=None, type=str)
    parser.add_argument("--max_seq_length", default=384, type=int)
    parser.add_argument("--doc_stride", default=128, type=int)
    parser.add_argument("--max_query_length", default=64, type=int)
    parser.add_argument("--config_file", default=None, type=str)
    parser.add_argument("--max_answer_length", default=30, type=int)
    args = parser.parse_args()
    para_file = args.paragraph
    question_file = args.question
    model_path = args.model
    # Requires a CUDA-capable GPU; no CPU fallback is attempted.
    device = torch.device("cuda")
    ### Reading paragraph
    # f = open(para_file, 'r')
    # para = f.read()
    # f.close()
    ## Reading question
    # f = open(ques_file, 'r')
    # ques = f.read()
    # f.close()
    # para_list = para.split('\n\n')
    # The paragraph file is decoded as windows-1252 and flattened to one line.
    f = open(para_file, "rb")
    para = f.read()
    para = para.decode('windows-1252')
    para = para.strip("\n").replace("\r", " ").replace("\n", "")
    #print(para)
    # print(para)
    f.close()
    f_ = open(question_file, "r")
    question = f_.read()
    question = question.split("\n")
    while "" in question:
        question.remove("")
    # NOTE(review): this loop is a no-op — rebinding the loop variable does
    # not modify the list entries.
    for q in question:
        q = q.strip("\n")
    f_.close()
    input_data = []
    pfinder = ParaFinder(para)
    i = 0
    # One pseudo-document per question: its closest paragraph plus the question.
    for q in question:
        closest_para = pfinder.closestParagraph(q)
        paragraphs = {}
        paragraphs["id"] = i
        paragraphs["text"] = closest_para
        paragraphs["ques"] = [q]
        i += 1
        input_data.append(paragraphs)
    # print(input_data)
    ## input_data is a list of dictionary which has a paragraph and questions
    examples = read_squad_examples(input_data)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
    eval_features = convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        max_seq_length=args.max_seq_length,
        doc_stride=args.doc_stride,
        max_query_length=args.max_query_length)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    ### Loading Pretrained model for QnA
    config = BertConfig(args.config_file)
    model = BertForQuestionAnswering(config)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    pred_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
    # Run prediction for full data
    pred_sampler = SequentialSampler(pred_data)
    pred_dataloader = DataLoader(pred_data, sampler=pred_sampler, batch_size=9)
    predictions = []
    for input_ids, input_mask, segment_ids, example_indices in pred_dataloader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
        features = []
        example = []
        all_results = []
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            feature = eval_features[example_index.item()]
            unique_id = int(feature.unique_id)
            features.append(feature)
            all_results.append(RawResult(unique_id=unique_id,
                                         start_logits=start_logits,
                                         end_logits=end_logits))
        # predict() is called with ALL examples but only this batch's
        # features; examples outside the batch get the placeholder result.
        output = predict(examples, features, all_results, args.max_answer_length)
        predictions.append(output)
    ### For printing the results ####
    index = None
    for example in examples:
        if index != example.example_id:
            # print(example.para_text)
            index = example.example_id
        # print('\n')
        # print(colored('***********Question and Answers *************', 'red'))
        ques_text = example.question_text
        print(ques_text)
        # NOTE(review): the divisor 12 does not match the DataLoader batch
        # size of 9 above — confirm which value maps unique_id to its batch.
        prediction, prob = predictions[math.floor(example.unique_id / 12)][example]
        if prob > 0.35:
            print(prediction)
            #print(type(prediction))
        else:
            print("No result found")
        # print('\n')
    ## prediction is the answer of the question
if __name__ == "__main__":
main()
| StarcoderdataPython |
5002063 | <filename>portal/apps/videologue/templatetags/videologue_tags.py
# -*- coding: utf-8 -*-
from videologue.models import YouTubeVideo
from django.template import (Context, Library, loader, Node, TemplateSyntaxError)
from string import lower
register = Library()
TPL_DIR = 'videologue/templates/'
class RenderLatestVideoNode(Node):
    """Template node that stores the most recent YouTubeVideo in the context.

    `kwcontext` is the context-variable name to bind; rendering produces no
    output, it only updates the context (None when no video is available).
    """
    def __init__(self, kwcontext):
        self.kw = kwcontext

    def render(self, context):
        try:
            video = YouTubeVideo.objects.latest()
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the fall-back-to-None behavior
        # for ORM errors (e.g. empty table) without hiding interpreter exits.
        except Exception:
            video = None
        context.update({self.kw: video})
        return ''
class RenderVideoNode(Node):
    """Template node that stores the YouTubeVideo with id `vid` in the context.

    `kwcontext` is the context-variable name to bind; rendering produces no
    output, it only updates the context (None when lookup fails).
    """
    def __init__(self, kwcontext, vid):
        self.kw = kwcontext
        self.vid = vid

    def render(self, context):
        try:
            video = YouTubeVideo.objects.get(id=self.vid)
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the fall-back-to-None behavior
        # for missing ids without hiding interpreter exits.
        except Exception:
            video = None
        context.update({self.kw: video})
        return ''
@register.tag
def get_latest_video(parser, token):
    """Usage: {% get_latest_video as video_object %}"""
    pieces = token.contents.split()
    well_formed = len(pieces) == 3 and pieces[1] == 'as'
    if not well_formed:
        raise TemplateSyntaxError('Invalid arguments for %s' % pieces[0])
    return RenderLatestVideoNode(pieces[2])
@register.tag
def get_video(parser, token):
    """Usage: {% get_video id as video_object %}"""
    pieces = token.contents.split()
    well_formed = len(pieces) == 4 and pieces[2] == 'as'
    if not well_formed:
        raise TemplateSyntaxError('Invalid arguments for %s' % pieces[0])
    return RenderVideoNode(pieces[3], pieces[1])
@register.filter
def render_video(video):
    """Render *video* with its per-model template, or '' for falsy input.

    The template path is derived from the lowercased model class name,
    e.g. ``YouTubeVideo`` -> ``.../youtubevideo/module.html``.
    """
    if not video:
        return ''
    # str.lower() replaces the Python-2-only ``string.lower`` helper and
    # behaves identically for ASCII class names.
    tpl = loader.get_template(
        TPL_DIR + '%s/module.html' % video.__class__.__name__.lower())
    return tpl.render({'video': video})
| StarcoderdataPython |
4975580 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import gym
class Numpy(gym.Wrapper):
    """
    This wrapper converts
    * states from pandas to numpy
    """

    def _convert_state(self, state):
        # A pandas DataFrame is flattened into the declared observation shape.
        if isinstance(state, pd.DataFrame):
            return state.values.reshape(*self.observation_space.shape)
        return state

    def _convert_to_numpy(self, data):
        # Unwrap pandas containers; recurse into dict values.
        if isinstance(data, (pd.Series, pd.DataFrame)):
            return data.values
        if isinstance(data, dict):
            return {key: self._convert_to_numpy(value) for key, value in data.items()}
        return data

    def step(self, action):
        observation, reward, done, info = self.env.step(action)
        return (
            self._convert_state(observation),
            reward,
            done,
            self._convert_to_numpy(info),
        )

    def reset(self, *args, **kwargs):
        return self._convert_state(self.env.reset(*args, **kwargs))
| StarcoderdataPython |
159575 | <filename>consolidator_script.py<gh_stars>1-10
'''
This program downloads the following
1. One zerodha holding excel sheet
2. Two icici direct holding excel sheet
3. All listed active equities on NSE (not in active code)
4. All listed active equities on BSE (not in active code)
5. Moves all the above files to the current working directory
Credentials are stored in the appropriate json file.
Chromedriver needs to be in the same folder as python file, as well as json file.
Selenium and tqdm needs to be installed
This is built for Windows OS
'''
import json
import time
import glob
import os
import shutil
import sys
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from tqdm import tqdm
'''
Download zerodha holding report
'''
# NOTE(review): Selenium automation against Kite's login page; the XPaths are
# brittle and the fixed 5 s sleep assumes the download completes in time.
print ("\nZerodha portfolio holding report download in progress...")
for i in tqdm(range(1)):
    # Credentials are kept out of the source in a sibling JSON file.
    with open("zerodha_credentials.json") as credentialsFile:
        data = json.load(credentialsFile)
        username = data['username']
        password = data['password']
        pin = data['pin']
    driver = webdriver.Chrome()
    driver.get('https://kite.zerodha.com/')
    driver.minimize_window()
    try:
        driver.find_element_by_xpath("/html/body/div[1]/div/div/div[1]/div/div/div/form/div[2]/input").send_keys(username)
        driver.find_element_by_xpath("/html/body/div[1]/div/div/div[1]/div/div/div/form/div[3]/input").send_keys(password)
        driver.find_element_by_xpath("/html/body/div[1]/div/div/div[1]/div/div/div/form/div[4]/button").click()
    except:
        print("\nOPERATION FAIL: Login Credentials are either incorrect or need to be changed. Please login manually first!")
    driver.implicitly_wait(20)
    # Second login step: the PIN form.
    try:
        driver.find_element_by_xpath("//*[@id='container']/div/div/div/form/div[2]/div/input").send_keys(pin)
        driver.find_element_by_xpath("/html/body/div[1]/div/div/div[1]/div/div/div/form/div[3]/button").click()
    except:
        print("\nOPERATION FAIL: PIN is either incorrect or has to be changed. Please login manually first!")
    # Navigate to the holdings page and trigger the CSV export.
    try:
        driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/div[2]/div[1]/a[3]/span").click()
        driver.find_element_by_xpath("/html/body/div[1]/div[2]/div[2]/div/div/section/div/div[1]/div/span[3]/span").click()
    except:
        print("\nOPERATION FAIL: Report link has perhaps changed. Please login manually first!")
    time.sleep(5) #5 seconds to download and close browser
    driver.close()
print ("\nZerodha portfolio holding report has successfully been downloaded!")
time.sleep(0.01)
'''
Downloaded zerodha file to be moved to current working directory and renamed
'''
print ("\n\nZerodha portfolio holding report being renamed & moved to working folder...")
for i in tqdm(range(1)):
    # Assumes the newest file in Downloads is the report just fetched --
    # TODO confirm no other download can race with this.
    downloaded_files = glob.iglob('C:\\Users\\PeepalCapital\\Downloads\\*') #put the path name of your donwload folder
    latest_file_path_downloaded = max(downloaded_files , key = os.path.getctime)
    shutil.copy(latest_file_path_downloaded, os.getcwd())
    working_file = glob.iglob('C:\\Users\\PeepalCapital\\AppData\\Roaming\\Sublime Text 3\\Packages\\User\\*') #put the path name of your current working folder
    working_file_path_copied = max(working_file , key = os.path.getctime)
    file_name = os.path.basename (working_file_path_copied)
    try:
        os.rename (file_name, 'zerodha_holdings.csv')
    except WindowsError:
        # Overwrite a report left over from a previous run.
        os.remove('zerodha_holdings.csv')
        os.rename (file_name, 'zerodha_holdings.csv')
print ("\nZerodha portfolio holding report has successfully been renamed and moved!")
time.sleep(0.01)
'''
Download ICICI direct holding report (1)
'''
# Same flow as the Zerodha section, but for the first ICICI Direct account;
# on failure it exits the whole script instead of continuing.
print ("\n\nICICI Direct portfolio holding report (1) download in progress...")
for i in tqdm(range(1)):
    with open("icici1_credentials.json") as credentialsFile1:
        data1 = json.load(credentialsFile1)
        user1 = data1['user1']
        password1 = data1['<PASSWORD>']
        dob1 = data1['dob1']
    driver = webdriver.Chrome()
    driver.get('https://secure.icicidirect.com/IDirectTrading/Customer/Login.aspx')
    driver.minimize_window()
    try:
        driver.find_element_by_xpath("//*[@id='txtUserId']").send_keys(user1)
        driver.find_element_by_xpath("//*[@id='txtPass']").send_keys(<PASSWORD>)
        driver.find_element_by_xpath("//*[@id='txtDOB']").send_keys(dob1)
        driver.find_element_by_xpath("//*[@id='lbtLogin']").click()
    except:
        print("\nOPERATION FAIL: Login Credentials are either incorrect or need to be changed. Please login manually first!")
        driver.close()
        sys.exit()
    # Portfolio page -> equity holdings -> filter -> export.
    try:
        driver.find_element_by_xpath("//*[@id='hypPF']").click()
        driver.implicitly_wait(20)
        driver.find_element_by_xpath("//*[@id='dvMenu']/ul/li[1]/div/ul/li[1]/a/label[1]").click()
        driver.implicitly_wait(20)
        driver.find_element_by_xpath("//*[@id='hypfilter']").click()
        driver.implicitly_wait(20)
        driver.find_element_by_xpath("//*[@id='dvfilter']/div[2]/ul/li[2]/img[1]").click()
    except:
        print("\nOPERATION FAIL: ICICI is either trying to sell something or wants a confirmation. Please login manually first!")
        driver.close()
        sys.exit()
    time.sleep(5) #5 seconds to download and close browser
    driver.close()
print ("\nICICI direct portfolio holding report (1) has successfully been downloaded!")
time.sleep(0.01)
'''
Downloaded icici direct file (1) to be moved to current working directory and renamed
'''
print ("\n\nICICI direct portfolio holding report (1) being renamed & moved to working folder...")
for i in tqdm(range(1)):
    # Assumes the newest file in Downloads is the report just fetched.
    downloaded_files = glob.iglob('C:\\Users\\PeepalCapital\\Downloads\\*') #put the path name of your donwload folder
    latest_file_path_downloaded = max(downloaded_files , key = os.path.getctime)
    shutil.copy(latest_file_path_downloaded, os.getcwd())
    working_file = glob.iglob('C:\\Users\\PeepalCapital\\AppData\\Roaming\\Sublime Text 3\\Packages\\User\\*') #put the path name of your current working folder
    working_file_path_copied = max(working_file , key = os.path.getctime)
    file_name = os.path.basename (working_file_path_copied)
    try:
        os.rename (file_name, 'icici_direct_holdings_1.xls')
    except WindowsError:
        # Overwrite a report left over from a previous run.
        os.remove('icici_direct_holdings_1.xls')
        os.rename (file_name, 'icici_direct_holdings_1.xls')
print ("\nICICI direct portfolio holding report (1) has successfully been renamed and moved!")
time.sleep(0.01)
'''
Download ICICI direct holding report (2)
'''
# Duplicate of the section above for the second ICICI Direct account.
print ("\n\nICICI Direct portfolio holding report (2) download in progress...")
for i in tqdm(range(1)):
    with open("icici2_credentials.json") as credentialsFile2:
        data2 = json.load(credentialsFile2)
        user2 = data2['user2']
        password2 = data2['<PASSWORD>']
        dob2 = data2['dob2']
    driver = webdriver.Chrome()
    driver.get('https://secure.icicidirect.com/IDirectTrading/Customer/Login.aspx')
    driver.minimize_window()
    try:
        driver.find_element_by_xpath("//*[@id='txtUserId']").send_keys(user2)
        driver.find_element_by_xpath("//*[@id='txtPass']").send_keys(password2)
        driver.find_element_by_xpath("//*[@id='txtDOB']").send_keys(dob2)
        driver.find_element_by_xpath("//*[@id='lbtLogin']").click()
    except:
        print("\nOPERATION FAIL: Login Credentials are either incorrect or need to be changed. Please login manually first!")
        driver.close()
        sys.exit()
    # Portfolio page -> equity holdings -> filter -> export.
    try:
        driver.find_element_by_xpath("//*[@id='hypPF']").click()
        driver.implicitly_wait(20)
        driver.find_element_by_xpath("//*[@id='dvMenu']/ul/li[1]/div/ul/li[1]/a/label[1]").click()
        driver.implicitly_wait(20)
        driver.find_element_by_xpath("//*[@id='hypfilter']").click()
        driver.implicitly_wait(20)
        driver.find_element_by_xpath("//*[@id='dvfilter']/div[2]/ul/li[2]/img[1]").click()
    except:
        print("\nOPERATION FAIL: ICICI is either trying to sell something or wants a confirmation. Please login manually first!")
        driver.close()
        sys.exit()
    time.sleep(5) #5 seconds to download and close browser
    driver.close()
print ("\nICICI direct portfolio holding report (2) has successfully been downloaded!")
time.sleep(0.01)
'''
Downloaded icici direct file (2) to be moved to current working directory and renamed
'''
print ("\n\nICICI direct portfolio holding report (2) being renamed & moved to working folder...")
for i in tqdm(range(1)):
    # Assumes the newest file in Downloads is the report just fetched.
    downloaded_files = glob.iglob('C:\\Users\\PeepalCapital\\Downloads\\*') #put the path name of your donwload folder
    latest_file_path_downloaded = max(downloaded_files , key = os.path.getctime)
    shutil.copy(latest_file_path_downloaded, os.getcwd())
    working_file = glob.iglob('C:\\Users\\PeepalCapital\\AppData\\Roaming\\Sublime Text 3\\Packages\\User\\*') #put the path name of your current working folder
    working_file_path_copied = max(working_file , key = os.path.getctime)
    file_name = os.path.basename (working_file_path_copied)
    try:
        os.rename (file_name, 'icici_direct_holdings_2.xls')
    except WindowsError:
        # Overwrite a report left over from a previous run.
        os.remove('icici_direct_holdings_2.xls')
        os.rename (file_name, 'icici_direct_holdings_2.xls')
print ("\nICICI direct portfolio holding report (2) has successfully been renamed and moved!")
time.sleep(0.01)
'''
Download all active NSE Equity
print ("\n\nAll active equities traded on NSE being downloaded...")
for i in tqdm(range(1)):
try:
driver = webdriver.Chrome()
driver.get('https://www.nseindia.com/corporates/content/securities_info.htm')
driver.minimize_window()
driver.find_element_by_xpath("//*[@id='wrapper_btm']/div[1]/div[4]/div/ul/li[1]/a").click()
time.sleep(5) #5 seconds to download and close browser
driver.close()
except:
print("\nOPERATION FAIL: Not able to download all active equities from NSE")
driver.close()
sys.exit()
print ("\nAll active equities traded on NSE has been successfully downloaded!")
time.sleep(0.01)
#reuse code for file movement as above
Download all active BSE Equity
print ("\n\nAll active equities traded on BSE being downloaded...")
for i in tqdm(range(1)):
try:
driver = webdriver.Chrome()
driver.get('https://www.bseindia.com/corporates/List_Scrips.aspx')
driver.minimize_window()
select = Select(driver.find_element_by_id('ContentPlaceHolder1_ddSegment'))
select.select_by_visible_text("Equity")
select = Select(driver.find_element_by_id('ContentPlaceHolder1_ddlStatus'))
select.select_by_visible_text("Active")
driver.find_element_by_xpath("//*[@id='ContentPlaceHolder1_btnSubmit']").click()
driver.implicitly_wait(20)
driver.find_element_by_xpath("//*[@id='ContentPlaceHolder1_lnkDownload']/i").click()
time.sleep(5) #5 seconds to download and close browser
driver.close()
except:
print("\nOPERATION FAIL: Not able to download all active equities from BSE")
driver.close()
sys.exit()
print ("\nAll active equities traded on BSE has been successfully downloaded!")
time.sleep(0.01)
#reuse code for file movement as above
'''
| StarcoderdataPython |
1619210 | #!/usr/bin/env python3
# _*_coding:utf-8_*_
import os
from common.LogManage import get_logger
from common.settingLib import get_mongodb_db
# Tornado-style application settings; template/static paths are resolved
# relative to this file.
settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static"),
    cookie_secret="<KEY>
    login_url="/login",
    xsrf_cookies=True,
    debug=True,
)
# Logging module: writes to base_templet.log and mirrors to the console.
# debug=10 presumably maps to logging.DEBUG -- confirm in common.LogManage.
logger = get_logger(
    strFileName="base_templet.log",
    debug=10,
    showStreamLog=True,
    saveLogPath=None
)
# Acquire the MongoDB connection.  BUG FIX: the original called
# get_mongodb_db() twice -- once for the truth test and again for the
# unpack -- opening the connection machinery twice; call it once and reuse.
# debug=True selects the production database, False the test database.
_db_conn = get_mongodb_db(settings.get("debug", "False"))
if _db_conn:
    database, g_motor_db, g_py_db = _db_conn
    logger.info("(%s)数据库连接成功" % database)
else:
    logger.info("数据库连接失败")
| StarcoderdataPython |
11335463 | <reponame>KTH-UrbanT/MUBES_UBEM
""" This example uses FMpy as a environment to make FMU simulation. It deals only with
changing the set point for 2 hours for each building one after the other. Thus change frequency depends
on the number of FMU considered in total."""
import os,sys
from fmpy import *
#from fmpy.fmi1 import FMU1Slave
from fmpy.fmi1 import fmi1OK
#from fmpy.fmi2 import FMU2Slave
from fmpy.fmi2 import fmi2OK
from fmpy.simulation import instantiate_fmu
path2addgeom = os.path.join(os.path.dirname(os.path.dirname(os.getcwd())),'geomeppy')
sys.path.append(path2addgeom)
sys.path.append('..')
import shutil
import pickle
import time as timedelay
from CoreFiles import LaunchSim as LaunchSim
##Callback function required to avoid having the prnted message when everything goes fine with 2.0 !
def log_message2(componentEnvironment, instanceName, status, category, message):
    """FMI 2.0 logger callback: stay silent on fmi2OK, print anything else."""
    if status != fmi2OK:
        print(message.decode('utf-8'))
##Callback function required to avoid having the prnted message when everything goes fine with 1.0 !
def log_message1(componentEnvironment, instanceName, status, category, message):
    """FMI 1.0 logger callback: stay silent on fmi1OK, print anything else."""
    if status != fmi1OK:
        print(message.decode('utf-8'))
def InstAndInitiV1(filelist,VarNames,start_time,stop_time) :
    """Instantiate and initialise every FMU 1.0 file found in *filelist*.

    Returns a dict keyed by the integer embedded in each file name between
    '_' and 'v' (e.g. 'Bld_12v1.fmu' -> 12); each entry holds the extraction
    directory, the name->valueReference map ('Exch_Var') and the live FMU.
    """
    # Delimiters around the numeric building id inside the file name.
    idx1 = ['_', 'v']
    fmunNb = 0
    FMUElement = {}
    for file in filelist:
        if file[-4:] == '.fmu':
            fmunNb += 1
            model_name = file[:-4]
            FMUKeyName = int(model_name[model_name.index(idx1[0]) + 1:model_name.index(idx1[1])])
            FMUElement[FMUKeyName] = {}
            model_description = read_model_description(file)
            FMUElement[FMUKeyName]['unzipdir'] = extract(file)
            # Map each model variable name to its FMI value reference.
            vrs = {}
            for variable in model_description.modelVariables:
                vrs[variable.name] = variable.valueReference
            FMUElement[FMUKeyName]['Exch_Var'] = vrs
            # instantiate_fmu with the custom logger keeps the output quiet.
            FMUElement[FMUKeyName]['fmu'] = instantiate_fmu(FMUElement[FMUKeyName]['unzipdir'], model_description,
                                                 fmi_type='CoSimulation', visible=False, debug_logging=False,
                                                 logger=log_message1, fmi_call_logger=None, library_path=None)
            #old way with a bunch of messages
            # FMUElement[FMUKeyName]['fmu'] = FMU1Slave(guid=model_description.guid,
            #                        unzipDirectory=FMUElement[FMUKeyName]['unzipdir'],
            #                        modelIdentifier=model_description.coSimulation.modelIdentifier,
            #                        instanceName=model_name, fmiCallLogger = log_message1)
            # FMUElement[FMUKeyName]['fmu'].instantiate()
            # Push the initial input values before initialisation.
            for i,input in enumerate(VarNames['Inputs']):
                FMUElement[FMUKeyName]['fmu'].setReal([vrs[input]],[VarNames['InitialValue'][i]])
            FMUElement[FMUKeyName]['fmu'].initialize(tStart=start_time, stopTime=stop_time)
    return FMUElement
def InstAndInitiV2(filelist,VarNames,start_time,stop_time) :
    """Instantiate and initialise every FMU 2.0 file found in *filelist*.

    Same return structure as :func:`InstAndInitiV1`; differs only in the
    FMI 2.0 initialisation sequence (setupExperiment + enter/exit
    initialization mode).
    """
    # Delimiters around the numeric building id inside the file name.
    idx1 = ['_', 'v']
    fmunNb = 0
    FMUElement = {}
    for file in filelist:
        if file[-4:] == '.fmu':
            fmunNb += 1
            model_name = file[:-4]
            FMUKeyName = int(model_name[model_name.index(idx1[0]) + 1:model_name.index(idx1[1])])
            FMUElement[FMUKeyName] = {}
            model_description = read_model_description(file)
            FMUElement[FMUKeyName]['unzipdir'] = extract(file)
            # Map each model variable name to its FMI value reference.
            vrs = {}
            for variable in model_description.modelVariables:
                vrs[variable.name] = variable.valueReference
            FMUElement[FMUKeyName]['Exch_Var'] = vrs
            FMUElement[FMUKeyName]['fmu'] = instantiate_fmu(FMUElement[FMUKeyName]['unzipdir'], model_description,
                                                            fmi_type='CoSimulation', visible=False, debug_logging=False,
                                                            logger=log_message2,
                                                            fmi_call_logger=None, library_path=None)
            # old way with a bunch of messages
            # FMUElement[FMUKeyName]['fmu'] = FMU2Slave(guid=model_description.guid,
            #                        unzipDirectory=FMUElement[FMUKeyName]['unzipdir'],
            #                        modelIdentifier=model_description.coSimulation.modelIdentifier,
            #                        instanceName=model_name)
            # FMUElement[FMUKeyName]['fmu'].instantiate()
            FMUElement[FMUKeyName]['fmu'].setupExperiment(startTime=start_time, stopTime=stop_time)
            # Push the initial input values before initialisation.
            for i,input in enumerate(VarNames['Inputs']):
                FMUElement[FMUKeyName]['fmu'].setReal([vrs[input]],[VarNames['InitialValue'][i]])
            FMUElement[FMUKeyName]['fmu'].enterInitializationMode()
            FMUElement[FMUKeyName]['fmu'].exitInitializationMode()
    return FMUElement
def LaunchFMU_Sim(FMUElement,VarNames, start_time,stop_time,step_size):
    """Co-simulate all FMUs from *start_time* to *stop_time* (seconds).

    Every 2 h one building (round-robin over FMUElement) gets its set point
    lowered to 18 while all others stay at 21; internal loads follow a
    fixed morning/evening schedule.  Terminates and frees every FMU at the
    end and returns the final simulation time.
    """
    time = start_time
    day = 0
    SetPoints = {}
    MeanTemp = {}
    HeatPow = {}
    IntLoad = {}
    bld = 0
    # Seed the per-building history lists with initial values.
    for key in FMUElement.keys():
        HeatPow[key] = [0]
        MeanTemp[key] = [0]
        SetPoints[key] = [21]
        IntLoad[key] = [0]
    # simulation loop
    while time < stop_time:
        # Progress report every 240 h, printed in 10-day increments.
        if (time % (240 * 3600)) == 0:
            day += 10
            print(str(day) + ' simulation days done')
        # Every 2 h advance the round-robin pointer to the next building.
        if time % (2 * 3600) == 0:
            bld += 1
            bld = bld % len(FMUElement.keys())
        for i, key in enumerate(FMUElement.keys()):
            SetPoints[key].append(21)
            if i == bld:
                SetPoints[key][-1] = 18
            IntLoad[key].append(2) #a base of 2W/m2 is considered
            # Boost internal loads during morning (06-10) and evening (16-22).
            if 6 <= time%(24*3600)/3600 <= 10:
                IntLoad[key][-1] = 10
            if 16 <= time%(24*3600)/3600 <= 22:
                IntLoad[key][-1] = 10
            FMUElement[key]['fmu'].setReal([FMUElement[key]['Exch_Var']['TempSetPoint']], [SetPoints[key][-1]])
            FMUElement[key]['fmu'].setReal([FMUElement[key]['Exch_Var']['IntLoadPow']], [IntLoad[key][-1]])
            FMUElement[key]['fmu'].doStep(currentCommunicationPoint=time, communicationStepSize=step_size)
            #lets catch the outputs (even if not used in this example, it could be used to control the next inputs)
            MeanTemp[key].append(FMUElement[key]['fmu'].getReal([FMUElement[key]['Exch_Var'][VarNames['Outputs'][0]]]))
            HeatPow[key].append(FMUElement[key]['fmu'].getReal([FMUElement[key]['Exch_Var'][VarNames['Outputs'][1]]]))
        time += step_size
    # Tear down: terminate, free and remove each extracted FMU directory.
    for i, key in enumerate(FMUElement.keys()):
        FMUElement[key]['fmu'].terminate()
        FMUElement[key]['fmu'].freeInstance()
        shutil.rmtree(FMUElement[key]['unzipdir'] , ignore_errors=True)
    return time
def CleanUpSimRes(work_dir,keepLogFolder = False):
    """Collect simulation outputs into *work_dir*/Sim_Results.

    For every 'Output_EPExport_<name>' entry in the current directory, the
    matching '<name>.pickle' building object is reloaded and handed to
    LaunchSim.savecase together with the output folder.
    """
    #now lets clean up all the folders and files
    print('################################################')
    print('Starting the cleanup process')
    timedelay.sleep(5)
    ResSimpath = os.path.join(work_dir,'Sim_Results')
    if not os.path.exists(ResSimpath):
        os.mkdir(ResSimpath)
    liste = os.listdir()
    for file in liste:
        if 'Output_EPExport_' in file:
            # Recover the building name encoded in the output folder name.
            buildName = file[len('Output_EPExport_'):]
            buildNameidf = buildName+'.idf'
            with open(os.path.join(work_dir,buildName+'.pickle'), 'rb') as handle:
                loadB = pickle.load(handle)
            building = loadB['BuildData']
            building.SaveLogFiles = keepLogFolder
            LaunchSim.savecase(buildName,os.path.join(work_dir,file),building,ResSimpath,buildNameidf,work_dir,withFMU = True)
            #unable to erase the fmu extracted folder as the dll is still open at this stage of the code....why ? still weird to me
            #shutil.rmtree(buildName)
if __name__ == '__main__':
    # Demo driver: simulate every FMU found in the results folder for
    # 100 days with a 15-minute (900 s) communication step.
    MainPath = os.getcwd()
    SavedFolder = 'MUBES_SimResults/ForTest'
    work_dir = os.path.normcase(
        os.path.join(os.path.dirname(os.path.dirname(MainPath)), SavedFolder))
    os.chdir(work_dir)
    filelist = os.listdir(work_dir)
    start_time = 0*24*3600
    stop_time = 100*24*3600
    step_size = 900
    # Exchanged variable names plus the initial values pushed before init.
    VarNames = {'Inputs': ['TempSetPoint','IntLoadPow'],
                'InitialValue': [21,0],
                'Outputs' : ['MeanBldTemp', 'HeatingPower']}
    #to make it work if being either version1.0 or 2.0 or FMU Standards
    try:
        FMUElement = InstAndInitiV1(filelist,VarNames,start_time,stop_time)
        print('FMU 1.0 used')
    except:
        FMUElement = InstAndInitiV2(filelist,VarNames, start_time, stop_time)
        print('FMU 2.0 used')
    LaunchFMU_Sim(FMUElement,VarNames, start_time, stop_time, step_size)
    CleanUpSimRes(work_dir, keepLogFolder=True)
| StarcoderdataPython |
1913996 | <reponame>abhishekshah67/weebullet
#!/usr/bin/env python
# vim: set fileencoding=utf8 ts=4 sw=4 expandtab :
import json
import re
import urllib.parse
import time
import weechat as w
# Constant used to check if configs are required (sentinel default value).
REQUIRED = '_required'
# Register the script with weechat and hook private messages plus the two
# user-facing commands.
w.register('weebullet',
           'Lefty',
           '0.5.1',
           'BSD',
           '[Python3]weebullet pushes notifications from IRC to Pushbullet.',
           '', '')
w.hook_print("", "irc_privmsg", "", 1, "priv_msg_cb", "")
w.hook_command(
    "send_push_note", # command
    "send a push note", # description
    "[message]" # arguments description,
    "", # argument
    "",
    "",
    "cmd_send_push_note", ""
)
w.hook_command(
    "weebullet",
    "pushes notifications from IRC to Pushbullet",
    "[command]",
    "Available commands are:\n"
    " help : prints config options and defaults\n"
    " listdevices : prints a list of all devices associated"
    " with your Pushbullet API key\n"
    " listignores : prints a list of channels that highlights "
    " won't be pushed for\n"
    " ignore : adds a channel to the blacklist\n"
    " unignore : removes a channel from the blacklist",
    "",
    "cmd_help", ""
)
# Plugin options and their defaults; REQUIRED entries must be set by the user.
configs = {
    "api_key": REQUIRED,
    "away_only": "1", # only send when away
    "inactive_only": "1", # only send if buffer inactive
    "device_iden": "all", # send to all devices
    "ignored_channels": "", # no ignored channels
    "min_notify_interval": "0", # seconds, don't notify
                                # more often than this
    "debug": "0", # enable debugging
    "ignore_on_relay": "0", # if relay connected,
                            # don't send push notification
}
# Timestamp of the most recent push, used for rate limiting in send_push().
last_notification = 0 # 0 seconds from the epoch
# Seed any unset plugin options with their defaults; required options
# produce an error message plus a type-appropriate /set hint.
for option, default_value in configs.items():
    if w.config_get_plugin(option) == "":
        if configs[option] == REQUIRED:
            w.prnt("", w.prefix("error") +
                   "pushbullet: Please set option: %s" % option)
            # BUG FIX: the original compared ``type(default_value)`` against
            # the strings "str"/"int" -- always False -- so the generic VALUE
            # hint was printed unconditionally.  isinstance is the correct test.
            if isinstance(default_value, str):
                w.prnt("", "pushbullet: /set plugins.var.python.weebullet.%s STRING" % option)
            elif isinstance(default_value, int):
                w.prnt("", "pushbullet: /set plugins.var.python.weebullet.%s INT" % option)
            else:
                w.prnt("", "pushbullet: /set plugins.var.python.weebullet.%s VALUE" % option)
        else:
            w.config_set_plugin(option, configs[option])
def debug(msg):
    """Print *msg* to the weechat core buffer when the ``debug`` option is on."""
    # BUG FIX: the original used ``is not "0"`` -- an identity comparison on a
    # string literal, which is implementation-dependent -- instead of ``!=``.
    if str(w.config_get_plugin("debug")) != "0":
        w.prnt("", "[weebullet] DEBUG: %s" % str(msg))
def process_devicelist_cb(data, url, status, response, err):
    """URL-hook callback: print every pushable Pushbullet device with its id."""
    try:
        device_list = json.loads(response)["devices"]
        w.prnt("", "Device List:")
        for entry in device_list:
            if not entry["pushable"]:
                continue
            if "nickname" in entry:
                w.prnt("", "---\n%s" % entry["nickname"])
            else:
                w.prnt("", "---\nUnnamed")
            w.prnt("", "%s" % entry["iden"])
    except KeyError:
        # Unexpected payload shape -- dump the raw response for diagnosis.
        w.prnt("", "[weebullet] Error accessing device list: %s" % response)
        return w.WEECHAT_RC_ERROR
    return w.WEECHAT_RC_OK
def get_ignored_channels():
    """Return the ``ignored_channels`` option as a list of channel names."""
    raw = w.config_get_plugin("ignored_channels")
    if not raw:
        return []
    return [entry.strip() for entry in raw.split(',')]
def cmd_help(data, buffer, args):
    """Command handler for /weebullet.

    Dispatches on *args*: ignore/unignore edit the channel blacklist,
    listignores prints it, listdevices queries the Pushbullet API
    asynchronously, and anything else prints the usage text.
    """
    # Get current list of ignored channels in list form
    ignored_channels = get_ignored_channels()
    # Used for checking for ignore/unignore commands and getting the arguments
    ignore_command = re.match("^ignore\s+(.+)", args)
    unignore_command = re.match("^unignore\s+(.+)", args)
    if(ignore_command is not None):
        # Append each new channel to the blacklist, skipping duplicates.
        channels_to_ignore = ignore_command.group(1).split(' ')
        for channel in channels_to_ignore:
            if channel not in ignored_channels:
                ignored_channels.append(channel)
        w.config_set_plugin("ignored_channels", ','.join(ignored_channels))
        w.prnt("", "Updated. Ignored channels: %s" % w.config_get_plugin("ignored_channels"))
    elif(unignore_command is not None):
        # Drop each listed channel from the blacklist if present.
        channels_to_unignore = unignore_command.group(1).split(' ')
        for channel in channels_to_unignore:
            if channel in ignored_channels:
                ignored_channels.remove(channel)
        w.config_set_plugin("ignored_channels", ','.join(ignored_channels))
        w.prnt("", "Updated. Ignored channels: %s" % w.config_get_plugin("ignored_channels"))
    elif(args == "listignores"):
        w.prnt("", "Ignored channels: %s" % w.config_get_plugin("ignored_channels"))
    elif(args == "listdevices"):
        # Fire an async HTTP request; process_devicelist_cb prints the result.
        apikey = w.string_eval_expression(w.config_get_plugin("api_key"), {}, {}, {})
        apiurl = "https://%s@api.pushbullet.com/v2/devices" % (apikey)
        w.hook_process("url:" + apiurl, 20000, "process_devicelist_cb", "")
    else:
        w.prnt("", """
Weebullet requires an API key from your Pushbullet account to work. Set your API key with (evaluated):
/set plugins.var.python.weebullet.api_key <KEY>
Weebullet will by default only send notifications when you are marked away on IRC. You can change this with:
/set plugins.var.python.weebullet.away_only [0|1]
Weebullet will by default send to all devices associated with your Pushbullet account. You can change this with:
/set plugins.var.python.weebullet.device_iden <ID>
Weebullet can ignore repeated notifications if they arrive too often. You can set this with (0 or blank to disable):
/set plugins.var.python.weebullet.min_notify_interval <NUMBER>
You can get a list of your devices from the Pushbullet website, or by using
/weebullet listdevices
""")
    return w.WEECHAT_RC_OK
def process_pushbullet_cb(data, url, status, response, err):
    """URL-hook callback for the push POST: parse the raw HTTP response and
    report failures to the core buffer.

    Returns w.WEECHAT_RC_OK on HTTP 200, w.WEECHAT_RC_ERROR otherwise.
    """
    body = None
    headers = {}
    lines = response.rstrip().splitlines()
    # First line is the HTTP status line, e.g. "HTTP/1.1 200 OK".
    status_code = int(lines.pop(0).split()[1])
    for line in lines:
        if body == "":
            # Past the blank header/body separator: accumulate the body.
            body += line
            continue
        # BUG FIX: maxsplit must be 1 (not 2) so header values that contain
        # ':' themselves (e.g. Date headers) are not misread as malformed.
        header_line = line.split(":", 1)
        if len(header_line) != 2:
            # A blank (or malformed) line terminates the header section.
            body = ""
            continue
        headers[header_line[0].strip()] = header_line[1].strip()
    if status == w.WEECHAT_HOOK_PROCESS_ERROR:
        w.prnt("", "[weebullet] Error sending to pushbullet: %s - %s" % (status, url))
        return w.WEECHAT_RC_ERROR
    # BUG FIX: the original used ``is``/``is not`` here; identity is not
    # equality for ints (401/403 lie outside CPython's small-int cache), so
    # the invalid-token branch could never trigger.
    if status_code == 401 or status_code == 403:
        w.prnt("", "[weebullet] Invalid API Token: %s" % (w.string_eval_expression(w.config_get_plugin("api_key"), {}, {}, {})))
        return w.WEECHAT_RC_ERROR
    if status_code != 200:
        w.prnt("", "[weebullet] Error sending to pushbullet: %s - %s - %s" % (url, status_code, body))
        return w.WEECHAT_RC_ERROR
    return w.WEECHAT_RC_OK
def send_push(title, body):
    """Send a Pushbullet note, honouring rate limiting and connected relays.

    Respects the ``min_notify_interval`` option, skips entirely when a relay
    client is connected and ``ignore_on_relay`` is set, and targets either
    all devices or the configured ``device_iden``.
    """
    global last_notification
    # Rate limiting: drop the push if the previous one was too recent.
    interval = w.config_get_plugin("min_notify_interval")
    if interval is not None and interval != "" and int(interval) != 0:
        interval = int(interval)
        earliest_notification = last_notification + int(interval)
        if last_notification is not None and time.time() <= earliest_notification:
            debug("Too soon since last notification, skipping")
            return w.WEECHAT_RC_OK
    last_notification = time.time()
    # check to see if the relay is connected, ignore if so
    check_relays = w.config_string_to_boolean(w.config_get_plugin('ignore_on_relay'))
    CONNECTED_RELAY = False
    if check_relays:
        infolist = w.infolist_get('relay', '', '')
        if infolist:
            while w.infolist_next(infolist):
                status = w.infolist_string(infolist, 'status_string')
                if status == 'connected':
                    CONNECTED_RELAY = True
                    break
            w.infolist_free(infolist)
    if CONNECTED_RELAY is True:
        # we have a relay conected, don't notify
        debug("Relay is connected, not sending push.")
        return w.WEECHAT_RC_OK
    debug("Sending push. Title: [%s], body: [%s]" % (title, body))
    apikey = w.string_eval_expression(w.config_get_plugin("api_key"), {}, {}, {})
    apiurl = "https://%s@api.pushbullet.com/v2/pushes" % (apikey)
    timeout = 20000 # FIXME - actually use config
    # BUG FIX: the original tested ``len(title) is not 0`` -- an identity
    # comparison on ints; plain truthiness expresses the same intent safely.
    if title or body:
        deviceiden = w.config_get_plugin("device_iden")
        if deviceiden == "all":
            payload = urllib.parse.urlencode({'type': 'note', 'title': title, 'body': body})
        else:
            payload = urllib.parse.urlencode({'type': 'note', 'title': title, 'body': body, 'device_iden': deviceiden})
        w.hook_process_hashtable("url:" + apiurl, {"postfields": payload, "header": "1"}, timeout, "process_pushbullet_cb", "")
def cmd_send_push_note(data, buffer, args):
    """Command handler for /send_push_note: push *args* as the note body."""
    note_title = "Manual Notification from weechat"
    send_push(title=note_title, body=args)
    return w.WEECHAT_RC_OK
def priv_msg_cb(data, bufferp, uber_empty,
                tagsn, isdisplayed,
                ishilight, prefix, message):
    """Print-hook callback: push private messages and channel highlights.

    Honours the away_only / inactive_only options and the per-channel
    ignore list before calling send_push().
    """
    # away_only=1: only notify while marked away on this buffer's server.
    if w.config_get_plugin("away_only") == "1":
        am_away = w.buffer_get_string(bufferp, 'localvar_away')
    else:
        am_away = True
    if not am_away:
        # TODO: make debug a configurable
        debug("Not away, skipping notification")
        return w.WEECHAT_RC_OK
    # If 'inactive_only' is enabled, we need to check if the notification is
    # coming from the active buffer.
    if w.config_get_plugin("inactive_only") == "1":
        if w.current_buffer() == bufferp:
            # The notification came from the current buffer - don't notify
            debug("Notification came from the active buffer, "
                  "skipping notification")
            return w.WEECHAT_RC_OK
    notif_body = u"<%s> %s" % (
        prefix,
        message
    )
    # Check that it's in a "/q" buffer and that I'm not the one writing the msg
    is_pm = w.buffer_get_string(bufferp, "localvar_type") == "private"
    is_notify_private = re.search(r'(^|,)notify_private(,|$)', tagsn) is not None
    # PM (query)
    if (is_pm and is_notify_private):
        send_push(
            title="Privmsg from %s" % prefix,
            body=notif_body
        )
    # Highlight (your nick is quoted)
    elif (str(ishilight) == "1"):
        # Prefer the short buffer name; fall back to the full name.
        bufname = (w.buffer_get_string(bufferp, "short_name") or
                   w.buffer_get_string(bufferp, "name"))
        ignored_channels = get_ignored_channels()
        if bufname not in ignored_channels:
            send_push(
                title="Highlight in %s" % bufname,
                body=notif_body
            )
        else:
            debug("[weebullet] Ignored channel, skipping notification in %s" % bufname)
    return w.WEECHAT_RC_OK
| StarcoderdataPython |
6509350 | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import os
# TODO
# compare vectorizers
def countvectorizer(inputpath=None, text=None):
    """Fit a CountVectorizer on a corpus and print matrix + vocabulary.

    The corpus is read from every file under *inputpath*; *text* (an
    iterable of documents) takes precedence when given.

    Raises:
        ValueError: if neither argument yields a corpus.  (The original
            left ``corpus`` unbound in that case and crashed with a
            confusing NameError.)
    """
    vectorizer = CountVectorizer(min_df=1)
    corpus = []
    if inputpath:
        filenames = [os.path.join(inputpath, name) for name in os.listdir(inputpath)]
        for filename in filenames:
            with open(filename, 'r') as f:
                corpus.append(f.read())
    if text:
        corpus = text
    if not corpus:
        raise ValueError("countvectorizer needs either 'inputpath' or 'text'")
    X = vectorizer.fit_transform(corpus)
    print(X.toarray())
    print(vectorizer.get_feature_names())
def hashvectorizer(inputpath=None, text=None):
    """Transform a corpus with HashingVectorizer and print matrix + params.

    Same corpus-selection rules as ``countvectorizer``: files read from
    *inputpath*, overridden by *text* when given.

    Raises:
        ValueError: if neither argument yields a corpus.  (The original
            left ``corpus`` unbound in that case and crashed with a
            confusing NameError.)
    """
    hv = HashingVectorizer()
    corpus = []
    if inputpath:
        filenames = [os.path.join(inputpath, name) for name in os.listdir(inputpath)]
        for filename in filenames:
            with open(filename, 'r') as f:
                corpus.append(f.read())
    if text:
        corpus = text
    if not corpus:
        raise ValueError("hashvectorizer needs either 'inputpath' or 'text'")
    X = hv.transform(corpus)
    print(X.toarray())
    print(hv.get_params())
def tfidfvectorizer(inputpath=None, text=None):
    """Placeholder for TF-IDF vectorisation; not implemented yet.

    Intended to mirror ``countvectorizer``/``hashvectorizer`` using
    TfidfVectorizer once written.
    """
    # TODO
    pass
if __name__ == "__main__":
inputpath = r'D:\ypai\data\json\text'
hashvectorizer(inputpath=inputpath)
| StarcoderdataPython |
5001276 | <filename>lib/spack/spack/tag.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Classes and functions to manage package tags"""
import collections
import copy
import sys
if sys.version_info >= (3, 5):
from collections.abc import Mapping # novm
else:
from collections import Mapping
import spack.error
import spack.util.spack_json as sjson
def _get_installed_package_names():
    """Return names of packages installed in the active environment."""
    return [spec.name for spec in spack.environment.installed_specs()]
def packages_with_tags(tags, installed, skip_empty):
    """
    Returns a dict, indexed by tag, containing lists of names of packages
    containing the tag or, if no tags, for all available tags.

    Arguments:
        tags (list or None): list of tags of interest or None for all
        installed (bool): True if want names of packages that are installed;
            otherwise, False if want all packages with the tag
        skip_empty (bool): True if exclude tags with no associated packages;
            otherwise, False if want entries for all tags even when no such
            tagged packages
    """
    # BUG FIX: the default factory must be ``list`` itself, not
    # ``lambda: list`` -- the lambda returned the *type* object, so a missing
    # key would have mapped to ``list`` instead of an empty list.
    tag_pkgs = collections.defaultdict(list)
    spec_names = _get_installed_package_names() if installed else []
    keys = spack.repo.path.tag_index if tags is None else tags
    for tag in keys:
        packages = [name for name in spack.repo.path.tag_index[tag] if
                    not installed or name in spec_names]
        if packages or not skip_empty:
            tag_pkgs[tag] = packages
    return tag_pkgs
class TagIndex(Mapping):
    """Mapping from tag names to lists of package names."""

    def __init__(self):
        # defaultdict(list): indexing an unknown tag yields a fresh list.
        self._tag_dict = collections.defaultdict(list)

    @property
    def tags(self):
        """The underlying tag -> package-list dictionary."""
        return self._tag_dict

    def to_json(self, stream):
        """Serialize the index to *stream* as ``{"tags": {...}}``."""
        sjson.dump({'tags': self._tag_dict}, stream)

    @staticmethod
    def from_json(stream):
        """Deserialize a TagIndex previously written by :meth:`to_json`.

        Raises TagIndexError when the payload is not a dict or lacks the
        top-level ``tags`` key.
        """
        payload = sjson.load(stream)
        if not isinstance(payload, dict):
            raise TagIndexError("TagIndex data was not a dict.")
        if 'tags' not in payload:
            raise TagIndexError("TagIndex data does not start with 'tags'")
        index = TagIndex()
        for tag, packages in payload['tags'].items():
            index[tag].extend(packages)
        return index

    def __getitem__(self, item):
        # Note: indexing an unknown tag creates an empty entry (defaultdict).
        return self._tag_dict[item]

    def __iter__(self):
        return iter(self._tag_dict)

    def __len__(self):
        return len(self._tag_dict)

    def copy(self):
        """Return a deep copy of this index."""
        duplicate = TagIndex()
        duplicate._tag_dict = copy.deepcopy(self._tag_dict)
        return duplicate

    def get_packages(self, tag):
        """Return the packages for *tag* without creating a new entry."""
        if tag in self.tags:
            return self.tags[tag]
        return []

    def merge(self, other):
        """Merge another tag index into this one.

        Args:
            other (TagIndex): tag index to be merged
        """
        other = other.copy()  # defensive copy.
        for tag, opkgs in other.tags.items():
            if tag in self.tags:
                # Union with the existing packages, kept sorted and unique.
                self.tags[tag] = sorted(set(self.tags[tag]) | set(opkgs))
            else:
                self.tags[tag] = opkgs

    def update_package(self, pkg_name):
        """Re-index *pkg_name*: drop stale entries, then re-add by its tags.

        Args:
            pkg_name (str): name of the package to be updated in the index
        """
        package = spack.repo.path.get(pkg_name)
        # Remove the package wherever it currently appears.
        for pkg_list in self._tag_dict.values():
            if pkg_name in pkg_list:
                pkg_list.remove(pkg_name)
        # Re-add it under each (lowercased) tag declared by the package.
        for tag in getattr(package, 'tags', []):
            self._tag_dict[tag.lower()].append(package.name)
class TagIndexError(spack.error.SpackError):
    """Raised when there is a problem with a TagIndex (e.g. malformed JSON
    passed to :meth:`TagIndex.from_json`)."""
| StarcoderdataPython |
8012424 | from builtins import range
import numpy as np
from random import shuffle
from past.builtins import xrange
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    #############################################################################
    # TODO: Compute the softmax loss and its gradient using explicit loops.     #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    num_train = X.shape[0]
    num_classes = W.shape[1]
    for i in range(num_train):
        # Class scores for one example; shape (C,).
        scores = X[i].dot(W)
        # Shift by the row max before exponentiating: softmax is invariant
        # under a constant shift, and this prevents exp() overflow for large
        # raw scores (the numeric instability the banner warns about).
        scores -= np.max(scores)
        exp_scores = np.exp(scores)
        prob = exp_scores / np.sum(exp_scores)
        # Indexing with the scalar y[i] keeps ``loss`` a true scalar
        # (previously ``prob[:, y[i]]`` made it a shape-(1,) array).
        loss += -np.log(prob[y[i]])
        for c in range(num_classes):
            if c == y[i]:
                dW[:, c] += (prob[c] - 1) * X[i]
            else:
                dW[:, c] += prob[c] * X[i]

    # Average over the minibatch and add L2 regularization.
    loss /= num_train
    loss += reg * np.sum(W * W)
    dW /= num_train
    dW += 2 * reg * W

    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.

    Inputs and outputs are the same as softmax_loss_naive.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    num_train = len(y)
    scores = np.dot(X, W)
    # Shift each row by its max before exponentiating: softmax is invariant
    # under a per-row constant shift, and this prevents exp() overflow.
    scores -= np.max(scores, axis=1, keepdims=True)
    exp_scores = np.exp(scores)
    prob = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    # Cross-entropy data loss plus L2 regularization.
    loss = np.sum(-np.log(prob[np.arange(num_train), y])) / num_train
    loss += reg * np.sum(W * W)
    # dL/dscores = prob - onehot(y); backprop through scores = X @ W.
    dscores = prob.copy()
    dscores[np.arange(num_train), y] -= 1.0
    dW = np.dot(X.T, dscores) / num_train
    dW += 2 * reg * W

    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    return loss, dW
| StarcoderdataPython |
4837059 | <filename>src/rf_network/plugins.py
# -*- coding: utf-8 -*-
import importlib
import sys
import pluggy
from . import hookspecs
# Connection back ends shipped with the package itself; external back ends
# register through the "rf_network" setuptools entry point instead.
DEFAULT_PLUGINS = (
    "rf_network.connection.netmiko",
    "rf_network.connection.scrapli",
)

# Process-wide plugin manager for the "rf_network" hook namespace.
plugin_manager = pluggy.PluginManager("rf_network")
plugin_manager.add_hookspecs(hookspecs)

# NOTE(review): ``sys._called_from_test`` is presumably set by the test
# suite (e.g. in conftest.py) -- confirm; it keeps test runs from importing
# arbitrary third-party entry-point plugins.
if not hasattr(sys, "_called_from_test"):
    # Only load plugins if not running tests
    plugin_manager.load_setuptools_entrypoints("rf_network")

# Load default plugins
for plugin in DEFAULT_PLUGINS:
    mod = importlib.import_module(plugin)
    plugin_manager.register(mod, plugin)
| StarcoderdataPython |
8025223 | <filename>bims/models/data_source.py
from django.contrib.gis.db import models
class DataSource(models.Model):
    """Data source for forms.

    Shown as "<name> - <category>" when a category is set, otherwise just
    the name.
    """

    name = models.CharField(
        null=False,
        blank=False,
        max_length=200
    )
    category = models.CharField(
        null=True,
        blank=True,
        max_length=100
    )
    description = models.TextField(
        null=True,
        blank=True
    )

    def __str__(self):
        # FIX: only ``__unicode__`` was defined, which Python 3 / modern
        # Django ignores -- admin and shell output would fall back to the
        # default "DataSource object (pk)" representation.
        if self.category:
            return '{name} - {category}'.format(
                name=self.name,
                category=self.category
            )
        return self.name

    # Kept for backward compatibility with any Python 2 era callers.
    __unicode__ = __str__
| StarcoderdataPython |
3341556 | <filename>resources/lib/settingsCommandline.py
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
"""
The base settings module
Copyright 2020, Mediathekview
"""
from resources.lib.settingsInterface import SettingsInterface
class SettingsCommandline(SettingsInterface):
    """Standalone (command line driven) implementation of the settings class.

    All values come from a parsed ``argparse`` namespace instead of Kodi
    addon settings.
    """

    def __init__(self, args):
        self.__datapath = args.path if args.dbtype == 'sqlite' else './'
        # 0 = sqlite, 1 = mysql
        self.__type = {'sqlite': 0, 'mysql': 1}.get(args.dbtype, 0)
        # Initialize backend-specific attributes up front so the getters
        # below never raise AttributeError for the inactive backend.
        self.__updnative = False
        self.__host = None
        self.__port = 0
        self.__user = None
        self.__password = None
        self.__database = None
        if self.__type == 0:
            self.__updnative = args.native
        elif self.__type == 1:
            self.__host = args.host
            self.__port = int(args.port)
            self.__user = args.user
            # BUG FIX: this line previously held the literal placeholder
            # ``<PASSWORD>`` (a syntax error left by credential redaction);
            # presumably the command line argument is ``--password``.
            self.__password = args.password
            self.__database = args.database
        self.__updmode = 4
        self.__updinterval = args.intervall
        self.__force = args.force
        self.__full = args.full
        self.__updateBatchSize = args.updateBatchSize
        # Runtime state (not user configurable).
        self._lastFullUpdate = 0
        self._lastUpdate = 0
        self._databaseStatus = 'UNINIT'
        self._databaseVersion = 0

    def getDatapath(self):
        return self.__datapath

    # Database

    def getDatabaseType(self):
        return self.__type

    def getDatabaseHost(self):
        return self.__host

    def getDatabasePort(self):
        return self.__port

    def getDatabaseUser(self):
        return self.__user

    def getDatabasePassword(self):
        return self.__password

    def getDatabaseSchema(self):
        return self.__database

    def getDatabaseUpateMode(self):
        # (sic) method name kept for interface compatibility.
        # A --full run forces the full update mode (9).
        if self.__full:
            return 9
        else:
            return self.__updmode

    def getDatabaseUpdateNative(self):
        return self.__updnative

    def getDatabaseUpdateInvterval(self):
        # (sic) method name kept for interface compatibility.
        # --force shrinks the reported interval to 1 so an update is
        # presumably always considered due -- confirm against the caller.
        if self.__force:
            return 1
        else:
            return self.__updinterval

    def getDatabaseImportBatchSize(self):
        return self.__updateBatchSize

    # RUNTIME

    def is_user_alive(self):
        # No interactive UI on the command line; always alive.
        return True

    def getLastFullUpdate(self):
        return self._lastFullUpdate

    def setLastFullUpdate(self, aLastFullUpdate):
        self._lastFullUpdate = aLastFullUpdate

    def getLastUpdate(self):
        return self._lastUpdate

    def setLastUpdate(self, aLastUpdate):
        self._lastUpdate = aLastUpdate

    def getDatabaseStatus(self):
        return self._databaseStatus

    def setDatabaseStatus(self, aStatus):
        self._databaseStatus = aStatus

    def getDatabaseVersion(self):
        return self._databaseVersion

    def setDatabaseVersion(self, aVersion):
        self._databaseVersion = aVersion
| StarcoderdataPython |
393331 | <reponame>kr-g/pttydev
import serial # its pyserial
def pttyopen(**kwargs):
    """Open and return a pyserial connection.

    An open *function* (rather than an open port) is required so the caller
    can reconnect the device properly after an error.
    """
    return serial.Serial(**kwargs)
| StarcoderdataPython |
8085621 | """
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import django
from channels.http import AsgiHandler
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.security.websocket import AllowedHostsOriginValidator
from dakara_server.routing import websocket_urlpatterns
from dakara_server.token_auth import TokenAuthMiddlewareStack
# Point Django at the development settings unless the environment already
# chose a settings module, then initialise Django before the routing below
# imports any consumers/models.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dakara_server.settings.development")
django.setup()

# ASGI entry point: plain HTTP goes through Django's regular handler;
# websocket connections are origin-checked (against ALLOWED_HOSTS) and
# token-authenticated before URL routing.
application = ProtocolTypeRouter(
    {
        "http": AsgiHandler(),
        "websocket": AllowedHostsOriginValidator(
            TokenAuthMiddlewareStack(URLRouter(websocket_urlpatterns))
        ),
    }
)
| StarcoderdataPython |
9648792 | """
13C Pure In-phase D-CEST
========================
Analyzes chemical exchange in the presence of 1H composite decoupling during
the D-CEST block. This keeps the spin system purely in-phase throughout, and
is calculated using the (3n)×(3n), single-spin matrix, where n is the number
of states::
{ Ix(a), Iy(a), Iz(a),
Ix(b), Iy(b), Iz(b), ... }
References
----------
| Yuwen, Kay and Bouvignies. ChemPhysChem (2018) 19:1707-1710
| Yuwen, Bouvignies and Kay. J Mag Reson (2018) 292:1-7
Note
----
A sample configuration file for this module is available using the command::
$ chemex config dcest_13c
"""
import functools as ft
import numpy as np
import numpy.linalg as la
import chemex.experiments.helper as ceh
import chemex.helper as ch
import chemex.nmr.constants as cnc
import chemex.nmr.liouvillian as cnl
# JSON schema for the "experiment" section of a D-CEST configuration file;
# ``read`` validates the user's config against this (via ``ch.validate``)
# before building the experiment.  ``b1_inh_scale``/``b1_inh_res`` control
# the B1 inhomogeneity model; ``observed_state`` is a single lower-case
# state letter (default "a").
_SCHEMA = {
    "type": "object",
    "properties": {
        "experiment": {
            "type": "object",
            "properties": {
                "time_t1": {"type": "number"},
                "carrier": {"type": "number"},
                "b1_frq": {"type": "number"},
                "pw90": {"type": "number"},
                "sw": {"type": "number"},
                "time_equil": {"type": "number", "default": 0.0},
                "b1_inh_scale": {"type": "number", "default": 0.1},
                "b1_inh_res": {"type": "integer", "default": 11},
                "observed_state": {
                    "type": "string",
                    "pattern": "[a-z]",
                    "default": "a",
                },
            },
            "required": ["time_t1", "carrier", "b1_frq", "pw90", "sw"],
        }
    },
}
def read(config):
    """Build the D-CEST experiment from a configuration dictionary.

    The config is validated against ``_SCHEMA`` first; the spin basis and
    the default fitted-parameter selection are attached to the config before
    the generic loader constructs the experiment around ``PulseSeq``.
    """
    ch.validate(config, _SCHEMA)
    config["basis"] = cnl.Basis(type="ixyz", spin_system="ch")
    config["fit"] = _fit_this()
    return ceh.load_experiment(config=config, pulse_seq_cls=PulseSeq)
def _fit_this():
return {
"rates": ["r2_i_{states}", "r1_i_{observed_state}"],
"model_free": ["tauc_{observed_state}", "s2_{observed_state}"],
}
class PulseSeq:
    """Propagates the spin system through the 13C in-phase D-CEST sequence.

    The saturation block is modelled as a DANTE train: ``ncyc`` repetitions
    of a short hard pulse followed by a free-precession delay, one period
    per 1/sw.
    """

    def __init__(self, config, propagator):
        # ``propagator`` is the project Liouvillian wrapper built for the
        # "ixyz"/"ch" basis declared in ``read``.
        self.prop = propagator
        settings = config["experiment"]
        self.time_t1 = settings["time_t1"]
        self.prop.carrier_i = settings["carrier"]
        self.sw = settings["sw"]
        # Width of one DANTE pulse element, scaled from the calibrated pw90
        # by b1_frq/sw (factor 4 because pw90 is a quarter rotation).
        self.pw90 = 4.0 * settings["pw90"] * settings["b1_frq"] / settings["sw"]
        self.time_eq = settings["time_equil"]
        # Inter-pulse delay: one DANTE period (1/sw) minus the pulse itself.
        self.tau_dante = 1.0 / settings["sw"] - self.pw90
        # Number of DANTE cycles in time_t1 (+0.1 guards against float
        # truncation when the product lands fractionally below an integer).
        self.ncyc = int(settings["time_t1"] * settings["sw"] + 0.1)
        self.prop.b1_i = 1.0 / (4.0 * settings["pw90"])
        self.prop.b1_i_inh_scale = settings["b1_inh_scale"]
        self.prop.b1_i_inh_res = settings["b1_inh_res"]
        # For 13C-labelled samples, include the scalar-coupling multiplet
        # pattern of the observed atom in the Liouvillian.
        if "13C" in config["conditions"].label:
            spin_system = config["spin_system"]
            symbol = spin_system.symbols["i"]
            atom = spin_system.atoms["i"]
            self.prop.jeff_i = cnc.get_multiplet(symbol, atom.name)
        self.observed_state = settings["observed_state"]
        # Detect longitudinal magnetization of the observed state.
        self.prop.detection = f"[iz_{self.observed_state}]"
        # b1_inh_scale == inf flags a fully dephased B1 treatment.
        # NOTE(review): ``dephased`` is not read within this class;
        # presumably consumed elsewhere -- confirm before removing.
        self.dephased = settings["b1_inh_scale"] == np.inf

    # NOTE(review): lru_cache on an instance method keeps ``self`` alive in
    # the cache for the life of the process (ruff B019); acceptable if
    # PulseSeq instances are few and long-lived -- confirm.
    @ft.lru_cache(maxsize=10000)
    def calculate(self, offsets, params_local):
        """Return the detected intensity for each requested offset.

        Offsets with |offset| > 1e4 skip the DANTE saturation block entirely
        (presumably reference points far off resonance -- confirm) and only
        see the equilibration delay.
        """
        self.prop.update(params_local)
        start = self.prop.get_equilibrium()
        intst = {}
        # Optional equilibration delay applied after the saturation period.
        d_eq = (
            self.prop.delays(self.time_eq) if self.time_eq > 0.0 else self.prop.identity
        )
        for offset in set(offsets):
            if abs(offset) <= 1e4:
                self.prop.offset_i = offset
                p_delay = self.prop.delays(self.tau_dante)
                p_pulse = self.prop.pulse_i(self.pw90, 0.0)
                # One DANTE cycle = pulse then delay, repeated ncyc times.
                intst[offset] = (
                    d_eq @ la.matrix_power(p_delay @ p_pulse, self.ncyc) @ start
                )
            else:
                intst[offset] = d_eq @ start
        return np.array([self.prop.detect(intst[offset]) for offset in offsets])

    def offsets_to_ppms(self, offsets):
        """Convert offsets to chemical-shift (ppm) values via the propagator."""
        return self.prop.offsets_to_ppms(offsets)

    def ppms_to_offsets(self, ppms):
        """Convert chemical-shift (ppm) values to offsets via the propagator."""
        return self.prop.ppms_to_offsets(ppms)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.