id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9723388 | """
MEGNetTrain.py, SciML-SCD, RAL
Trains on the optical properties of materials using the MEGNet
of materials. Refer to https://github.com/materialsvirtuallab/megnet
for more information on MEGNet.
"""
import sys
import subprocess
import logging
import os
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"),
format="%(levelname)s:gp-net: %(message)s")
import numpy as np
from keras.callbacks import ModelCheckpoint
class training:
    """MEGNet training routines for optical properties of materials.

    Refer to https://github.com/materialsvirtuallab/megnet for more
    information on MEGNet.  All methods are intended to be called on
    the class itself (e.g. ``training.train_test_split(...)``), so
    they are declared as static methods.
    """

    @staticmethod
    def _resolve_prev_model(path):
        """Return *path* if a pre-trained model file exists there.

        Logs the outcome; returns ``None`` (train from scratch) when no
        model file is found.
        """
        if os.path.isfile(path):
            logging.info("Pre-trained model: %s found" % path)
            return path
        logging.info("No previous model found ...")
        logging.info("Training without a previous model ...")
        return None

    @staticmethod
    def train_test_split(datadir, prop, prev, model, batch, epochs, Xpool,
                         ypool, Xtest, ytest):
        """MEGNet training on a train-test split dataset.

        In this instance, the pool is the training set.

        Args:
            datadir: Directory into which results are written.
            prop: Optical property of interest.
            prev: ``bool`` flag controlling reuse of a previous model, or
                a ``str`` path to a best fitted MEGNet model (e.g. from
                k-fold cross-validation).
            model: MEGNet model object used for training.
            batch: Batch size for training.
            epochs: Number of training iterations.
            Xpool: Structures for training.
            ypool: Targets for training.
            Xtest: Structures for testing.
            ytest: Targets for testing.

        Outputs:
            1- A fitted model of the optical property of interest.
            2- Best model, usable in the next round of training.
        """
        # exist_ok avoids a race between an isdir check and makedirs.
        os.makedirs(datadir, exist_ok=True)
        logging.info("Writing data to file ...")
        np.save("%s/Xpool.npy" % datadir, Xpool)
        np.save("%s/ypool.npy" % datadir, ypool)
        np.save("%s/Xtest.npy" % datadir, Xtest)
        np.save("%s/ytest.npy" % datadir, ytest)

        if isinstance(prev, bool):
            if not prev:
                logging.info("No previous model will be used ...")
                prev_file = None
            else:
                logging.info("Searching for a previous model ...")
                prev_file = training._resolve_prev_model(
                    "%s/model-best-new-%s.h5" % (datadir, prop))
        elif isinstance(prev, str):
            # Path of the best fitted model from k-fold cross-validation.
            prev_file = training._resolve_prev_model(prev)
        else:
            # Previously an unexpected type left prev_file undefined and
            # crashed with a NameError; fall back to a fresh start instead.
            logging.info("Training without a previous model ...")
            prev_file = None

        checkpoint = ModelCheckpoint("%s/model-best-new-%s.h5" % (datadir, prop),
                                     verbose=1, monitor="val_loss",
                                     save_best_only=True, mode="auto")
        model.train(Xpool, ypool, epochs=epochs, batch_size=batch,
                    validation_structures=Xtest, validation_targets=ytest,
                    scrub_failed_structures=True, prev_model=prev_file,
                    callbacks=[checkpoint])
        model.save_model("%s/fitted_%s_model.hdf5" % (datadir, prop))
        # Clean up MEGNet's scratch directory.  shutil.rmtree is portable
        # (unlike shelling out to ``rm -r``) and ignore_errors tolerates
        # the directory being absent.
        import shutil
        shutil.rmtree("callback/", ignore_errors=True)

    @staticmethod
    def k_fold(datadir, fold, prop, prev, model, batch, epochs, Xtrain, ytrain,
               Xval, yval):
        """MEGNet training on one fold of k-fold cross-validation.

        Args:
            datadir: Directory into which results are written.
            fold: Index of the fold to be processed.
            prop: Optical property of interest.
            prev: Whether to look for a previously fitted model.
            model: MEGNet model object used for training.
            batch: Batch size for training.
            epochs: Number of training iterations.
            Xtrain: Structures for training.
            ytrain: Targets for training.
            Xval: Structures for validation.
            yval: Targets for validation.

        Outputs:
            1- A fitted model of the optical property of interest.
            2- Best model, usable in the next round of training.
        """
        os.makedirs(datadir, exist_ok=True)
        logging.info("Writing data to file ...")
        np.save("%s/Xtrain.npy" % datadir, Xtrain)
        np.save("%s/ytrain.npy" % datadir, ytrain)
        np.save("%s/Xval.npy" % datadir, Xval)
        np.save("%s/yval.npy" % datadir, yval)

        if prev is False:
            logging.info("No previous model will be used ...")
            prev_file = None
        else:
            logging.info("Searching for a previous model ...")
            if fold == 0:
                candidate = "%s/model-best-new-%s.h5" % (datadir, prop)
            else:
                # Best model produced by the previous fold.
                candidate = "k_fold/%s_results/0%s_fold/model-best-new-%s.h5" % (
                    prop, fold - 1, prop)
            prev_file = training._resolve_prev_model(candidate)

        checkpoint = ModelCheckpoint("%s/model-best-new-%s.h5" % (datadir, prop),
                                     verbose=1, monitor="val_loss",
                                     save_best_only=True, mode="auto")
        model.train(Xtrain, ytrain, epochs=epochs, batch_size=batch,
                    validation_structures=Xval, validation_targets=yval,
                    scrub_failed_structures=True, prev_model=prev_file,
                    callbacks=[checkpoint])
        model.save_model("%s/fitted_%s_model.hdf5" % (datadir, prop))

    @staticmethod
    def active(datadir, i, prop, prev, model, sampling, batch, epochs, Xpool,
               ypool, Xtest, ytest):
        """MEGNet training for active learning.

        A model pre-trained in a previous query is reused in the next
        query.

        Args:
            datadir: Directory into which results are written.
            i: Number of active-learning iterations performed.
            prop: Optical property of interest.
            prev: Whether to look for a previously fitted model.
            model: MEGNet model object used for training.
            sampling: Type of sampling used to move data from the test
                set to the pool.
            batch: Batch size for training.
            epochs: Number of training iterations.
            Xpool: Structures for training.
            ypool: Targets for training.
            Xtest: Structures for testing.
            ytest: Targets for testing.

        Outputs:
            1- A fitted model of the optical property of interest.
            2- Best model, usable in the next round of training.
        """
        os.makedirs(datadir, exist_ok=True)
        logging.info("Writing data to file ...")
        np.save("%s/Xpool.npy" % datadir, Xpool)
        np.save("%s/ypool.npy" % datadir, ypool)
        np.save("%s/Xtest.npy" % datadir, Xtest)
        np.save("%s/ytest.npy" % datadir, ytest)

        # The best model of the previous iteration is reused; the first
        # iteration (i == 0) reads from its own slot 0.
        j = max(i - 1, 0)
        if prev is False:
            logging.info("No previous model will be used ...")
            prev_file = None
        else:
            logging.info("Searching for a previous model ...")
            prev_file = training._resolve_prev_model(
                "active_learn/repeat/%s_results/%s/0%s_model/model-best-new-%s.h5" % (
                    prop, sampling, j, prop))

        checkpoint = ModelCheckpoint("%s/model-best-new-%s.h5" % (datadir, prop),
                                     verbose=1, monitor="val_loss",
                                     save_best_only=True, mode="auto")
        model.train(Xpool, ypool, epochs=epochs, batch_size=batch,
                    validation_structures=Xtest, validation_targets=ytest,
                    scrub_failed_structures=True, prev_model=prev_file,
                    callbacks=[checkpoint])
        model.save_model("%s/fitted_%s_model.hdf5" % (datadir, prop))
| StarcoderdataPython |
6604004 | <reponame>laloe/djangoRest
from django.conf.urls import url, include
from .models import (Proveedor, Producto,
Unidad, Inventario, Entradas)
from rest_framework import routers, serializers, viewsets, generics
from rest_framework.response import Response
from rest_framework import filters
from .forms import ProveedorForm
class UnidadSerializer(serializers.HyperlinkedModelSerializer):
    # Serializes a measurement unit; exposes only its id and name.
    class Meta:
        model = Unidad
        fields = ('id', 'nombre')
class ProveedorProductosSerializer(serializers.HyperlinkedModelSerializer):
    # Compact supplier representation for embedding inside product
    # serializers (id and name only; full detail is ProveedorSerializer).
    class Meta:
        model = Proveedor
        fields = ('id', 'nombre')
class ProductoSerializer(serializers.HyperlinkedModelSerializer):
    # Nested read of the related unit and supplier (rendered as objects,
    # not hyperlinks).
    unidad = UnidadSerializer()
    proveedor = ProveedorProductosSerializer()

    class Meta:
        model = Producto
        fields = ('id', 'upc', 'proveedor', 'nombre',
                  'unidad', 'precio_entrada', 'precio_salida', 'is_active')
class EntradasSerializer(serializers.HyperlinkedModelSerializer):
    # Stock-entry serializer; embeds the unit and supplier like
    # ProductoSerializer and adds quantity and date fields.
    unidad = UnidadSerializer()
    proveedor = ProveedorProductosSerializer()

    class Meta:
        model = Entradas
        fields = ('id', 'upc', 'proveedor', 'nombre', 'unidad',
                  'precio_entrada', 'precio_salida', 'cantidad', 'fecha')
class ProveedorSerializer(serializers.HyperlinkedModelSerializer):
    # Full supplier representation including contact details.
    class Meta:
        model = Proveedor
        fields = ('id', 'nombre', 'telefono', 'correo', 'direccion')
class InventarioSerializer(serializers.HyperlinkedModelSerializer):
    # Inventory row with the full product embedded as a nested object.
    producto = ProductoSerializer()

    class Meta:
        model = Inventario
        fields = ('id', 'producto', 'cantidad', 'fecha')
| StarcoderdataPython |
11264414 | <reponame>fei-protocol/checkthechain
from __future__ import annotations
import typing
from ctc import spec
from ctc import rpc
from . import block_time_search
from . import block_time_singular
async def async_get_blocks_of_timestamps(
    timestamps: typing.Sequence[int],
    block_timestamps: typing.Optional[typing.Mapping[int, int]] = None,
    block_number_array: typing.Optional[spec.NumpyArray] = None,
    block_timestamp_array: typing.Optional[spec.NumpyArray] = None,
    nary: typing.Optional[int] = None,
    cache: typing.Optional[block_time_search.BlockTimestampSearchCache] = None,
    provider: spec.ProviderSpec = None,
    use_db: bool = True,
) -> list[int]:
    """Resolve each timestamp to a block number.

    Uses precomputed block/timestamp arrays when provided; otherwise
    consults the local db (if ``use_db``) and falls back to concurrent
    RPC-node searches for any timestamps the db cannot answer.

    note: once parallel node search created, use that
    """
    # Fast path: resolve against in-memory arrays (no db/node access).
    if block_timestamps is not None or (
        block_number_array is not None and block_timestamp_array is not None
    ):
        import numpy as np

        # Derive the missing array(s) from the block->timestamp mapping.
        if block_timestamp_array is None:
            if block_timestamps is None:
                raise Exception('must specify more arguments')
            block_timestamp_array = np.array(list(block_timestamps.values()))
        if block_number_array is None:
            if block_timestamps is None:
                raise Exception('must specify more arguments')
            block_number_array = np.array(list(block_timestamps.keys()))
        blocks = []
        for timestamp in timestamps:
            block = block_time_singular.get_block_of_timestamp_from_arrays(
                timestamp=timestamp,
                block_timestamp_array=block_timestamp_array,
                block_number_array=block_number_array,
                verbose=False,
            )
            blocks.append(block)
        return blocks

    else:
        # get timestamps from db
        if use_db:
            from ctc import db

            network = rpc.get_provider_network(provider)
            db_blocks = await db.async_query_timestamps_blocks(
                network=network,
                timestamps=timestamps,
            )

            # package non-null results; nulls go to the rpc fallback below
            results = {}
            remaining_timestamps: list[int] = []
            for possible_block, timestamp in zip(db_blocks, timestamps):
                if possible_block is None:
                    remaining_timestamps.append(timestamp)
                else:
                    results[timestamp] = possible_block
        else:
            remaining_timestamps = list(timestamps)
            results = {}

        # get timestamps from rpc node, all lookups run concurrently
        if len(remaining_timestamps) > 0:
            coroutines = []
            for timestamp in remaining_timestamps:
                # use_db=False here: the db was already consulted above
                coroutine = block_time_singular.async_get_block_of_timestamp(
                    timestamp=timestamp,
                    verbose=False,
                    cache=cache,
                    nary=nary,
                    provider=provider,
                    use_db=False,
                )
                coroutines.append(coroutine)
            import asyncio

            node_blocks = await asyncio.gather(*coroutines)
            node_results = dict(zip(remaining_timestamps, node_blocks))
            results.update(node_results)

        # combine, preserving the order of the input timestamps
        return [results[timestamp] for timestamp in timestamps]
| StarcoderdataPython |
6412409 | # -*- coding: utf-8 -*-
# ==============================================================================
#
# Authors: <NAME> <<EMAIL>>
#
# Copyright (c) 2017 <NAME> . All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ==============================================================================
# Declare: Custom decorators taken from TextBlob library. refer to <textblob.decoractors.py>
from __future__ import absolute_import
from functools import wraps
from jgtextrank.exceptions import MissingCorpusError
class cached_property(object):
    """Descriptor that computes a property once per instance and then
    stores the result as an ordinary instance attribute, which shadows
    the descriptor on subsequent lookups.  Deleting the attribute
    resets the property.

    Credit to the author of bottle.py for the original recipe.
    """

    def __init__(self, func):
        self.func = func
        self.__doc__ = getattr(func, '__doc__')

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        result = self.func(obj)
        # Cache under the function's name; instance dict wins next time.
        obj.__dict__[self.func.__name__] = result
        return result
def requires_nltk_corpus(func):
    """Wrap a function that requires an NLTK corpus.

    If the corpus isn't found, the underlying ``LookupError`` is
    re-raised as a :exc:`MissingCorpusError`.  Exception chaining
    (``raise ... from err``) keeps the original lookup details in the
    traceback instead of printing them to stdout and discarding them.
    """
    @wraps(func)
    def decorated(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except LookupError as err:
            raise MissingCorpusError() from err
    return decorated
| StarcoderdataPython |
8125108 | <gh_stars>10-100
import scipy.stats as sc
import numpy as np
def test_lognorm_pdf():
    """lognorm.pdf agrees with scipy's reference implementation."""
    from numba_stats import lognorm

    points = np.linspace(0, 5, 10)
    actual = lognorm.pdf(points, 1.5, 0.1, 1.2)
    reference = sc.lognorm.pdf(points, 1.5, 0.1, 1.2)
    np.testing.assert_allclose(actual, reference)
def test_lognorm_cdf():
    """lognorm.cdf agrees with scipy's reference implementation."""
    from numba_stats import lognorm

    points = np.linspace(0, 5, 10)
    actual = lognorm.cdf(points, 1.5, 0.1, 1.2)
    reference = sc.lognorm.cdf(points, 1.5, 0.1, 1.2)
    np.testing.assert_allclose(actual, reference)
def test_lognorm_ppf():
    """lognorm.ppf agrees with scipy's reference implementation."""
    from numba_stats import lognorm

    quantiles = np.linspace(0, 1, 10)
    actual = lognorm.ppf(quantiles, 1.5, 0.1, 1.2)
    reference = sc.lognorm.ppf(quantiles, 1.5, 0.1, 1.2)
    np.testing.assert_allclose(actual, reference)
| StarcoderdataPython |
5181210 | # Provided under the CC-BY-SA license. Please cite the accompanying paper when using TOP dataset -
# @ARTICLE {
# author = "<NAME> and <NAME> and <NAME> and <NAME> and <NAME>",
# title = "Semantic Parsing for Task Oriented Dialog using Hierarchical Representations",
# journal = "Conference on Empirical Methods in Natural Language Processing (EMNLP)",
# year = "2018",
# month = "Oct"
# }
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch, MagicMock
import transformers
from new_semantic_parsing.schema_tokenizer import TopSchemaTokenizer
class TransformersTokenizerMock:
    """Minimal stand-in for a transformers tokenizer used by the tests.

    Token strings have the form ``tok<N>``: encoding splits on commas
    and strips the ``tok`` prefix to get integer ids; decoding attaches
    the prefix back and joins with spaces.
    """

    cls_token = "[CLS]"
    cls_token_id = 101

    def encode(self, x, add_special_tokens=False):
        return [int(piece[3:]) for piece in x.split(",")]

    def decode(self, x):
        return " ".join(f"tok{i}" for i in x)

    @classmethod
    def from_pretrained(cls, path):
        # Loading is a no-op: just build a fresh mock.
        return cls()

    def save_pretrained(self, path):
        # Saving is a no-op for the mock.
        pass
class TopSchemaTokenizerTest(unittest.TestCase):
    """Tests for TopSchemaTokenizer: schema tokenization, encoding and
    decoding against a mocked source tokenizer, save/load round-trips,
    and text postprocessing (punctuation, apostrophes)."""

    def setUp(self):
        # Candidate name for a scratch directory; created lazily by save().
        self.tmpdirname = next(tempfile._get_candidate_names())

    def tearDown(self):
        # Remove the scratch directory only if a test actually created it.
        if os.path.exists(self.tmpdirname):
            shutil.rmtree(self.tmpdirname)

    def test_tokenize(self):
        """Bracketed TOP schemas split into bracket/label/word tokens."""
        # Test cases are examples from TOP dataset arxiv.org/abs/1810.07942
        schema_str = "[IN:INTENT1 tok1 tok2 tok3 [SL:SLOT1 tok4 tok5 ] ]"
        schema_tok = "[ IN: INTENT1 tok1 tok2 tok3 [ SL: SLOT1 tok4 tok5 ] ]".split(" ")
        res = TopSchemaTokenizer.tokenize(schema_str)
        self.assertSequenceEqual(res, schema_tok)
        schema_str = (
            "[IN:GET_EVENT Any [SL:CATEGORY_EVENT festivals ] [SL:DATE_TIME this weekend ] ]"
        )
        schema_tok = (
            "[ IN: GET_EVENT Any [ SL: CATEGORY_EVENT festivals ] [ SL: DATE_TIME this weekend ] ]"
        ).split(" ")
        res = TopSchemaTokenizer.tokenize(schema_str)
        self.assertSequenceEqual(res, schema_tok)
        # Deeply nested example with an intent inside a slot.
        schema_str = (
            "[IN:GET_ESTIMATED_ARRIVAL What time will I arrive at "
            "[SL:DESTINATION [IN:GET_LOCATION_HOME [SL:CONTACT_RELATED "
            "my ] [SL:TYPE_RELATION Mom ] 's house ] ] if I leave "
            "[SL:DATE_TIME_DEPARTURE in five minutes ] ? ]"
        )
        schema_tok = (
            "[ IN: GET_ESTIMATED_ARRIVAL What time will I arrive at "
            "[ SL: DESTINATION [ IN: GET_LOCATION_HOME [ SL: CONTACT_RELATED "
            "my ] [ SL: TYPE_RELATION Mom ] 's house ] ] if I leave "
            "[ SL: DATE_TIME_DEPARTURE in five minutes ] ? ]"
        )
        schema_tok = schema_tok.split(" ")
        tokens = TopSchemaTokenizer.tokenize(schema_str)
        self.assertSequenceEqual(tokens, schema_tok)

    def test_encode_nocls(self):
        """Encoding when the source ids carry no [CLS] prefix."""
        vocab = {"[", "]", "IN:", "INTENT1", "SL:", "SLOT1"}
        src_tokenizer = TransformersTokenizerMock()
        tokenizer = TopSchemaTokenizer(vocab, src_tokenizer)
        schema_str = "[IN:INTENT1 tok6,tok2 tok31 [SL:SLOT1 tok42 tok5 ] ]"
        source_tokens = [6, 2, 31, 42, 5]
        # note that TransformersTokenizerMock splits tok6,tok2 into two subtokens
        # note that the vocabulary is sorted
        # fmt: off
        expected_ids = [tokenizer.bos_token_id, 7, 3, 4, 9, 10, 11, 7, 5, 6, 12, 13, 8, 8, tokenizer.eos_token_id]
        # fmt: on
        ids = tokenizer.encode(schema_str, source_tokens)
        self.assertSequenceEqual(ids, expected_ids)

    def test_encode_cls(self):
        """Encoding when source ids start with a [CLS] token (pointer offsets shift by one)."""
        vocab = ["[", "]", "IN:", "INTENT1", "SL:", "SLOT1"]
        src_tokenizer = TransformersTokenizerMock()
        tokenizer = TopSchemaTokenizer(vocab, src_tokenizer)
        schema_str = "[IN:INTENT1 tok6,tok2 tok31 [SL:SLOT1 tok42 tok5 ] ]"
        source_tokens = [TransformersTokenizerMock.cls_token_id, 6, 2, 31, 42, 5]
        # note that TransformersTokenizerMock splits tok6,tok2 into two subtokens
        # fmt: off
        expected_ids = [tokenizer.bos_token_id, 7, 3, 4, 10, 11, 12, 7, 5, 6, 13, 14, 8, 8, tokenizer.eos_token_id]
        # fmt: on
        ids = tokenizer.encode(schema_str, source_tokens)
        self.assertSequenceEqual(ids, expected_ids)

    def test_keywords_in_text(self):
        """A word that looks like a schema keyword is still treated as plain text."""
        vocab = ["[", "]", "IN:", "INTENT1", "SL:", "SLOT1", "SLT1"]
        src_tokenizer = TransformersTokenizerMock()
        tokenizer = TopSchemaTokenizer(vocab, src_tokenizer)
        # i.e. SLOT1 after tok2 is just a token which is written exactly like a schema word
        schema_str = "[IN:INTENT1 tok6 tok2 SLT1 tok31 [SL:SLOT1 tok42 tok5 ] ]"
        source_tokens = [6, 2, 1, 31, 42, 5]
        # fmt: off
        expected_ids = [tokenizer.bos_token_id, 8, 3, 4, 10, 11, 12, 13, 8, 5, 6, 14, 15, 9, 9, tokenizer.eos_token_id]
        # fmt: on
        ids = tokenizer.encode(schema_str, source_tokens)
        self.assertSequenceEqual(ids, expected_ids)

    def test_save_load(self):
        """A saved tokenizer reloads with the same vocab and encodings."""
        vocab = ["[", "]", "IN:", "INTENT1", "SL:", "SLOT1"]
        src_tokenizer = TransformersTokenizerMock()
        tokenizer = TopSchemaTokenizer(vocab, src_tokenizer)
        schema_str = "[IN:INTENT1 tok6,tok2 tok31 [SL:SLOT1 tok42 tok5 ] ]"
        source_tokens = [6, 2, 31, 42, 5]
        ids = tokenizer.encode(schema_str, source_tokens)
        tokenizer.save(self.tmpdirname, encoder_model_type="test_type")
        # Patch out the HuggingFace loaders so no real model files are needed.
        patch_tok_load = patch(
            "new_semantic_parsing.schema_tokenizer.transformers.AutoTokenizer.from_pretrained",
            MagicMock(return_value=TransformersTokenizerMock()),
        )
        patch_config_load = patch(
            "new_semantic_parsing.schema_tokenizer.transformers.AutoConfig.from_pretrained",
            MagicMock(return_value=None),
        )
        with patch_tok_load, patch_config_load:
            loaded_tokenizer = TopSchemaTokenizer.load(self.tmpdirname)
        self.assertSetEqual(set(loaded_tokenizer.vocab), set(tokenizer.vocab))
        new_ids = loaded_tokenizer.encode(schema_str, source_tokens)
        self.assertSequenceEqual(ids, new_ids)

    def test_decode(self):
        """Decoding with source tokens reconstructs the original schema string."""
        vocab = {"[", "]", "IN:", "INTENT1", "SL:", "SLOT1"}
        src_tokenizer = TransformersTokenizerMock()
        tokenizer = TopSchemaTokenizer(vocab, src_tokenizer)
        schema_str = "[IN:INTENT1 tok6 tok2 tok31 [SL:SLOT1 tok42 tok5 ] ]"
        source_tokens = [6, 2, 31, 42, 5]
        # note that TransformersTokenizerMock splits tok6,tok2 into two subtokens
        # note that the vocabulary is sorted
        expected_ids = [7, 3, 4, 9, 10, 11, 7, 5, 6, 12, 13, 8, 8]
        schema_decoded = tokenizer.decode(expected_ids, source_tokens)
        self.assertEqual(schema_str, schema_decoded)

    def test_decode_wpointers(self):
        """Decoding without source tokens leaves @ptrN placeholders in place."""
        vocab = {"[", "]", "IN:", "INTENT1", "SL:", "SLOT1"}
        src_tokenizer = TransformersTokenizerMock()
        tokenizer = TopSchemaTokenizer(vocab, src_tokenizer)
        schema_str = "[IN:INTENT1 @ptr0 @ptr1 @ptr2 [SL:SLOT1 @ptr3 @ptr4 ] ]"
        # note that TransformersTokenizerMock splits tok6,tok2 into two subtokens
        # note that the vocabulary is sorted
        ids = [7, 3, 4, 9, 10, 11, 7, 5, 6, 12, 13, 8, 8]
        schema_decoded = tokenizer.decode(ids)
        self.assertEqual(schema_str, schema_decoded)

    def test_postprocess_punct(self):
        """Postprocessing inserts spaces around punctuation where needed."""
        text = "[What is this?]"
        expected = "[What is this ?]"
        postprocessed = TopSchemaTokenizer.postprocess(text)
        self.assertSequenceEqual(expected, postprocessed)
        text = "[This is nothing ! ]"
        expected = "[This is nothing ! ]"
        postprocessed = TopSchemaTokenizer.postprocess(text)
        self.assertSequenceEqual(expected, postprocessed)
        text = "7;45"
        expected = "7 ; 45"
        postprocessed = TopSchemaTokenizer.postprocess(text)
        self.assertSequenceEqual(expected, postprocessed)

    def test_postprocess_apostrophe(self):
        """Postprocessing normalizes spacing around apostrophes/contractions."""
        text = "[What's]"
        expected = "[What 's]"
        postprocessed = TopSchemaTokenizer.postprocess(text)
        self.assertSequenceEqual(expected, postprocessed)
        text = "[I didn't do this.]"
        expected = "[I didn't do this .]"
        postprocessed = TopSchemaTokenizer.postprocess(text)
        self.assertSequenceEqual(expected, postprocessed)
        text = "[[Your ] ' s]"
        expected = "[[Your ] 's]"
        postprocessed = TopSchemaTokenizer.postprocess(text)
        self.assertSequenceEqual(expected, postprocessed)
| StarcoderdataPython |
6523879 | <reponame>TeriForey/fawkes
# -*- coding: utf-8 -*-
import pytest
# ``py.test --runslow`` causes the entire testsuite to be run, including test
# that are decorated with ``@@slow`` (scaffolding tests).
# see http://pytest.org/latest/example/simple.html#control-skipping-of-tests-according-to-command-line-option # Noqa
## def pytest_addoption(parser):
## parser.addoption("--runslow", action="store_true", help="run slow tests")
## slow = pytest.mark.skipif(
## not pytest.config.getoption("--runslow"),
## reason="need --runslow option to run"
## )
| StarcoderdataPython |
1955330 | import os
import unittest
import numpy as np
import pytest
from geopyspark.geotrellis import (SpatialKey,
Tile,
ProjectedExtent,
Extent,
RasterLayer,
LocalLayout,
TileLayout,
GlobalLayout,
LayoutDefinition,
SpatialPartitionStrategy)
from shapely.geometry import Point
from geopyspark.geotrellis.layer import TiledRasterLayer
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.geotrellis.constants import LayerType, CellType
def make_raster(x, y, v, cols=4, rows=4, ct=CellType.FLOAT32, crs=4326):
    """Build a (ProjectedExtent, Tile) pair: a single-band cols x rows
    raster filled with the constant value *v*, anchored at (x, y)."""
    cells = np.zeros((1, rows, cols))
    cells.fill(v)
    # extent of a single cell is 1, no fence-post here
    extent = ProjectedExtent(Extent(x, y, x + cols, y + rows), crs)
    return (extent, Tile(cells, ct, None))
class RasterLayerTest(BaseTestClass):
    """Tests for tiling a RasterLayer to local/global/explicit layouts
    and for no-data handling in the collected metadata."""

    # Three 4x4 constant-valued rasters laid out over the 0..10 x 0..6 area.
    layers = [
        make_raster(0, 0, v=1),
        make_raster(3, 2, v=2),
        make_raster(6, 0, v=3)
    ]
    numpy_rdd = BaseTestClass.pysc.parallelize(layers)
    layer = RasterLayer.from_numpy_rdd(LayerType.SPATIAL, numpy_rdd)
    metadata = layer.collect_metadata(GlobalLayout(5))

    def test_to_to_layout_with_partitioner(self):
        """A partition strategy passed to tile_to_layout is retained."""
        strategy = SpatialPartitionStrategy(4)
        tiled = self.layer.tile_to_layout(LocalLayout(5), partition_strategy=strategy)
        self.assertEqual(tiled.get_partition_strategy(), strategy)

    def test_tile_to_local_layout(self):
        tiled = self.layer.tile_to_layout(LocalLayout(5))
        assert tiled.layer_metadata.extent == Extent(0, 0, 10, 6)
        assert tiled.layer_metadata.tile_layout == TileLayout(2, 2, 5, 5)

    def test_tile_to_global_layout(self):
        tiled = self.layer.tile_to_layout(GlobalLayout(5))
        assert tiled.layer_metadata.extent == Extent(0, 0, 10, 6)
        assert tiled.layer_metadata.tile_layout == TileLayout(128, 128, 5, 5)
        assert tiled.zoom_level == 7

    def test_tile_to_metadata_layout(self):
        """Tiling against collected Metadata reproduces that metadata."""
        tiled = self.layer.tile_to_layout(layout=self.metadata)
        self.assertEqual(tiled.layer_metadata.extent, Extent(0, 0, 10, 6))
        self.assertDictEqual(tiled.layer_metadata.to_dict(), self.metadata.to_dict())

    def test_tile_to_tiled_layer_layout(self):
        """Tiling against an already-tiled layer copies its layout metadata."""
        extent = Extent(0., 0., 10., 6.)
        tile_layout = TileLayout(2, 2, 5, 5)
        layout_definition = LayoutDefinition(extent, tile_layout)
        base = self.layer.tile_to_layout(layout_definition)
        tiled = self.layer.tile_to_layout(layout=base)
        self.assertDictEqual(tiled.layer_metadata.to_dict(), base.layer_metadata.to_dict())

    def test_tile_to_layout_definition(self):
        tiled = self.layer.tile_to_layout(layout=self.metadata.layout_definition)
        self.assertDictEqual(tiled.layer_metadata.to_dict(), self.metadata.to_dict())

    def test_no_data_of_zero(self):
        """A user-defined no-data value of 1 shows up in the metadata cell type."""
        no_data_layer = [(t[0], Tile.from_numpy_array(t[1].cells, 1)) for t in self.layers]
        rdd = BaseTestClass.pysc.parallelize(no_data_layer)
        nd_layer = RasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd)
        nd_metadata = nd_layer.collect_metadata()
        self.assertTrue('ud1' in nd_metadata.cell_type)
        self.assertEqual(nd_metadata.no_data_value, 1)

    @pytest.fixture(scope='class', autouse=True)
    def tearDown(self):
        # Close the py4j gateway once after all tests in the class.
        yield
        BaseTestClass.pysc._gateway.close()
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
9761149 | <filename>query_flight/tests/conftest.py
import pytest
from django.utils import timezone
from query_flight.models import Airport, Flight, Search, Layover
@pytest.fixture(scope='function', params=[str, timezone.pytz.timezone])
def tz_func(request):
    """Parametrized timezone constructor: plain ``str`` or ``pytz.timezone``."""
    return request.param
@pytest.fixture
def atl_dict():
    """Core attributes for the Atlanta (ATL) airport."""
    return {'title': 'Atlanta', 'abrev': 'ATL', 'sw_airport': True,
            'latitude': 33.6407, 'longitude': -84.4277,
            'timezone': 'US/Eastern'}
@pytest.fixture
def boi_dict():
    """Core attributes for the Boise (BOI) airport."""
    return {'title': 'Boise', 'abrev': 'BOI', 'sw_airport': True,
            'latitude': 43.5658, 'longitude': -116.2223, 'timezone': 'US/Mountain'}
@pytest.fixture
def dal_dict():
    """Core attributes for the Dallas Love Field (DAL) airport."""
    return {'title': 'Dallas Love Field', 'abrev': 'DAL', 'sw_airport': True,
            'latitude': 32.8481, 'longitude': -96.8512, 'timezone': 'US/Central'}
@pytest.fixture
def aua_dict():
    """Core attributes for the Aruba (AUA) airport."""
    return {'title': 'Aruba', 'abrev': 'AUA', 'sw_airport': True,
            'latitude': 12.501400, 'longitude': -70.015198, 'timezone': 'America/Aruba'}
@pytest.fixture
def atl_dict_total(atl_dict):
    """Full ATL attributes: core dict extended with country and state."""
    atl_dict.update({'country': 'us', 'state': 'Georgia'})
    return atl_dict
@pytest.fixture
def boi_dict_total(boi_dict):
    """Full BOI attributes: core dict extended with country and state."""
    boi_dict.update({'country': 'us', 'state': 'Idaho'})
    return boi_dict
@pytest.fixture
def dal_dict_total(dal_dict):
    """Full DAL attributes: core dict extended with country and state."""
    dal_dict.update({'country': 'us', 'state': 'Texas'})
    return dal_dict
@pytest.fixture
def aua_dict_total(aua_dict):
    """Full AUA attributes: core dict extended with country (no state)."""
    aua_dict.update({'country': 'nl'})
    return aua_dict
@pytest.fixture
def atl_airport(atl_dict_total):
    """Persisted Airport model instance for Atlanta."""
    return Airport.objects.create(**atl_dict_total)
@pytest.fixture
def boi_airport(boi_dict_total):
    """Persisted Airport model instance for Boise."""
    return Airport.objects.create(**boi_dict_total)
@pytest.fixture
def dal_airport(dal_dict_total):
    """Persisted Airport model instance for Dallas Love Field."""
    return Airport.objects.create(**dal_dict_total)
@pytest.fixture
def aua_airport(aua_dict_total):
    """Persisted Airport model instance for Aruba."""
    return Airport.objects.create(**aua_dict_total)
@pytest.fixture
def search():
    """Persisted Search model instance with default field values."""
    return Search.objects.create()
@pytest.fixture
def basic_flight_dict(atl_airport, boi_airport, search):
    """Keyword arguments (with model objects) for an ATL -> BOI Flight."""
    return {'origin_airport': atl_airport, 'destination_airport': boi_airport,
            'depart_time': atl_airport.get_tz_obj().localize(timezone.datetime(2018, 4, 26, 6, 00, 00)),
            'arrive_time': boi_airport.get_tz_obj().localize(timezone.datetime(2018, 4, 26, 13, 50, 00)),
            'wanna_get_away': 438.0, 'anytime': 571.0, 'business_select': 599.0, 'search': search}
@pytest.fixture
def post_flight_dict(atl_airport, boi_airport, search):
    """API POST payload for the same ATL -> BOI flight: airports as
    abbreviation strings and search as a primary key."""
    return {'origin_airport': atl_airport.abrev, 'destination_airport': boi_airport.abrev,
            'depart_time': atl_airport.get_tz_obj().localize(timezone.datetime(2018, 4, 26, 6, 00, 00)),
            'arrive_time': boi_airport.get_tz_obj().localize(timezone.datetime(2018, 4, 26, 13, 50, 00)),
            'wanna_get_away': 438.0, 'anytime': 571.0, 'business_select': 599.0, 'search': search.id}
@pytest.fixture
def basic_flight(basic_flight_dict):
    """Persisted Flight built from basic_flight_dict."""
    return Flight.objects.create(**basic_flight_dict)
@pytest.fixture
def post_layover_dict(basic_flight, dal_airport):
    """POST payload for a one-hour plane-change layover in Dallas.

    NOTE(review): ``basic_flight`` is not referenced in the payload —
    presumably requested so a Flight row exists for the layover to
    attach to; confirm against the tests that use this fixture.
    """
    return {'airport': dal_airport.abrev,
            'change_planes': True,
            'time': 3600.0}
# This gets all the airports for tests
# def pytest_generate_tests(metafunc):
# if 'airports' in metafunc.fixturenames:
# metafunc.parametrize("airports", ['atl','boi'], indirect=True)
@pytest.fixture(scope='function')
def airports(request, atl_airport, boi_airport, dal_airport, aua_airport):
    """Indirectly-parametrized fixture mapping an airport code
    ('atl'/'boi'/'dal'/'aua', case-insensitive) to its Airport instance."""
    by_code = {
        'atl': atl_airport,
        'boi': boi_airport,
        'dal': dal_airport,
        'aua': aua_airport,
    }
    code = request.param.lower()
    if code not in by_code:
        raise ValueError('Invalid param in airports')
    return by_code[code]
@pytest.fixture(scope='function')
def airports_dict(request, atl_dict_total, boi_dict_total, dal_dict_total, aua_dict_total):
    """Indirectly-parametrized fixture mapping an airport code
    ('atl'/'boi'/'dal'/'aua', case-insensitive) to its full attribute dict."""
    by_code = {
        'atl': atl_dict_total,
        'boi': boi_dict_total,
        'dal': dal_dict_total,
        'aua': aua_dict_total,
    }
    code = request.param.lower()
    if code not in by_code:
        raise ValueError('Invalid param in airports_dict')
    return by_code[code]
@pytest.fixture(scope='function')
def airports_dict_partial(request, atl_dict, boi_dict, dal_dict, aua_dict):
    """Indirectly-parametrized fixture mapping an airport code
    ('atl'/'boi'/'dal'/'aua', case-insensitive) to its partial
    (model-independent) attribute dict.

    Fixes the error message, which previously misreported the fixture
    as ``airports_dict``.
    """
    by_code = {
        'atl': atl_dict,
        'boi': boi_dict,
        'dal': dal_dict,
        'aua': aua_dict,
    }
    code = request.param.lower()
    if code not in by_code:
        raise ValueError('Invalid param in airports_dict_partial')
    return by_code[code]
# @pytest.fixture(scope='function')
# def airports(request,atl_dict_total,boi_dict_total,dal_dict_total):
# if request.param.lower() == 'atl':
# return Airport.objects.get_or_create(**atl_dict_total)
# elif request.param.lower() == 'boi':
# return Airport.objects.get_or_create(**boi_dict_total)
# elif request.param.lower() == 'dal':
# return Airport.objects.get_or_create(**dal_dict_total)
# else:
# raise ValueError('Invalid param in airports')
| StarcoderdataPython |
1708047 | <reponame>dakhnovskaya/tenders
from django.shortcuts import render
from .models import Tender
from django.shortcuts import render, get_object_or_404
from .form import TenderForm
from django.shortcuts import redirect
def tender_list(request):
    """Render a page listing every Tender."""
    tenders = Tender.objects.all()
    return render(request, 'tenders_api/tender_list.html', {'tenders': tenders})
def tender_detail(request, pk):
    """Render the detail page for one Tender; 404 if *pk* does not exist."""
    tender = get_object_or_404(Tender, pk=pk)
    return render(request, 'tenders_api/tender_detail.html', {'tender': tender})
def tender_new(request):
    """Create a Tender from a submitted TenderForm.

    On a valid POST, saves and redirects to the new tender's detail
    page; otherwise renders the (possibly bound, with errors) form.
    """
    if request.method == "POST":
        form = TenderForm(request.POST)
        if form.is_valid():
            tender = form.save()
            return redirect('tender_detail', pk=tender.pk)
    else:
        form = TenderForm()
    return render(request, 'tenders_api/tender_edit.html', {'form': form})
def tender_edit(request, pk):
    """Edit an existing Tender (404 if *pk* does not exist).

    On a valid POST, saves and redirects to the tender's detail page;
    otherwise renders the form pre-filled with the tender's data.
    """
    tender = get_object_or_404(Tender, pk=pk)
    if request.method == "POST":
        form = TenderForm(request.POST, instance=tender)
        if form.is_valid():
            tender = form.save()
            return redirect('tender_detail', pk=tender.pk)
    else:
        form = TenderForm(instance=tender)
    return render(request, 'tenders_api/tender_edit.html', {'form': form})
| StarcoderdataPython |
4929585 | """
Processing of data from SPM Fig. 8
"""
from pathlib import Path
import pandas as pd
import scmdata
from .badc import read_badc
from .utils import convert_percentile_to_stats, convert_ssp_name, force_col_to_int
def compile_spm_fig_8_timeseries(raw_data_path):
directories_metadata = (
# path, start_string, metadata
(
Path("panel_a"),
"tas_global_",
dict(
variable="Surface Air Temperature Change",
unit="K",
region="World",
reference_period_start_year=1850,
reference_period_end_year=1900,
model="Ch.4 Assessed",
),
),
(
Path("panel_b"),
"sia_arctic_september_",
dict(
variable="Arctic Sea Icea Area|September",
unit="Mm^2",
region="World",
model="CMIP6 multi-model ensemble",
),
),
(
Path("panel_c"),
"phos_global_",
dict(
variable="Ocean Surface pH",
unit="dimensionless",
region="World",
model="CMIP6 multi-model ensemble",
),
),
)
out = []
for directory, start_string, metadata in directories_metadata:
for filename in (raw_data_path / directory).glob("*.csv"):
stem = Path(filename).stem
assert stem.startswith(start_string)
scenario = stem.split(start_string)[-1].replace("_", "").lower()
raw = pd.read_csv(filename).set_index("Year")
raw.columns.name = "percentile"
stacked = raw.stack().reset_index().rename({0: "value"}, axis="columns")
stacked.columns = stacked.columns.str.lower()
stacked["scenario"] = convert_ssp_name(scenario)
for k, v in metadata.items():
stacked[k] = v
out.append(scmdata.ScmRun(stacked))
out = scmdata.run_append(out)
out["variable"] = out["variable"] + "|" + out["percentile"]
# hack to force int
for c in ["reference_period_start_year", "reference_period_end_year"]:
out = force_col_to_int(out, c)
out = out.drop_meta("percentile")
return out
| StarcoderdataPython |
1865270 | <reponame>delair-ai/DISCA<gh_stars>1-10
import warnings
import cv2
import numpy as np
from sklearn.metrics import f1_score as sk_f1
from tqdm import tqdm
# silence RuntimeWarnings module-wide (e.g. 0/0 -> NaN inside the metrics below)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
def accuracy(input, target):
    """Return the overall accuracy, in percent, of `input` vs `target`.

    Note: the parameter name `input` shadows the builtin but is kept for
    backward compatibility with existing callers.
    """
    correct = np.count_nonzero(input == target)
    return 100 * float(correct) / target.size
def IoU(pred, gt, n_classes, all_iou=False):
    """Compute the per-class intersection-over-union and return the mean IoU.

    pred: array of predicted class labels
    gt: array of ground-truth class labels (same shape as pred)
    n_classes: number of classes to evaluate
    all_iou: if True, return [mean_iou, per_class_list] instead of the mean

    Classes absent from the ground truth are recorded as NaN and excluded
    from the mean via nanmean.
    """
    iou = []
    for i in range(n_classes):
        if np.sum(gt == i) == 0:
            # class not present in the ground truth: not scorable
            # (np.nan, not the np.NaN alias removed in NumPy 2.0)
            iou.append(np.nan)
            continue
        TP = np.sum(np.logical_and(pred == i, gt == i))
        FP = np.sum(np.logical_and(pred == i, gt != i))
        FN = np.sum(np.logical_and(pred != i, gt == i))
        # denominator > 0 here because the class appears in gt (TP + FN >= 1)
        iou.append(TP / (TP + FP + FN))
    result = [np.nanmean(iou), iou] if all_iou else np.nanmean(iou)
    return result
def f1_score(pred, gt, n_classes, all=False):
    """Compute the per-class F1 score and return the mean F1.

    pred: array of predicted class labels
    gt: array of ground-truth class labels (same shape as pred)
    n_classes: number of classes to evaluate
    all: if True, return [mean_f1, per_class_list, weighted_f1] where
         weighted_f1 is sklearn's support-weighted F1 over all labels.
         (The name shadows the builtin but is kept for compatibility.)

    Classes absent from the ground truth are recorded as NaN and excluded
    from the mean via nanmean.
    """
    f1 = []
    for i in range(n_classes):
        if np.sum(gt == i) == 0:
            # class not present in the ground truth: not scorable
            # (np.nan, not the np.NaN alias removed in NumPy 2.0)
            f1.append(np.nan)
            continue
        TP = np.sum(np.logical_and(pred == i, gt == i))
        FP = np.sum(np.logical_and(pred == i, gt != i))
        FN = np.sum(np.logical_and(pred != i, gt == i))
        # 0/0 yields NaN here; RuntimeWarnings are silenced module-wide
        prec = TP / (TP + FP)
        recall = TP / (TP + FN)
        f1.append(2 * (prec * recall) / (prec + recall))
    result = [np.nanmean(f1), f1] if all else np.nanmean(f1)
    if all:
        # support-weighted F1 over the flattened label arrays
        f1_weighted = sk_f1(gt.reshape(-1), pred.reshape(-1),
                            average="weighted")
        result.append(f1_weighted)
    return result
| StarcoderdataPython |
350843 | """ SeleniumBase Exceptions
NoSuchFileException => Called when self.assert_downloaded_file(...) fails.
NotUsingChromeException => Used by Chrome-only methods if not using Chrome.
OutOfScopeException => Used by BaseCase methods when setUp() is skipped.
TextNotVisibleException => Called when expected text fails to appear.
TimeLimitExceededException => Called when exceeding "--time-limit=SECONDS".
"""
from selenium.common.exceptions import WebDriverException
class NoSuchFileException(Exception):
    """Raised when self.assert_downloaded_file(...) fails."""
    pass
class NotUsingChromeException(WebDriverException):
    """Raised by Chrome-only methods when the driver is not Chrome."""
    pass
class OutOfScopeException(Exception):
    """Raised by BaseCase methods when setUp() was skipped."""
    pass
class TextNotVisibleException(WebDriverException):
    """Raised when expected text fails to appear."""
    pass
class TimeLimitExceededException(Exception):
    """Raised when a run exceeds "--time-limit=SECONDS"."""
    pass
| StarcoderdataPython |
5164480 | <filename>conv_is_all_you_need/src/Cropping_Window_Xception/scripts/resize_pictures.py
from skimage.io import imread, imsave
import os
import cv2
from os.path import join as pjoin, basename, splitext
from lib.utils import info, multiprocessing, debug
def resize_one_img(task):
    """Convert a single image to the target format.

    task: dict with key 'fn' (input filename) and 'global_' holding
          'input_dir', 'output_dir' and 'resize_to' (the target extension).
    Returns {'id_': fn} so the multiprocessing driver can track progress.
    """
    fn = task['fn']
    input_dir = task['global_']['input_dir']
    output_dir = task['global_']['output_dir']
    resize_to = task['global_']['resize_to']

    input_path = pjoin(input_dir, fn)
    # write with the target extension so imsave can pick the right encoder
    # (the original dropped the extension entirely - splitext(fn)[0] - which
    # produced extension-less output files and never used `resize_to`)
    output_path = pjoin(output_dir, f"{splitext(fn)[0]}.{resize_to}")
    imsave(output_path, imread(input_path))
    return {
        'id_': fn,
    }
def resize():
    """Convert every image in data/valid_windowed to PNG via the
    project's multiprocessing driver."""
    src_dir = 'data/valid_windowed'
    target_format = 'png'
    dst_dir = f'data/valid_windowed_{target_format}'
    os.makedirs(dst_dir, exist_ok=True)

    filenames = os.listdir(src_dir)
    # settings shared by every task handed to resize_one_img
    shared = {
        'input_dir': src_dir,
        'output_dir': dst_dir,
        'resize_to': target_format,
    }
    tasks = ({'fn': name, 'global_': shared} for name in filenames)
    multiprocessing(resize_one_img, tasks, len_=len(filenames), n_threads=70)
# script entry point: run the conversion when invoked directly
if __name__ == '__main__':
    resize()
| StarcoderdataPython |
4806687 | from common.common_classes import Scenario, Executor
import numpy as np
#################################
# 1x2 2x2 3x2 3x3 5x4 5xRand
# Scenario\Executor configs | 0 | 1 | 2 | 3 | 4 | 5 |
# 04 0 | 001 | 002 | 003 | 004 | 005 | 006 |
# 06 1 | 007 | 008 | 009 | 010 | 011 | 012 |
# 06 2 | 013 | 014 | 015 | 016 | 017 | 018 |
# 06 3 | 019 | 020 | 021 | 022 | 023 | 024 |
# 10 4 | 025 | 026 | 027 | 028 | 029 | 030 |
# 20 5 | 031 | 032 | 033 | 034 | 035 | 036 |
#
###########################
# canned executor configurations, keyed by case index (see the table above);
# case 5 draws each parallel_queues_count randomly at import time
_fake_executor_configs = {0: [{"executor_id": 1, "parallel_queues_count": 2}],
                          1: [{"executor_id": 1, "parallel_queues_count": 2},
                              {"executor_id": 2, "parallel_queues_count": 2}],
                          2: [{"executor_id": 1, "parallel_queues_count": 2},
                              {"executor_id": 2, "parallel_queues_count": 2},
                              {"executor_id": 3, "parallel_queues_count": 2}],
                          3: [{"executor_id": 1, "parallel_queues_count": 3},
                              {"executor_id": 2, "parallel_queues_count": 3},
                              {"executor_id": 3, "parallel_queues_count": 3}],
                          4: [{"executor_id": 1, "parallel_queues_count": 4},
                              {"executor_id": 2, "parallel_queues_count": 4},
                              {"executor_id": 3, "parallel_queues_count": 4},
                              {"executor_id": 4, "parallel_queues_count": 4},
                              {"executor_id": 5, "parallel_queues_count": 4}],
                          5: [{"executor_id": 1, "parallel_queues_count": np.random.randint(4, high=20)},
                              {"executor_id": 2, "parallel_queues_count": np.random.randint(4, high=20)},
                              {"executor_id": 3, "parallel_queues_count": np.random.randint(4, high=20)},
                              {"executor_id": 4, "parallel_queues_count": np.random.randint(4, high=20)},
                              {"executor_id": 5, "parallel_queues_count": np.random.randint(4, high=20)}]
                          }
def _random_scenario(name):
    """Return one scenario config with 2-3 configuration times drawn from
    [1, 20). Draws happen at import time, matching the original literals
    (inner size-randint first, then the times, entry by entry)."""
    return {"name": name,
            "configuration_times": np.random.randint(
                1, high=20, size=[np.random.randint(2, high=4)])}


# canned scenario configurations, keyed by case index (see the table above);
# cases 4 and 5 were 60+ lines of copy-pasted random entries - now generated
_fake_scenario_configs = {0: [{"name": "1", "configuration_times": [2, 2, 3]},
                              {"name": "2", "configuration_times": [3, 3, 3]},
                              {"name": "3", "configuration_times": [1, 1, 2]},
                              {"name": "4", "configuration_times": [1, 2, 2]}],
                          1: [{"name": "1", "configuration_times": [2, 2, 3]},
                              {"name": "2", "configuration_times": [3, 3, 3]},
                              {"name": "3", "configuration_times": [1, 1, 2]},
                              {"name": "4", "configuration_times": [1, 2, 2]},
                              {"name": "5", "configuration_times": [1, 1, 1]},
                              {"name": "6", "configuration_times": [1, 1, 1]}],
                          2: [{"name": "1", "configuration_times": [2, 2, 3]},
                              {"name": "2", "configuration_times": [3, 3, 3]},
                              {"name": "3", "configuration_times": [1, 1, 2]},
                              {"name": "4", "configuration_times": [1, 2, 2]},
                              {"name": "5", "configuration_times": [10]},
                              {"name": "6", "configuration_times": [1, 1, 1]}],
                          3: [{"name": "1", "configuration_times": [2, 2, 3]},
                              {"name": "2", "configuration_times": [3, 3, 3]},
                              {"name": "3", "configuration_times": [1, 1, 2]},
                              {"name": "4", "configuration_times": [1, 2, 2]},
                              {"name": "5", "configuration_times": [1, 2, 3, 4]},
                              {"name": "6", "configuration_times": [1, 1, 1]}],
                          4: [_random_scenario(str(i)) for i in range(1, 11)],
                          5: [_random_scenario(str(i)) for i in range(1, 21)]
                          }
# number of available canned cases, exported for parameterized sweeps
fake_executor_configs_len = len(_fake_executor_configs)
fake_scenario_configs_len = len(_fake_scenario_configs)
def fake_case(env_class, executor_case=1, scenario_case=1):
    """Instantiate `env_class` with one of the canned executor/scenario
    configurations (see the case table at the top of this module).

    env_class: environment class called as env_class(scenarios=..., executors=...)
    executor_case: key into _fake_executor_configs
    scenario_case: key into _fake_scenario_configs

    Returns (env, observations, actions); the spec sizes are None when the
    environment does not expose observation_spec/action_spec.
    Raises ValueError for an unknown case index.
    """
    if not (executor_case in _fake_executor_configs
            and scenario_case in _fake_scenario_configs):
        raise ValueError("executor_case or scenario_case is not supported")

    executors = [Executor(executor_id=cfg["executor_id"],
                          parallel_queues_count=cfg["parallel_queues_count"])
                 for cfg in _fake_executor_configs[executor_case]]
    scenarios = [Scenario(name=cfg["name"],
                          configuration_times=cfg["configuration_times"])
                 for cfg in _fake_scenario_configs[scenario_case]]

    env = env_class(scenarios=scenarios, executors=executors)

    if hasattr(env, 'observation_spec') and hasattr(env, 'action_spec'):
        # a scalar spec (shape ()) is reported as a single observation/action
        obs_shape = env.observation_spec().shape
        observations = 1 if len(obs_shape) == 0 else obs_shape[0]
        act_shape = env.action_spec().shape
        actions = 1 if len(act_shape) == 0 else act_shape[0]
    else:
        observations = None
        actions = None

    return env, observations, actions
| StarcoderdataPython |
3235638 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %run 0-Base.ipynb
# DATA_PATH is defined by the 0-Base notebook executed above
SOURCE_PATH = f'{DATA_PATH}/2-reformatted'
TARGET_PATH = f'{DATA_PATH}/3-joined'
# +
from glob import glob
def join_reformatted_columns(source_directory_path):
    """Concatenate every per-column feather file in a directory side by side.

    source_directory_path: directory holding one .feather file per column set.
    Returns a single DataFrame; asserts that all inputs have the same row count.
    """
    paths = sorted(glob(f'{source_directory_path}/*.feather'))
    frames = [pd.read_feather(path) for path in paths]
    # every frame must contribute the same number of rows
    row_counts = {frame.shape[0] for frame in frames}
    assert len(row_counts) == 1
    return pd.concat(frames, axis=1)
# -
# ***
# join the per-column feather files for each transactions table and persist
historical_transactions = join_reformatted_columns(f'{SOURCE_PATH}/historical_transactions')
# 14 raw columns + 3 derived columns expected
assert len(historical_transactions.columns) == (14 + 3)
historical_transactions.info()
# %time historical_transactions.to_feather(f'{TARGET_PATH}/historical_transactions.feather')
# ***
new_merchant_transactions = join_reformatted_columns(f'{SOURCE_PATH}/new_merchant_transactions')
assert len(new_merchant_transactions.columns) == (14 + 3)
new_merchant_transactions.info()
# %time new_merchant_transactions.to_feather(f'{TARGET_PATH}/new_merchant_transactions.feather')
| StarcoderdataPython |
73161 | #!/usr/bin/env python3
import re
import csv
import glob
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
from adjustText import adjust_text
# (label, data source) pairs for each figure; labels become legend entries
compilePlots = [
    [ "XXD, random data", "work/compile-times-xxd-random.csv" ],
    [ "XXD, text data", "work/compile-times-xxd-text.csv" ],
    [ "String literals, random data", "work/compile-times-str-random.csv" ],
    [ "String literals, text data", "work/compile-times-str-text.csv" ],
]
generatePlots = [
    [ "XXD, random data", "work/generate-times-xxd-random.csv" ],
    [ "XXD, text data", "work/generate-times-xxd-text.csv" ],
    [ "String literals, random data", "work/generate-times-str-random.csv" ],
    [ "String literals, text data", "work/generate-times-str-text.csv" ],
]
compileMemPlots = [
    [ "XXD, random data", "work/times-xxd/random-*.txt" ],
    [ "XXD, text data", "work/times-xxd/text-*.txt" ],
    [ "String literals, random data", "work/times-str/random-*.txt" ],
    # fixed copy-paste bug: this entry globs the text-data logs but was
    # labelled "random data", duplicating the previous legend entry
    [ "String literals, text data", "work/times-str/text-*.txt" ],
]
def drawTimes(plots, title, ylabel, xlabel, fname):
    """Plot one line per (label, csv_path) entry in `plots` and save to `fname`.

    Each CSV must contain 'parameter' (embedded file size in kB) and 'median'
    (seconds) columns; medians are converted to milliseconds and each line's
    last point is annotated with its value.
    """
    print(fname)
    texts = []
    for plot in plots:
        path = plot[1]
        label = plot[0]
        keys = []
        values = []
        with open(path) as f:
            reader = csv.DictReader(f)
            for d in reader:
                keys.append(float(d["parameter"]))
                values.append(float(d["median"]) * 1000) # seconds -> milliseconds
            texts.append(plt.text(keys[-1], values[-1], f"{values[-1]:.0f}ms", va="center"))
        plt.plot(keys, values, "o-", label=label)
    # nudge the end-of-line annotations apart so they don't overlap
    adjust_text(texts)
    plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda val, pos: f"{val:.0f}kB"))
    plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda val, pos: f"{val:.0f}ms"))
    # NOTE(review): `keys` here is from the *last* plot - assumes every CSV
    # shares the same x values; confirm if inputs can differ
    plt.xticks(keys, rotation=40)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.savefig(fname, bbox_inches="tight")
    plt.clf()
def drawMem(plots, title, ylabel, xlabel, fname):
    """Plot max resident memory (MB) per embedded-file size for each glob in
    `plots` and save to `fname`.

    Each matched file's first line is a /usr/bin/time report containing
    '<kB>maxresident'; the embedded-file size (kB) is parsed from the
    filename itself (e.g. 'random-128k.txt').
    """
    print(fname)
    texts = []
    # pulls the maxresident kB figure out of a /usr/bin/time report line
    rssrx = re.compile(r".* (.*?)maxresident.*")
    # pulls the size out of filenames like 'random-128k.txt'
    sizerx = re.compile(r".*-(.*?)k\.txt")
    for plot in plots:
        pathglob = plot[1]
        label = plot[0]
        keys = []
        values = []
        for path in glob.iglob(pathglob):
            with open(path) as f:
                kb = rssrx.match(f.readline()).group(1)
                size = sizerx.match(path).group(1)
                keys.append(float(size))
                values.append(float(kb) / 1024)
        # glob order is arbitrary, so sort the points by size before plotting
        keys, values = zip(*sorted(zip(keys, values), key=lambda kv: kv[0]))
        texts.append(plt.text(keys[-1], values[-1], f"{values[-1]:.0f}MB", va="center"))
        plt.plot(keys, values, "o-", label=label)
    # nudge the end-of-line annotations apart so they don't overlap
    adjust_text(texts)
    plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda val, pos: f"{val:.0f}kB"))
    plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda val, pos: f"{val:.0f}MB"))
    # NOTE(review): `keys` here is from the *last* plot - assumes every glob
    # yields the same sizes; confirm if inputs can differ
    plt.xticks(keys, rotation=40)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.savefig(fname, bbox_inches="tight")
    plt.clf()
# generate all three report figures from the benchmark CSVs / time logs
drawTimes(compilePlots, "Compile Times",
        "Compile Time (ms)", "Embedded File Size (kB)", "compile-times.svg")
drawTimes(generatePlots, "Code Gen Times",
        "Generate Time (ms)", "Embedded File Size (kB)", "generate-times.svg")
drawMem(compileMemPlots, "Compiler Memory Usage",
        "Compiler Memory Usage (MB)", "Embedded File size (kB)", "compile-mem.svg")
| StarcoderdataPython |
1890997 |
# TODO: create a function that can help easily map raw pheno files that do not
# TODO: have the participant_session id that CPAC uses
def read_group_list_text_file(group_list_text_file):
    """Read in the group-level analysis participant-session list text file.

    group_list_text_file: path to a text file with one participant_session
                          ID per line.
    Returns the list of ID strings, with blank lines removed.
    """
    with open(group_list_text_file, "r") as f:
        group_list = f.readlines()
    # each item here includes both participant and session, and this also will
    # become the main ID column in the written design matrix CSV.
    # strip newlines BEFORE filtering, so blank lines ("\n") are dropped
    # instead of surviving as empty-string entries (original filtered first)
    group_list = [str(x).rstrip("\n") for x in group_list]
    group_list = [x for x in group_list if x != ""]
    return group_list
def read_pheno_csv_into_df(pheno_csv, id_label=None):
    """Read the phenotypic file CSV or TSV into a Pandas DataFrame.

    pheno_csv: path to the phenotype .csv or .tsv file
    id_label: (optional) name of the participant ID column; when given, that
              column is read as object dtype so leading zeroes are preserved
    Returns the phenotype DataFrame.
    """
    import pandas as pd
    # TSV files are tab-delimited; everything else is treated as CSV
    # (the original duplicated this logic across four near-identical branches)
    is_tsv = '.tsv' in pheno_csv or '.TSV' in pheno_csv
    reader = pd.read_table if is_tsv else pd.read_csv
    dtype = {id_label: object} if id_label else None
    with open(pheno_csv, "r") as f:
        pheno_df = reader(f, dtype=dtype)
    return pheno_df
def write_group_list_text_file(group_list, out_file=None):
    """Write out the group-level analysis participant list as a text file.

    group_list: list of participant_session ID strings
    out_file: (optional) output path; defaults to
              ./group_analysis_participant_list.txt
    Returns the path written.
    """
    import os
    # prevent duplicates while preserving order - depending on how the design
    # matrix is set up, we might have multiples of the sub_ses_ID's, like if
    # we're doing repeated measures with series/scans
    new_group_list = []
    for sub_ses_id in group_list:
        if sub_ses_id not in new_group_list:
            new_group_list.append(sub_ses_id)
    if not out_file:
        out_file = os.path.join(os.getcwd(), "group_analysis_participant_"
                                             "list.txt")
    else:
        out_file = os.path.abspath(out_file)
        dir_path = out_file.split(os.path.basename(out_file))[0]
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    with open(out_file, "wt") as f:
        for part_id in new_group_list:
            f.write("{0}\n".format(part_id))
    if os.path.exists(out_file):
        # print() call form works under both Python 2 and 3
        # (was a Python-2-only `print` statement)
        print("Group-level analysis participant list written:"
              "\n{0}\n".format(out_file))
    return out_file
def write_dataframe_to_csv(matrix_df, out_file=None):
    """Write out a matrix Pandas DataFrame into a CSV file.

    matrix_df: the design/contrasts matrix DataFrame
    out_file: (optional) output path; defaults to ./matrix.csv
    """
    import os
    if not out_file:
        out_file = os.path.join(os.getcwd(), "matrix.csv")
    else:
        out_file = os.path.abspath(out_file)
        dir_path = out_file.split(os.path.basename(out_file))[0]
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    # the participant_session_id column is internal book-keeping only; drop
    # it if present. Modern pandas raises KeyError for a missing column
    # (older versions raised ValueError), so catch both.
    try:
        matrix_df = matrix_df.drop(labels='participant_session_id', axis=1)
    except (KeyError, ValueError):
        pass
    matrix_df.to_csv(out_file, index=False)
    if os.path.exists(out_file):
        # print() call form works under both Python 2 and 3
        print("CSV file written:\n{0}\n".format(out_file))
def write_config_dct_to_yaml(config_dct, out_file=None):
    """Write out a configuration dictionary into a YAML file.

    config_dct: dict containing every key listed in `field_order` below
    out_file: (optional) output path; '.yml' is appended if missing and the
              default is ./gpa_fsl_config.yml
    """
    import os
    import CPAC
    if not out_file:
        out_file = os.path.join(os.getcwd(), "gpa_fsl_config.yml")
    else:
        out_file = os.path.abspath(out_file)
        dir_path = out_file.split(os.path.basename(out_file))[0]
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    if not out_file.endswith(".yml"):
        out_file = "{0}.yml".format(out_file)
    # keys are emitted in this fixed order so the YAML output is stable
    field_order = ['participant_list', 'pheno_file', 'ev_selections',
                   'participant_id_label', 'design_formula', 'mean_mask',
                   'custom_roi_mask', 'derivative_list', 'coding_scheme',
                   'group_sep', 'grouping_var', 'z_threshold', 'p_threshold',
                   'sessions_list', 'series_list', 'contrasts', 'f_tests',
                   'custom_contrasts', 'model_name', 'output_dir']
    with open(out_file, "wt") as f:
        f.write("# CPAC Group-Level Analysis Configuration File\n"
                "# Version {0}\n".format(CPAC.__version__))
        f.write("#\n# http://fcp-indi.github.io for more info.\n#\n"
                "# Tip: This file can be edited manually with "
                "a text editor for quick modifications.\n\n")
        for key in field_order:
            val = config_dct[key]
            f.write("{0}: {1}\n\n".format(key, val))
    if os.path.exists(out_file):
        # print() call form works under both Python 2 and 3
        # (was a Python-2-only `print` statement)
        print("Group-level analysis configuration YAML file written:\n"
              "{0}\n".format(out_file))
def create_design_matrix_df(group_list, pheno_df=None,
                            ev_selections=None, pheno_sub_label=None,
                            pheno_ses_label=None, pheno_site_label=None):
    """Create the design matrix intended for group-level analysis via the FSL
    FLAME tool.

    This does NOT create the final .mat file that FSL FLAME takes in. This is
    an intermediary design matrix CSV meant for the user to review.

    If there is a phenotype CSV provided, this function will align the
    participant-session ID labels in the CPAC individual-level analysis output
    directory with the values listed in the phenotype file.

    group_list: list of 'participant_session' unique ID strings
    pheno_df: (optional) phenotype DataFrame
    ev_selections: (optional) list of pheno column names to keep as EVs
    pheno_sub_label: pheno column holding the participant IDs; required
                     whenever pheno_df is given
    pheno_ses_label: (optional) pheno column holding the session IDs
    pheno_site_label: (optional) pheno column holding the site IDs

    Returns the design matrix as a DataFrame.
    """
    import pandas as pd

    if pheno_df is not None:
        # kill duplicates (guarded: the original ran drop_duplicates
        # unconditionally, which crashed when no phenotype was provided)
        pheno_df = pheno_df.drop_duplicates()
        # replace spaces and dashes with underscores, to prevent confusion
        # with the Patsy design formula
        rename_pheno_cols = {}
        for col_name in pheno_df.columns:
            if ' ' in col_name or '-' in col_name:
                rename_pheno_cols.update(
                    {col_name: col_name.replace(' ', '_').replace('-', '_')})
        pheno_df = pheno_df.rename(columns=rename_pheno_cols)

    # map the participant-session IDs to participant IDs and session IDs
    group_list_map = {}
    for part_ses in group_list:
        sub_id = part_ses.split("_")[0]
        try:
            ses_id = part_ses.split("_")[1]
        except IndexError:
            raise Exception('the group analysis participant list may not be '
                            'in the appropriate format.')
        group_list_map[part_ses] = [sub_id, ses_id, part_ses]

    # create a dataframe mapping the 'sub01_ses-1' CPAC-style unique IDs to
    # subject and session columns, like this:
    #     sub01    ses-1    sub01_ses-1
    #     sub02    ses-1    sub02_ses-1
    map_df = pd.DataFrame.from_dict(group_list_map, orient='index')
    map_df = map_df.rename(
        columns={0: 'participant_id', 1: 'session_id',
                 2: 'participant_session_id'})

    # sort by ses_id, then sub_id - everything must be grouped by session
    # first for paired analyses, where the first condition is all on top
    # and the second is all on the bottom
    map_df = map_df.sort_values(by=['session_id', 'participant_id'])
    map_df = map_df[['participant_session_id', 'participant_id',
                     'session_id']]

    if pheno_df is None:
        # no phenotypic matrix provided; simpler design models
        design_df = map_df[['participant_session_id']]
    else:
        # align the pheno's participant ID column with the group sublist
        if not pheno_sub_label:
            raise Exception("there's a pheno file, but no pheno sub label")

        pheno_df = pheno_df.rename(
            columns={pheno_sub_label: 'participant_id'})
        if ev_selections:
            # copy so the caller's list is not mutated by the inserts below
            ev_selections = list(ev_selections)
            ev_selections.insert(0, 'participant_id')

        sort_by = ['participant_id']

        if pheno_ses_label:
            # if sessions are important in the model, do this also
            pheno_df = pheno_df.rename(
                columns={pheno_ses_label: 'session_id'})
            if ev_selections:
                # fixed: was `ev_selections.append(1, 'session_id')`, a
                # TypeError - list.append takes one argument
                ev_selections.insert(1, 'session_id')
            # again, sort by session ID first in case of repeated measures,
            # where the sessions have to be all together first
            sort_by.insert(0, 'session_id')

        if pheno_site_label:
            # and if sites are important as well, same here
            pheno_df = pheno_df.rename(
                columns={pheno_site_label: 'site_id'})
            if ev_selections:
                # fixed: was `ev_selections.append(2, 'site_id')`
                ev_selections.insert(2, 'site_id')

        if ev_selections:
            # get specific covariates!
            pheno_df = pheno_df[ev_selections]

        # check for inconsistency with leading zeroes
        # (sometimes, the sub_ids from individual-level will be something
        # like '0002601' and the phenotype will have '2601')
        sublist_subs = map_df['participant_id']
        pheno_subs = list(pheno_df['participant_id'])
        for index, row in pheno_df.iterrows():
            pheno_sub_id = str(row['participant_id'])
            for sub_id in sublist_subs:
                if str(sub_id).lstrip('0') == pheno_sub_id:
                    pheno_df.at[index, 'participant_id'] = sub_id

        for sub in sublist_subs:
            if sub in pheno_subs:
                # okay, there's at least one match
                break
        else:
            raise Exception('the participant IDs in your group '
                            'analysis participant list and the '
                            'participant IDs in your phenotype file '
                            'do not match')

        # merge the pheno covariates onto the ID mapping
        if pheno_ses_label:
            design_df = pheno_df.merge(map_df, on=['participant_id'])
        else:
            design_df = pheno_df.merge(map_df[['participant_id',
                                               'participant_session_id']],
                                       on='participant_id')

        design_df = design_df.sort_values(sort_by)

    return design_df
def create_contrasts_template_df(design_df, contrasts_dct_list=None):
    """Create the template Pandas DataFrame for the contrasts matrix CSV.

    The headers in the contrasts matrix need to match the headers of the
    design matrix; the book-keeping ID columns are excluded.

    design_df: the design matrix DataFrame
    contrasts_dct_list: (optional) pre-set contrast vectors, each a dict
                        mapping "contrasts" plus every EV column to a value
    Returns the contrasts DataFrame.
    """
    import pandas as pd

    design_df = design_df.drop(labels='participant_session_id', axis=1)
    contrast_cols = [c for c in design_df.columns if c != 'participant_id']

    # TODO:
    # if session, if site, remove

    if contrasts_dct_list:
        # pre-set contrast vectors supplied - just check them for accuracy.
        # each dict maps column name -> contrast value, e.g.
        # {"contrasts": "Group Mean", "Group Mean": 1, "age": 0}
        for contrast_dct in contrasts_dct_list:
            # -1 accounts for the "contrasts" label key in each dict
            if (len(contrast_dct.keys()) - 1) != len(contrast_cols):
                raise Exception("number of columns in the contrast vector "
                                "does not match the number of covariate "
                                "columns in the design matrix")
    else:
        # default: two blank "template" contrast vectors of all zeroes
        contrasts_dct_list = [
            dict([("contrasts", "contrast_1")] +
                 [(col, 0) for col in contrast_cols]),
            dict([("contrasts", "contrast_2")] +
                 [(col, 0) for col in contrast_cols]),
        ]

    # build the dataframe and put the columns in design-matrix order,
    # label column first
    ordered_cols = ["contrasts"] + contrast_cols
    contrasts_df = pd.DataFrame(contrasts_dct_list)
    return contrasts_df[ordered_cols]
def preset_single_group_avg(group_list, pheno_df=None, covariate=None,
                            pheno_sub_label=None, output_dir=None,
                            model_name="one_sample_T-test"):
    """Set up the design matrix CSV for running a single group average
    (one-sample T-test).

    group_list: list of 'participant_session' unique ID strings
    pheno_df: (optional) phenotype DataFrame, for an extra covariate
    covariate: (optional) name of the pheno column to include as an EV
    pheno_sub_label: (optional) pheno column holding the participant IDs
    output_dir: (optional) output directory path; defaults to the CWD
    model_name: (optional) name/label of the model to run

    Returns (design_df, contrasts_df, group_config).
    """
    import os

    if not output_dir:
        output_dir = os.getcwd()

    id_cols = ["participant_session_id", "participant_id", "session_id",
               "site_id"]

    # change spaces and dashes to underscores to prevent confusion with the
    # Patsy design formula (guarded: covariate defaults to None, and the
    # original crashed on None.lstrip)
    if covariate:
        covariate = covariate.lstrip(' ').rstrip(' ')
        covariate = covariate.replace(' ', '_').replace('-', '_')

    ev_selections = None
    if pheno_df is not None:
        if covariate and pheno_sub_label:
            # if we're adding an additional covariate
            ev_selections = [covariate]

    design_df = create_design_matrix_df(group_list, pheno_df,
                                        ev_selections=ev_selections,
                                        pheno_sub_label=pheno_sub_label)

    # the one-sample model: every row loads onto the group mean
    design_df["Group_Mean"] = 1

    group_mean_contrast = {"contrasts": "Group Mean"}
    # make these loops in case we expand this to handle more than one
    # covariate past the Group Mean
    for col in design_df.columns:
        if col not in id_cols:
            if col == "Group_Mean":
                group_mean_contrast.update({col: 1})
            else:
                group_mean_contrast.update({col: 0})
    contrasts = [group_mean_contrast]

    if covariate:
        covariate_contrast = {"contrasts": covariate}
        for col in design_df.columns:
            if col not in id_cols:
                if col == covariate:
                    covariate_contrast.update({col: 1})
                else:
                    covariate_contrast.update({col: 0})
        contrasts.append(covariate_contrast)

    contrasts_df = create_contrasts_template_df(design_df, contrasts)

    # create design and contrasts matrix file paths
    design_mat_path = os.path.join(output_dir, model_name,
                                   "design_matrix_{0}.csv".format(model_name))
    contrasts_mat_path = os.path.join(output_dir, model_name,
                                      "contrasts_matrix_{0}.csv"
                                      "".format(model_name))

    # start group config yaml dictionary
    design_formula = "Group_Mean"
    if covariate:
        design_formula = "{0} + {1}".format(design_formula, covariate)

    # NOTE(review): str(covariate) yields the string "None" when no covariate
    # was given - preserved as-is from the original behavior
    group_config = {"pheno_file": design_mat_path,
                    "ev_selections": {"demean": [str(covariate)],
                                      "categorical": ["Group_Mean"]},
                    "design_formula": design_formula,
                    "group_sep": "Off",
                    "grouping_var": None,
                    "sessions_list": [],
                    "series_list": [],
                    "custom_contrasts": contrasts_mat_path,
                    "model_name": model_name,
                    "output_dir": output_dir}

    return design_df, contrasts_df, group_config
def preset_unpaired_two_group(group_list, pheno_df, groups, pheno_sub_label,
                              output_dir=None,
                              model_name="two_sample_unpaired_T-test"):
    """Set up the design matrix and contrasts matrix for running an unpaired
    two-group difference (two-sample unpaired T-test).
    group_list: a list of strings- sub_ses unique IDs
    pheno_df: a Pandas DataFrame object of the phenotypic file CSV/matrix
    groups: a list of either one or two strings- design matrix EV/covariate
            labels to take from the phenotype DF and include in the model
    pheno_sub_label: a string of the label name of the column in the phenotype
                     file that holds the participant/session ID for each row
    output_dir: (optional) string of the output directory path
    model_name: (optional) name/label of the model to run
    Sets up the model described here:
    https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide
    #Unpaired_Two-Group_Difference_.28Two-Sample_Unpaired_T-Test.29
    Only one "group" will be provided usually if the two groups in the
    phenotypic information you wish to compare are encoded in one covariate
    column, as categorical information. Thus, providing this one name will
    pull it from the phenotype file, and this function will break it out into
    two columns using dummy-coding.
    Returns (design_df, contrasts_df, group_config).
    """
    import os
    if not output_dir:
        output_dir = os.getcwd()
    # book-keeping ID columns excluded from contrast vectors
    id_cols = ["participant_session_id", "participant_id", "session_id",
               "site_id"]
    # change spaces and dashes to underscores, to prevent confusion with the
    # Patsy design formula
    old_groups = groups
    groups = []
    for group in old_groups:
        group = group.lstrip(' ').rstrip(' ')
        group = group.replace(' ', '_').replace('-', '_')
        groups.append(group)
    # if the two groups are encoded in one categorical EV/column, then we will
    # have to dummy code them out
    # if this is the case, then "groups" will be a list with only one
    # element in it- the one EV/column that is to be split up into two
    ev_selections = []
    for group in groups:
        ev_selections.append(group)
    design_df = create_design_matrix_df(group_list, pheno_df,
                                        ev_selections=ev_selections,
                                        pheno_sub_label=pheno_sub_label)
    if len(groups) == 1:
        # we're going to split the one categorical EV into two
        new_groups = []
        # get full range of values in one-column categorical EV
        group_set = list(set(design_df[groups[0]]))
        # run this again!
        # change spaces and dashes to underscores, to prevent confusion with the
        # Patsy design formula
        # NOTE(review): this assumes the categorical values are strings -
        # numeric codes would crash on .lstrip; confirm upstream guarantees
        new_group_set = []
        for group in group_set:
            group = group.lstrip(' ').rstrip(' ')
            group = group.replace(' ', '_').replace('-', '_')
            new_group_set.append(group)
        # this preset is for an unpaired two-group difference- should only be
        # two groups encoded in this EV!
        if len(group_set) > 2:
            # TODO: message
            raise Exception("more than two groups provided, but this is a"
                            "model for a two-group difference\n\ngroups "
                            "found in column:\n{0}".format(str(group_set)))
        elif len(group_set) == 0:
            raise Exception("no groups were found - something went wrong "
                            "with reading the phenotype information")
        elif len(group_set) == 1:
            raise Exception("only one group found in the column provided, "
                            "but this is a model for a two-group difference"
                            "\n\ngroups found in column:\n"
                            "{0}".format(str(group_set)))
        # create the two new dummy-coded columns
        # column 1
        # new column name
        new_name = "{0}_{1}".format(groups[0], new_group_set[0])
        # create new column encoded in 0's
        design_df[new_name] = 0
        # map the relevant values into 1's
        design_df[new_name] = design_df[groups[0]].map({group_set[0]: 1,
                                                        group_set[1]: 0})
        # update groups list
        new_groups.append(new_name)
        # column 2
        # new column name
        new_name = "{0}_{1}".format(groups[0], new_group_set[1])
        # create new column encoded in 0's
        design_df[new_name] = 0
        # map the relevant values into 1's
        design_df[new_name] = design_df[groups[0]].map({group_set[1]: 1,
                                                        group_set[0]: 0})
        # update groups list
        new_groups.append(new_name)
        # drop original EV/column
        del design_df[groups[0]]
        # update groups list
        groups = new_groups
    # start the contrasts: A - B and B - A, one row each
    contrast_one = {"contrasts": "{0} - {1}".format(groups[0], groups[1])}
    contrast_two = {"contrasts": "{0} - {1}".format(groups[1], groups[0])}
    # make these loops in case we expand this to handle additional covariates
    # past the "prescribed" ones in the model/preset
    for col in design_df.columns:
        if col not in id_cols:
            if col == groups[0]:
                contrast_one.update({col: 1})
                contrast_two.update({col: -1})
            elif col == groups[1]:
                contrast_one.update({col: -1})
                contrast_two.update({col: 1})
            else:
                contrast_one.update({col: 0})
                contrast_two.update({col: 0})
    contrasts = [contrast_one, contrast_two]
    contrasts_df = create_contrasts_template_df(design_df, contrasts)
    # create design and contrasts matrix file paths
    design_mat_path = os.path.join(output_dir, model_name,
                                   "design_matrix_{0}.csv".format(model_name))
    contrasts_mat_path = os.path.join(output_dir, model_name,
                                      "contrasts_matrix_{0}.csv"
                                      "".format(model_name))
    # start group config yaml dictionary
    design_formula = "{0} + {1}".format(groups[0], groups[1])
    # NOTE(review): grouping_var is set to the stringified *list* of groups -
    # looks intentional for the downstream YAML, but confirm the consumer
    group_config = {"pheno_file": design_mat_path,
                    "ev_selections": {"demean": [],
                                      "categorical": str(groups)},
                    "design_formula": design_formula,
                    "group_sep": "On",
                    "grouping_var": str(groups),
                    "sessions_list": [],
                    "series_list": [],
                    "custom_contrasts": contrasts_mat_path,
                    "model_name": model_name,
                    "output_dir": os.path.join(output_dir, model_name)}
    return design_df, contrasts_df, group_config
def preset_paired_two_group(group_list, conditions, condition_type="session",
                            output_dir=None,
                            model_name="two_sample_unpaired_T-test"):
    """Set up the design matrix and contrasts matrix for running a paired
    two-group difference (two-sample paired T-test).

    group_list: a list of strings- sub_ses unique IDs
    conditions: a two-item list of strings- session or series/scan names of
                the two sessions or two scans (per participant) you wish to
                compare
    condition_type: a string, either "session" or "scan", depending on what
                    is in "conditions"
    output_dir: (optional) string of the output directory path
    model_name: (optional) name/label of the model to run
                NOTE(review): the default label says "unpaired" even though
                this preset builds the *paired* T-test; left unchanged so
                existing output paths keep working.

    Returns (design_df, contrasts_df, group_config).

    Sets up the model described here:
        https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide
        #Paired_Two-Group_Difference_.28Two-Sample_Paired_T-Test.29
    """

    import os

    if not output_dir:
        output_dir = os.getcwd()

    if len(conditions) != 2:
        raise Exception('Exactly two conditions (sessions or scans) are '
                        'required for the paired T-test, but received: '
                        '{0}\n'.format(conditions))

    design_df = create_design_matrix_df(group_list)

    # make the "condition" EV (the 1's and -1's delineating the two
    # conditions, with the "conditions" being the two sessions or two scans)
    condition_ev = []

    if condition_type == "session":
        # note: the participant_id column in design_df should be in order, so
        # the condition_ev should come out in order:
        #     1,1,1,1,-1,-1,-1,-1 (this is checked further down)
        for sub_ses_id in design_df["participant_session_id"]:
            if sub_ses_id.split("_")[-1] == conditions[0]:
                condition_ev.append(1)
            elif sub_ses_id.split("_")[-1] == conditions[1]:
                condition_ev.append(-1)
        group_config = {"sessions_list": conditions, "series_list": []}

    elif condition_type == "scan":
        # TODO: re-visit later, when session/scan difference in how to run
        # TODO: group-level analysis repeated measures is streamlined and
        # TODO: simplified

        # the information needed in this part is not encoded in the group
        # sublist! user inputs the two scan names, and we have a list of
        # sub_ses (which needs to be doubled), with each scan paired to each
        # half of this list (will need to ensure these scans exist for each
        # selected derivative in the output directory later on)
        for sub_ses_id in design_df["participant_session_id"]:
            condition_ev.append(1)
        for sub_ses_id in design_df["participant_session_id"]:
            condition_ev.append(-1)

        # NOTE: there is only one iteration of the sub_ses list in
        # design_df["participant_id"] at this point! so use append to
        # double that column:
        # NOTE(review): DataFrame.append was removed in pandas 2.x; if this
        # code base moves to pandas >= 2, switch to pd.concat.
        design_df = design_df.append(design_df)

        group_config = {"sessions_list": [], "series_list": conditions}

    else:
        raise Exception('condition_type must be either "session" or "scan", '
                        'got: {0}\n'.format(condition_type))

    # let's check to make sure it came out right
    # BUGFIX: the slice bounds must be integers (Python 3's "/" yields a
    # float and raises a TypeError when used as an index), and the previous
    # first-half bound "len/2 - 1" skipped the last element of the first half
    midpoint = len(condition_ev) // 2

    # first half should be all 1's
    for val in condition_ev[0:midpoint]:
        if val != 1:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))

    # second half should be all -1's
    for val in condition_ev[midpoint:]:
        if val != -1:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))

    design_df[condition_type] = condition_ev

    # initalize the contrast dct's
    contrast_one = {}
    contrast_two = {}

    design_formula = "{0}".format(condition_type)

    # create the participant identity columns (one 0/1 regressor per
    # participant, pairing each subject's rows across the two conditions)
    for sub_ses_id in design_df["participant_session_id"]:
        new_part_col = []
        sub_id = sub_ses_id.split("_")[0]
        new_part_label = "participant_{0}".format(sub_id)
        # NOTE(review): this inner loop reads "participant_id" while the
        # outer loop reads "participant_session_id" - confirm both columns
        # exist in the frame returned by create_design_matrix_df
        for moving_sub_ses_id in design_df["participant_id"]:
            moving_sub_id = moving_sub_ses_id.split("_")[0]
            if moving_sub_id == sub_id:
                new_part_col.append(1)
            else:
                new_part_col.append(0)
        design_df[new_part_label] = new_part_col
        contrast_one.update({new_part_label: 0})
        contrast_two.update({new_part_label: 0})
        if new_part_label not in design_formula:
            design_formula = "{0} + {1}".format(design_formula,
                                                new_part_label)

    # finish the contrasts
    #   should be something like
    #       ses,sub,sub,sub, etc.
    #         ses-1 - ses-2:  1, 0, 0, 0, 0...
    #         ses-2 - ses-1: -1, 0, 0, 0, etc.
    contrast_one.update({
        "contrasts": "{0}_{1} - {2}_{3}".format(condition_type,
                                                conditions[0],
                                                condition_type,
                                                conditions[1])})
    contrast_two.update({
        "contrasts": "{0}_{1} - {2}_{3}".format(condition_type,
                                                conditions[1],
                                                condition_type,
                                                conditions[0])})

    contrast_one.update({condition_type: 1})
    contrast_two.update({condition_type: -1})

    contrasts = [contrast_one, contrast_two]
    contrasts_df = create_contrasts_template_df(design_df, contrasts)

    # create design and contrasts matrix file paths
    design_mat_path = os.path.join(output_dir, model_name,
                                   "design_matrix_{0}.csv".format(model_name))

    contrasts_mat_path = os.path.join(output_dir, model_name,
                                      "contrasts_matrix_{0}.csv"
                                      "".format(model_name))

    # start group config yaml dictionary
    group_config.update({"pheno_file": design_mat_path,
                         "ev_selections": {"demean": [],
                                           "categorical": []},
                         "design_formula": design_formula,
                         "group_sep": "Off",
                         "grouping_var": None,
                         "custom_contrasts": contrasts_mat_path,
                         "model_name": model_name,
                         "output_dir": os.path.join(output_dir, model_name)})

    return design_df, contrasts_df, group_config
def preset_tripled_two_group(group_list, conditions, condition_type="session",
                             output_dir=None,
                             model_name="tripled_T-test"):
    """Set up the design matrix and contrasts matrix for running a tripled
    two-group difference ('tripled' T-test).

    group_list: a list of strings- sub_ses unique IDs
    conditions: a three-item list of strings- session or series/scan names of
                the three sessions or three scans (per participant) you wish
                to compare
    condition_type: a string, either "session" or "scan", depending on what
                    is in "conditions"
    output_dir: (optional) string of the output directory path
    model_name: (optional) name/label of the model to run

    Returns (design_df, contrasts_df, group_config).

    Sets up the model described here:
        https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide
        #Tripled_Two-Group_Difference_.28.22Tripled.22_T-Test.29
    """

    import os

    if not output_dir:
        output_dir = os.getcwd()

    if len(conditions) != 3:
        raise Exception('Three conditions are required for the tripled '
                        't-test.\n')

    design_df = create_design_matrix_df(group_list)

    # make the "condition" EVs (the 1's, -1's, and 0's delineating the three
    # conditions, with the "conditions" being the three sessions or three
    # scans)
    condition_ev_one = []
    condition_ev_two = []

    if condition_type == "session":
        # note: the participant_id column in design_df should be in order, so
        # the condition_ev's should come out in order:
        #     1,1,1,-1,-1,-1, 0, 0, 0 (this is checked further down)
        #     1,1,1, 0, 0, 0,-1,-1,-1
        for sub_ses_id in design_df["participant_session_id"]:
            if sub_ses_id.split("_")[-1] == conditions[0]:
                condition_ev_one.append(1)
                condition_ev_two.append(1)
            elif sub_ses_id.split("_")[-1] == conditions[1]:
                condition_ev_one.append(-1)
                condition_ev_two.append(0)
            elif sub_ses_id.split("_")[-1] == conditions[2]:
                condition_ev_one.append(0)
                condition_ev_two.append(-1)
        group_config = {"sessions_list": conditions, "series_list": []}

    elif condition_type == "scan":
        # TODO: re-visit later, when session/scan difference in how to run
        # TODO: group-level analysis repeated measures is streamlined and
        # TODO: simplified

        # the information needed in this part is not encoded in the group
        # sublist! user inputs the two scan names, and we have a list of
        # sub_ses (which needs to be doubled), with each scan paired to each
        # half of this list (will need to ensure these scans exist for each
        # selected derivative in the output directory later on)
        for sub_ses_id in design_df["participant_session_id"]:
            condition_ev_one.append(1)
            condition_ev_two.append(1)
        for sub_ses_id in design_df["participant_session_id"]:
            condition_ev_one.append(-1)
            condition_ev_two.append(0)
        for sub_ses_id in design_df["participant_session_id"]:
            condition_ev_one.append(0)
            condition_ev_two.append(-1)

        # NOTE: there is only one iteration of the sub_ses list in
        # design_df["participant_id"] at this point! so use append
        # (twice) triple that column:
        # NOTE(review): DataFrame.append was removed in pandas 2.x; if this
        # code base moves to pandas >= 2, switch to pd.concat.
        design_df_double = design_df.append(design_df)
        design_df = design_df_double.append(design_df)

        group_config = {"sessions_list": [], "series_list": conditions}

    else:
        raise Exception('condition_type must be either "session" or "scan", '
                        'got: {0}\n'.format(condition_type))

    # let's check to make sure it came out right
    # BUGFIX: slice bounds must be integers (Python 3's "/" yields floats,
    # which raise a TypeError when used as indices), and the original bounds
    # skipped the last element of the first third and the first element of
    # the final third
    third = len(condition_ev_one) // 3

    # condition EV one should be: 1's, then -1's, then 0's
    for val in condition_ev_one[0:third]:
        if val != 1:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))
    for val in condition_ev_one[third:third * 2]:
        if val != -1:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))
    for val in condition_ev_one[third * 2:]:
        if val != 0:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))

    # condition EV two should be: 1's, then 0's, then -1's
    for val in condition_ev_two[0:third]:
        if val != 1:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))
    for val in condition_ev_two[third:third * 2]:
        if val != 0:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))
    for val in condition_ev_two[third * 2:]:
        if val != -1:
            raise Exception('Non-equal amount of participants for each '
                            '{0}.\n'.format(condition_type))

    # label the two covariate columns which encode the three conditions
    column_one = "{0}_column_one".format(condition_type)
    column_two = "{0}_column_two".format(condition_type)

    design_df[column_one] = condition_ev_one
    design_df[column_two] = condition_ev_two

    # initalize the contrast dct's
    contrast_one = {}
    contrast_two = {}
    contrast_three = {}

    design_formula = "{0} + {1}".format(column_one, column_two)

    # create the participant identity columns
    for sub_ses_id in design_df["participant_session_id"]:
        new_part_col = []
        sub_id = sub_ses_id.split("_")[0]
        new_part_label = "participant_{0}".format(sub_id)
        # NOTE(review): this inner loop reads "participant_id" while the
        # outer loop reads "participant_session_id" - confirm both columns
        # exist in the frame returned by create_design_matrix_df
        for moving_sub_ses_id in design_df["participant_id"]:
            moving_sub_id = moving_sub_ses_id.split("_")[0]
            if moving_sub_id == sub_id:
                new_part_col.append(1)
            else:
                new_part_col.append(0)
        design_df[new_part_label] = new_part_col
        contrast_one.update({new_part_label: 0})
        contrast_two.update({new_part_label: 0})
        contrast_three.update({new_part_label: 0})
        if new_part_label not in design_formula:
            design_formula = "{0} + {1}".format(design_formula,
                                                new_part_label)

    # finish the contrasts
    #   should be something like
    #       ses,ses,sub,sub,sub, etc.
    #         ses-1 - ses-2:  2, 1, 0, 0, 0...
    #         ses-1 - ses-3:  1, 2, 0, 0, 0...
    #         ses-2 - ses-3: -1, 1, 0, 0, 0, etc.
    contrast_one.update({
        "contrasts": "{0}_{1} - {2}_{3}".format(condition_type,
                                                conditions[0],
                                                condition_type,
                                                conditions[1])})
    contrast_two.update({
        "contrasts": "{0}_{1} - {2}_{3}".format(condition_type,
                                                conditions[0],
                                                condition_type,
                                                conditions[2])})
    contrast_three.update({
        "contrasts": "{0}_{1} - {2}_{3}".format(condition_type,
                                                conditions[1],
                                                condition_type,
                                                conditions[2])})

    contrast_one.update({column_one: 2, column_two: 1})
    contrast_two.update({column_one: 1, column_two: 2})
    contrast_three.update({column_one: -1, column_two: 1})

    contrasts = [contrast_one, contrast_two, contrast_three]
    contrasts_df = create_contrasts_template_df(design_df, contrasts)

    # create design and contrasts matrix file paths
    design_mat_path = os.path.join(output_dir, model_name,
                                   "design_matrix_{0}.csv".format(model_name))

    contrasts_mat_path = os.path.join(output_dir, model_name,
                                      "contrasts_matrix_{0}.csv"
                                      "".format(model_name))

    # start group config yaml dictionary
    group_config.update({"pheno_file": design_mat_path,
                         "ev_selections": {"demean": [],
                                           "categorical": []},
                         "design_formula": design_formula,
                         "group_sep": "Off",
                         "grouping_var": None,
                         "custom_contrasts": contrasts_mat_path,
                         "model_name": model_name,
                         "output_dir": os.path.join(output_dir, model_name)})

    return design_df, contrasts_df, group_config
def run(group_list_text_file, derivative_list, z_thresh, p_thresh,
        preset=None, pheno_file=None, pheno_sub_label=None, output_dir=None,
        model_name=None, covariate=None, condition_type=None, run=False):
    """Build and write out the design matrix, contrasts matrix, group
    participant list, and group-level analysis config YAML for one of the
    FSL FEAT model presets.

    group_list_text_file: either a list of participant IDs or the path to an
                          existing group participant list text file
    derivative_list: list of derivative names, or the string 'all'
    z_thresh, p_thresh: cluster thresholding parameters
    preset: one of "single_grp", "single_grp_cov", "unpaired_two",
            "paired_two", "tripled_two"
    covariate: meaning depends on the preset - a pheno covariate name, a
               comma-separated pair of group names, or the comma-separated
               session/scan condition names
    condition_type: "session" or "scan" (repeated-measures presets only)
    run: if True, launch the analysis after generating the files (currently
         a no-op). NOTE(review): this parameter shadows the function's own
         name; kept as-is for interface compatibility.
    """

    # FSL FEAT presets: run regular group analysis with no changes to its
    # original flow- use the generated pheno as the pheno, use the
    # contrasts DF as a custom contrasts matrix, and auto-generate the
    # group analysis config YAML as well

    # NOTE: the input parameters above may come in as a dictionary instead
    #       or something

    import os
    import pandas as pd
    import pkg_resources as p

    # make life easy
    keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
    try:
        keys = pd.read_csv(keys_csv)
    except Exception as e:
        err = "\n[!] Could not access or read the cpac_outputs.csv " \
              "resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
        raise Exception(err)

    if derivative_list == 'all':
        derivative_list = ['alff', 'falff', 'reho', 'sca_roi', 'sca_tempreg',
                           'vmhc', 'centrality', 'dr_tempreg']

    # a pheno file and its participant-ID column label must come together
    if pheno_file and not pheno_sub_label:
        # TODO: message
        raise Exception("pheno file provided, but no pheno sub label")
    if pheno_sub_label and not pheno_file:
        # TODO: message
        raise Exception("pheno sub label provided, but no pheno file")

    if isinstance(group_list_text_file, list):
        group_list = group_list_text_file

        # write out a group analysis sublist text file so that it can be
        # linked in the group analysis config yaml
        group_list_text_file = os.path.join(output_dir, model_name,
                                            "gpa_participant_list_"
                                            "{0}.txt".format(model_name))
    elif os.path.isfile(group_list_text_file):
        group_list = read_group_list_text_file(group_list_text_file)

        # write out a group analysis sublist text file so that it can be
        # linked in the group analysis config yaml
        group_list_text_file = os.path.join(output_dir, model_name,
                                            "gpa_participant_list_"
                                            "{0}.txt".format(model_name))
    else:
        # BUGFIX: previously there was no else branch here, so group_list
        # stayed undefined and the function later crashed with a NameError
        raise Exception("the group participant list must be either a list "
                        "of IDs or the path to an existing text file, "
                        "got: {0}".format(group_list_text_file))

    group_config = {"participant_list": group_list_text_file,
                    "participant_id_label": "participant_id",
                    "mean_mask": ["Group Mask"],
                    "custom_roi_mask": None,
                    "derivative_list": derivative_list,
                    "coding_scheme": ["Treatment"],
                    "z_threshold": [float(z_thresh)],
                    "p_threshold": [float(p_thresh)],
                    "contrasts": [],
                    "f_tests": []}

    if not preset:
        # TODO: this
        pass

    elif preset == "single_grp":
        design_df, contrasts_df, group_config_update = \
            preset_single_group_avg(group_list, pheno_df=None, covariate=None,
                                    pheno_sub_label=None,
                                    output_dir=output_dir,
                                    model_name=model_name)
        group_config.update(group_config_update)

    elif preset == "single_grp_cov":
        if not pheno_file:
            # TODO: message
            raise Exception("pheno file not provided")
        if not covariate:
            # TODO: message
            raise Exception("covariate not provided")
        pheno_df = read_pheno_csv_into_df(pheno_file, pheno_sub_label)
        design_df, contrasts_df, group_config_update = \
            preset_single_group_avg(group_list, pheno_df, covariate=covariate,
                                    pheno_sub_label=pheno_sub_label,
                                    output_dir=output_dir,
                                    model_name=model_name)
        group_config.update(group_config_update)

    elif preset == "unpaired_two":
        if not pheno_file:
            # TODO: message
            raise Exception("pheno file not provided")
        if not covariate:
            # TODO: message
            raise Exception("the two groups were not provided")

        # we're assuming covariate will be coming in as a string of either one
        # covariate name, or a string with two covariates separated by a comma
        # either way, it needs to be in list form in this case, not string
        covariate = covariate.split(",")
        pheno_df = read_pheno_csv_into_df(pheno_file, pheno_sub_label)

        # in this case, "covariate" gets sent in as a list of two covariates
        design_df, contrasts_df, group_config_update = \
            preset_unpaired_two_group(group_list, pheno_df,
                                      groups=covariate,
                                      pheno_sub_label=pheno_sub_label,
                                      output_dir=output_dir,
                                      model_name=model_name)
        group_config.update(group_config_update)

    elif preset == "paired_two":
        # run a two-sample paired T-test
        # we need it as repeated measures- either session or scan
        #   and the list of subs
        #   also: the two session or scan names (in a list together), and
        #   whether they are sessions or scans
        if not covariate:
            # TODO: message
            raise Exception("the two conditions were not provided")
        if not condition_type:
            # TODO: message
            raise Exception("you didn't specify whether the two groups are "
                            "sessions or series/scans")

        # we're assuming covariate (which in this case, is the two sessions,
        # or two scans) will be coming in as a string of either one covariate
        # name, or a string with two covariates separated by a comma
        # either way, it needs to be in list form in this case, not string
        covariate = covariate.split(",")

        design_df, contrasts_df, group_config_update = \
            preset_paired_two_group(group_list,
                                    conditions=covariate,
                                    condition_type=condition_type,
                                    output_dir=output_dir,
                                    model_name=model_name)
        group_config.update(group_config_update)

    elif preset == "tripled_two":
        # run a "tripled" T-test
        # we need it as repeated measures- either session or scan
        #   and the list of subs
        #   also: the two session or scan names (in a list together), and
        #   whether they are sessions or scans
        if not covariate:
            # TODO: message
            raise Exception("the three conditions were not provided")
        if not condition_type:
            # TODO: message
            raise Exception("you didn't specify whether the three groups are "
                            "sessions or series/scans")

        # we're assuming covariate (which in this case, is the three sessions,
        # or three scans) will be coming in as a string of either one
        # covariate name, or a string with three covariates separated by a
        # comma
        # either way, it needs to be in list form in this case, not string
        covariate = covariate.split(",")

        design_df, contrasts_df, group_config_update = \
            preset_tripled_two_group(group_list,
                                     conditions=covariate,
                                     condition_type=condition_type,
                                     output_dir=output_dir,
                                     model_name=model_name)
        group_config.update(group_config_update)

    else:
        # TODO: not a real preset!
        raise Exception("not one of the valid presets")

    # write participant list text file
    write_group_list_text_file(design_df["participant_session_id"],
                               group_list_text_file)

    # write design matrix CSV
    write_dataframe_to_csv(design_df, group_config["pheno_file"])

    # write custom contrasts matrix CSV
    write_dataframe_to_csv(contrasts_df, group_config["custom_contrasts"])

    # write group-level analysis config YAML
    out_config = os.path.join(output_dir, model_name,
                              "gpa_fsl_config_{0}.yml".format(model_name))
    write_config_dct_to_yaml(group_config, out_config)

    if run:
        # TODO: we need to separate the individual-level pipeline config from
        # TODO: the group-level one, it's too restrictive
        pass
| StarcoderdataPython |
8093923 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Prefer setuptools; fall back to the stdlib distutils if it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# The package's long description is taken verbatim from the README.
with open('README.md') as readme_file:
    readme = readme_file.read()

# Runtime dependencies (minimum versions).
requirements = [
    "wheel>=0.23.0",
    "requests>=2.7.0",
    "pandas>=0.16.2",
    "docopt>=0.6.2",
    "PyYAML>=3.11"
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='labkey_multisite_query_tool',
    version='0.1.0',
    description="Commandline tool for querying across mutltiple LabKey instances.",
    long_description=readme,
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/OHSUCompBio/labkey_multisite_query_tool',
    packages=[
        'labkey_multisite_query_tool',
    ],
    package_dir={'labkey_multisite_query_tool':
                 'labkey_multisite_query_tool'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='labkey_multisite_query_tool',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    # the CLI entry point installed onto the user's PATH
    scripts=['bin/labkey'],
    test_suite='tests',
    tests_require=test_requirements
)
| StarcoderdataPython |
1602409 | <gh_stars>0
import pyowm

# NOTE(review): API key hardcoded in source - move it to an environment
# variable or config file rather than committing it to version control.
owm = pyowm.OWM('797153f746aae22307499da4ad723468')

# Fetch and print the current weather, wind, and temperature for Almere (NL).
observation = owm.weather_at_place('Almere,nl')
w = observation.get_weather()
print(w)
wind = w.get_wind()
temp = w.get_temperature('celsius')
print(wind)
print(temp)

# Observations near the given (lat, lon) coordinates - result is unused.
observation_list = owm.weather_around_coords(52.371353, 5.222124)
8009147 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 16:31:18 2021
@author: ilmrd77
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
#Creating boxes which return some values between its +-2.0 original value
class box():
    """A bandit "box" (arm) whose observed reward is its true value plus
    uniform noise drawn from [-2, 2)."""

    def __init__(self, q):
        # q: the true (hidden) mean value of this box
        self.q = q

    def value(self):
        """Draw a noisy observation around the true value, remember it on
        self.Q, and return it."""
        noise = np.random.uniform(-2, 2)
        self.Q = self.q + noise
        return self.Q
# Simulation setup: 200 rounds of rewards for 4 boxes (bandit arms).
columnNames = ["Box1","Box2","Box3","Box4"] # You can add number of columns
boxesDF = pd.DataFrame(index=np.arange(200) ,columns=columnNames) # Creating pandas DF
boxTrueValues = [8,3,9,6] #Boxes true values
boxes = [box(i) for i in boxTrueValues]
for line in range(0,200): # Fill the rows with random values
    for i in range(0,4):
        boxesDF.values[line,i] = boxes[i].value()
# NOTE(review): "FilePath" is a placeholder - replace it with a real
# directory before running, otherwise to_csv/read_csv will fail.
boxesDF.to_csv("FilePath/boxValues.csv")

boxesDF = pd.read_csv("FilePath/boxValues.csv")
boxesDF = boxesDF.iloc[:,1:] # Ignoring the index

# Upper Confidence Bound (UCB1) arm selection over the pre-generated rewards.
time_step = 0
clicked = [0]*4     # number of times each box has been chosen
total_reward = 0
rewardList= []
rewarded = [0]*4    # cumulative reward collected from each box
chosenList = []
operation = 200     # number of selection rounds
boxesNum = 4
confidence_lvl = 2  # exploration weight (the "c" constant in UCB1)
for line in range(0,operation):
    max_ucb = 0
    # NOTE(review): this loop variable rebinds the name of the `box` class
    # defined above; harmless here because the class is no longer needed.
    for box in range(0,boxesNum):
        if (clicked[box] > 0):
            exploration = math.sqrt(((math.log(time_step))/clicked[box]))*confidence_lvl
            exploitation = rewarded[box]/clicked[box]
            ucb = (exploitation+exploration)
        else :
            # unvisited boxes get an effectively infinite score so that
            # each box is tried at least once
            ucb = 999999
        if (ucb>max_ucb):
            max_ucb = ucb
            chosenBox = box
    time_step+=1
    reward = boxesDF.values[line,chosenBox]
    total_reward+=reward
    rewarded[chosenBox]+=reward
    clicked[chosenBox]+=1
    rewardList.append(reward)
    chosenList.append(chosenBox)

plt.hist(chosenList) # Histogram
plt.show()
6512499 | from BioModelsDAG import *
from BioModelsDAG.utils import timeit, yield_model_paths, to_csv
def flatten_reaction_data(data):
    """
    Flattens each (Child, Edge, Parent) 3-tuple from reactions_parser
    into relevant information to write to a CSV file.

    :param data: Generator of (Child, Edge, Parent) 3-tuples returned
                 from BioModelsDAG.pipeline.parsers.reactions_parser.
    :rtype: generator
    """
    def without_color(node):
        # Drop the 'color' entry (display-only) and keep the remaining
        # values in their original insertion order.
        return [value for key, value in node.items() if key != 'color']

    for child, edge, parent in data:
        yield without_color(parent) + without_color(child)
@timeit
def main():
    """Extract reaction data from every model file and dump it to CSV."""
    filepaths = yield_model_paths()
    data = extract_data(filepaths, parser=ReactionsParser())
    data = flatten_reaction_data(data)
    # NOTE(review): header count should match the fields yielded per row by
    # flatten_reaction_data - confirm against the parser's node dict keys.
    headers = ("Model Name", "Provider", "URI", "Created",
               "Reaction Name", "KEGG Identifiers", "Other Identifiers")
    to_csv("../kegg_reactions.csv", data, headers=headers)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
204618 | <reponame>UCLA-SEAL/Sibyl
# NLP | Train
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
Trainer,
TrainingArguments,
TrainerCallback,
EarlyStoppingCallback
)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers.trainer_callback import TrainerControl
from datasets import load_dataset, load_metric, load_from_disk
import os
import sys
import argparse
import time
import random
import shutil
import torch
import pandas as pd
from torch.utils.data import DataLoader
from sibyl import *
# All experiment configuration is supplied via the command line.
parser = argparse.ArgumentParser(description='Sibyl Trainer')
parser.add_argument('--data-dir', type=str, default="/data1/fabricehc/prepared_datasets/",
                    help='path to data folders')
parser.add_argument('--save-dir', type=str, default="/data1/fabricehc/pretrained/",
                    help='path to data folders')
parser.add_argument('--num_epoch', default=30, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--train-batch-size', default=16, type=int, metavar='N',
                    help='train batchsize')
parser.add_argument('--eval-batch-size', default=32, type=int, metavar='N',
                    help='eval batchsize')
parser.add_argument('--gpus', default='0,1,2,3', type=str,
                    help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--transformers_cache', default="../../data1/fabricehc/.cache", type=str,
                    help='location for for TRANSFORMERS_CACHE')
parser.add_argument('--datasets_cache', default="../../data1/fabricehc/.cache", type=str,
                    help='location for for HF_DATASETS_CACHE')
parser.add_argument('--num_runs', default=1, type=int, metavar='N',
                    help='number of times to repeat the training')
parser.add_argument('--transforms', nargs='+',
                    default=['ORIG', 'INV', 'SIB', 'INVSIB', 'Concept2Sentence',
                             'TextMix', 'Ada-TextMix', 'SentMix', 'Ada-SentMix',
                             'WordMix', 'Ada-WordMix', 'ConceptMix'],
                    type=str, help='transforms to apply from sibyl tool')
parser.add_argument('--tasks', nargs='+',
                    default=['amazon_polarity', 'yahoo_answers_topics',
                             'dbpedia_14', 'imdb', 'yelp_polarity', 'ag_news'],
                    type=str, help='tasks (datasets) to train upon, from huggingface')
parser.add_argument('--num-train-per-class', nargs='+', default=[10, 200, 2500], type=int,
                    help='number of training examples per class')
parser.add_argument('--num-valid-per-class', default=2000, type=int, metavar='N',
                    help='number of validation examples per class')
parser.add_argument('--num-outputs', default=30, type=int, metavar='N',
                    help='augmentation multiplier - number of new inputs per 1 original')
parser.add_argument('--models', nargs='+', default=['bert-base-uncased'],
                    type=str, help='pretrained huggingface models to train')
parser.add_argument('--save-file', type=str, default='NLP_training_pregen.csv',
                    help='name for the csv file to save with results')
args = parser.parse_args()

# GPU visibility and cache locations must be exported before the libraries
# that read them are first exercised.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
os.environ['TRANSFORMERS_CACHE'] = args.transformers_cache
os.environ['HF_DATASETS_CACHE'] = args.datasets_cache

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def tokenize_fn(batch):
    """Batch-tokenize a dataset slice, adding the second sentence when the
    current task defines one (uses the module-level tokenizer and keys)."""
    first = batch[sentence1_key]
    if sentence2_key is None:
        return tokenizer(first, padding=True, truncation=True, max_length=250)
    return tokenizer(first, batch[sentence2_key], padding=True,
                     truncation=True, max_length=250)
def sibyl_tokenize_fn(text1, text2):
    """Tokenize raw text (or a text pair) into PyTorch tensors for the
    Sibyl collator (uses the module-level tokenizer)."""
    kwargs = dict(padding=True, truncation=True, max_length=250,
                  return_tensors='pt')
    if text2 is None:
        return tokenizer(text1, **kwargs)
    return tokenizer(text1, text2, **kwargs)
# ---- experiment grid configuration (from CLI args) ----
data_dir = args.data_dir
save_dir = args.save_dir
num_runs = args.num_runs
num_train_examples = args.num_train_per_class
num_valid_per_class = args.num_valid_per_class
MODEL_NAMES = args.models
ts = args.transforms
tasks = args.tasks
num_outputs = args.num_outputs

# Build the full cross-product of (run, train size, transform, task, model)
# configurations to execute.
run_args = []
for run_num in range(num_runs):
    for num_train_per_class in num_train_examples:
        for t in ts:
            for task in tasks:
                for MODEL_NAME in MODEL_NAMES:
                    run_args.append({
                        "run_num":run_num,
                        "num_train_per_class":num_train_per_class,
                        "task":task,
                        "MODEL_NAME":MODEL_NAME,
                        "t":t
                    })

print(run_args)

# Resume support: if the results CSV already exists, skip the configurations
# that were already recorded.
results = []
save_file = args.save_file
if os.path.exists(save_file):
    results.extend(pd.read_csv(save_file).to_dict("records"))
    start_position = len(results)
else:
    start_position = 0

print('starting at position {}'.format(start_position))

for run_arg in run_args[start_position:]:

    run_num = run_arg['run_num']
    num_train_per_class = run_arg['num_train_per_class']
    task = run_arg['task']
    MODEL_NAME = run_arg['MODEL_NAME']
    t = run_arg['t']

    print(pd.DataFrame([run_arg]))

    # Per-task metadata: input text column(s), class count, and task family.
    task_to_keys = {
        "ag_news": {"keys": ("text", None), "num_classes": 4, "task_type": "topic"},
        "dbpedia_14": {"keys": ("text", None), "num_classes": 14, "task_type": "topic"},
        "yahoo_answers_topics": {"keys": ("text", None), "num_classes": 10, "task_type": "topic"},
        "imdb": {"keys": ("text", None), "num_classes": 2, "task_type": "sentiment"},
        "amazon_polarity": {"keys": ("text", None), "num_classes": 2, "task_type": "sentiment"},
        "yelp_polarity": {"keys": ("text", None), "num_classes": 2, "task_type": "sentiment"}
    }

    sentence1_key, sentence2_key = task_to_keys[task]["keys"]
    num_classes = task_to_keys[task]["num_classes"]
    task_type = task_to_keys[task]["task_type"]

    #############################################################
    ## Model + Tokenizer ########################################
    #############################################################

    checkpoint = save_dir + MODEL_NAME + '-' + task + '-' + t + '-' + str(num_train_per_class)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=num_classes).to(device)

    #############################################################
    ## Dataset Preparation ######################################
    #############################################################

    # "Ada-" transforms augment on the fly, so they train from the ORIG
    # (untransformed) dataset; everything else loads a pre-generated
    # augmented dataset for transform t.
    if "Ada-" in t:
        train_data_path = os.path.join(data_dir, task, 'ORIG', task + '_train_' + str(num_train_per_class))
    else:
        train_data_path = os.path.join(data_dir, task, t, task + '_train_' + str(num_train_per_class))
    valid_data_path = os.path.join(data_dir, task, 'ORIG', task + '_valid_' + str(num_valid_per_class))

    train_dataset = load_from_disk(train_data_path).shuffle()
    eval_dataset = load_from_disk(valid_data_path)
    test_dataset = load_dataset(task, split='test')

    # special handling for certain datasets
    if task in ["dbpedia_14", "amazon_polarity"]:
        test_dataset = test_dataset.rename_column("content", "text")
    if task == "yahoo_answers_topics":
        test_dataset = test_dataset.map(lambda example : {'text' : example['question_title'] + " " +
                                                                   example['question_content'] + " " +
                                                                   example['best_answer'],
                                                          'label': example['topic']})

    print('Length of train_dataset', len(train_dataset))
    print('Length of eval_dataset', len(eval_dataset))

    # Pre-generated augmented datasets contain num_outputs new examples per
    # original plus the originals themselves; sanity-check the size.
    multiplier = 1
    keep_original = 0
    if "Ada-" not in t and t != "ORIG":
        multiplier = num_outputs
        keep_original = num_classes * num_train_per_class
    expected_len_train_dataset = (num_classes * num_train_per_class * multiplier) + keep_original
    assert expected_len_train_dataset == len(train_dataset), "Unexpected number of training examples"

    eval_dataset = eval_dataset.map(tokenize_fn, batched=True, batch_size=len(eval_dataset))
    eval_dataset = eval_dataset.rename_column("label", "labels")
    eval_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])

    test_dataset = test_dataset.map(tokenize_fn, batched=True, batch_size=len(test_dataset))
    test_dataset = test_dataset.rename_column("label", "labels")
    test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])

    #############################################################
    ## Callbacks + Collator #####################################
    #############################################################

    callbacks = []
    tmcb = None
    escb = EarlyStoppingCallback(
        early_stopping_patience=10
    )
    callbacks.append(escb)

    transform = None
    transform_prob = 0
    num_sampled_INV = 0
    num_sampled_SIB = 0
    label_type = "hard"
    # NOTE(review): keep_original is reused here as a boolean flag for the
    # collator, after serving as an integer count above.
    keep_original = True
    if t not in ["ORIG", "INV", "EDA", "AEDA", "Concept2Sentence"]:
        label_type = "soft"
    if t in ['Ada-TextMix', 'Ada-SentMix', 'Ada-WordMix', 'Ada-ConceptMix']:
        transform_prob = 1.
        # Resolve the transform class by name (with the "Ada-" prefix
        # stripped) from this module's namespace.
        transform = getattr(sys.modules[__name__], t.replace('Ada-', ""))
        if hasattr(transform, 'uses_dataset'):
            transform = transform(dataset=task)
        else:
            transform = transform()
        tmcb = TargetedMixturesCallback(
            dataloader=DataLoader(eval_dataset, batch_size=4),
            device=device,
            sentence1_key=sentence1_key,
            sentence2_key=sentence2_key,
            num_classes=num_classes
        )
        callbacks.append(tmcb)

    collator = SibylCollator(
        sentence1_key=sentence1_key,
        sentence2_key=sentence2_key,
        tokenize_fn=sibyl_tokenize_fn,
        transform=transform,
        num_sampled_INV=num_sampled_INV,
        num_sampled_SIB=num_sampled_SIB,
        dataset=task,
        task_type=task_type,
        tran_type=None,
        label_type=None,
        one_hot=label_type != "hard",
        transform_prob=transform_prob,
        target_pairs=[],
        target_prob=0.5,
        reduce_mixed=False,
        num_classes=num_classes,
        return_tensors='pt',
        return_text=False,
        keep_original=keep_original
    )

    #############################################################
    ## Trainer Setup ############################################
    #############################################################

    train_batch_size = args.train_batch_size
    eval_batch_size = args.eval_batch_size
    num_epoch = args.num_epoch
    gradient_accumulation_steps = 1
    # max_steps derived from dataset size so every config trains for the
    # same effective number of epochs; log/save once per epoch.
    max_steps = int((len(train_dataset) * num_epoch / gradient_accumulation_steps) / train_batch_size)
    logging_steps = max_steps // num_epoch

    training_args = TrainingArguments(
        output_dir=checkpoint,
        overwrite_output_dir=True,
        max_steps=max_steps,
        save_steps=logging_steps,
        save_total_limit=1,
        per_device_train_batch_size=train_batch_size,
        per_device_eval_batch_size=eval_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        warmup_steps=int(max_steps / 10),
        weight_decay=0.01,
        logging_dir='./logs',
        logging_steps=logging_steps,
        logging_first_step=True,
        load_best_model_at_end=True,
        metric_for_best_model="accuracy",
        greater_is_better=True,
        evaluation_strategy="steps",
        remove_unused_columns=False
    )

    trainer = SibylTrainer(
        model=model,
        tokenizer=tokenizer,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=collator,
        callbacks=callbacks
    )

    start_time = time.time()
    trainer.train()
    run_time = time.time() - start_time

    # test with ORIG data
    trainer.eval_dataset = test_dataset
    trainer.data_collator = DefaultCollator()
    if tmcb:
        trainer.remove_callback(tmcb)
    out = trainer.evaluate()
    out['run_num'] = run_num
    out['num_train_per_class'] = num_train_per_class
    out['task'] = task
    out['transform'] = t
    out['run'] = checkpoint
    out['model_name'] = MODEL_NAME
    # NOTE(review): duplicate of the 'transform' assignment above - kept
    # as-is to preserve behavior.
    out['transform'] = t
    out['test'] = "ORIG"
    out['run_time'] = run_time
    print('ORIG for {}\n{}'.format(checkpoint, out))
    results.append(out)

    # save results after every configuration so interrupted runs can resume
    df = pd.DataFrame(results)
    df.to_csv(save_file)
# move best model to save_dir
# shutil.copytree(checkpoint, save_dir)
# shutil.rmtree(checkpoint) | StarcoderdataPython |
150015 | #!/usr/bin/env python2.7
# coding=utf-8
'''
@date = '17/4/7'
@author = 'chenliang'
@email = '<EMAIL>'
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from . import ui_fatermainwin
from . import editwidget
import os
import threading
from . import settingdlg
from . import statisticswidget
import common.camera.cameramanage
class FaterWin(QMainWindow,ui_fatermainwin.Ui_FaterMainWin):
    """Main window of the FATER test tool.

    Hosts an ordered list of EditWidget process steps, an MDI area for their
    image views, a settings dialog, and load/save of test configurations in
    ./testfile/<name>/ (fater.tes for the steps, setting.tes for settings).
    """
    def __init__(self):
        super(FaterWin, self).__init__()
        self.setupUi(self)
        # Statistics panel docked in 'stadock' (closed below until needed).
        self._sta = statisticswidget.StatisticsWidget()
        self.stadock.setWidget(self._sta)
        self.actionOpen.triggered.connect(self._open_file_dialog)
        self.actionSave_as.triggered.connect(self._saveas_test_file)
        self.actionSave.triggered.connect(self._save_test_file)
        self.editscrollarea.setWidgetResizable(True)
        self.actionSetting.triggered.connect(self.openSettingDlg)
        self.settingpage = settingdlg.SettingDlg()
        # Maps an EditWidget's image view -> its QMdiSubWindow.
        self._widgetDict = {}
        # Ordered list of EditWidget process steps.
        self._processList = []
        self.add_subwidows()
        self.actionTo_end.triggered.connect(self.startProcess)
        self.addbtn.clicked.connect(self.addDefaultWideget)
        self.runendbtn.clicked.connect(self.startProcess)
        # Name of the currently loaded test folder ('' = not yet saved).
        self._currentName = ''
        self.stadock.close()
        self.runDock.close()
        # self.mdiview.setMouseTracking(True)
        # self.mdiview.installEventFilter(self)
        # self.initCamera()
    def _open_file_dialog(self):
        """Let the user pick a test folder, then load setting.tes and fater.tes."""
        openfile = QFileDialog(self,'Open file','./testfile')
        openfile.setFileMode(QFileDialog.DirectoryOnly)
        openfile.exec_()
        filedir = openfile.selectedFiles()
        # NOTE(review): selectedFiles() returns a list, so `filedir != ''` is
        # always True; a cancelled dialog yields an empty list and filedir[0]
        # below would raise IndexError -- confirm and guard.
        if filedir != '' and os.path.exists(filedir[0] + '/setting.tes'):
            try:
                settingfile = open(filedir[0] + '/setting.tes', 'r')
                settingstr = settingfile.read()
                # NOTE(review): eval() executes arbitrary code from the file;
                # safe only for trusted test folders.
                self.settingpage.loadSetting(eval(settingstr))
                settingfile.close()
            except Exception:
                # NOTE(review): prints the Exception *class*, not the caught
                # instance -- probably intended `except Exception as e`.
                print(Exception)
        if filedir!='' and os.path.exists(filedir[0]+'/fater.tes'):
            # Remember the folder name so a plain Save writes back to it.
            file = filedir[0].split('/')
            self._currentName=file[-1]
            configfile = open(filedir[0]+'/fater.tes', 'r')
            try:
                teststr = configfile.read()
                self.delAllEdit()
                editlist=eval(teststr)
                # Rebuild one EditWidget per saved (father, name, param) entry.
                for edit in editlist:
                    self.addEditWidget(None,edit[0],edit[1],edit[2])
                print(editlist)
                for edit in self._processList:
                    edit.loadTmplImg(filedir[0]+'/')
            finally:
                configfile.close()
        else:
            QMessageBox.warning(self,'FATER','There is no test file in this file!')
    def _save_test_file(self):
        """Save the current steps and settings to the associated test folder.

        Falls back to Save As when no folder is associated yet.
        """
        # Serialise all edit widgets as a Python-literal list (read back with eval).
        savestr='['
        for edit in self._processList:
            savestr=savestr+edit.getParamStr()+',\n'
        savestr=savestr[:-2]+']'
        saveSetting=self.settingpage.getSetting()
        if self._currentName !='':
            isExists = os.path.exists('./testfile/'+self._currentName)
            if not isExists:
                print('file not exists')
                self._saveas_test_file(False,self._currentName)
            else:
                savefilename='./testfile/'+self._currentName+'/fater.tes'
                outfile = open(savefilename, 'w+')
                outfile.write(savestr)
                outfile.close()
                saveSetname = './testfile/' + self._currentName + '/setting.tes'
                outsetfile = open(saveSetname, 'w+')
                outsetfile.write(saveSetting)
                outsetfile.close()
                # Each step also persists its template images next to the config.
                for edit in self._processList:
                    edit.saveTmplImg('./testfile/'+self._currentName+'/')
                QMessageBox.information(self,'FATER','Save file OK!')
        else:
            self._saveas_test_file()
    def _saveas_test_file(self,trigger=False,filename=''):
        """Save the steps/settings to a (new) folder chosen via dialog.

        Returns True on success, False when cancelled.
        NOTE(review): when `filename` is supplied only `path` is computed --
        the whole write-out below sits inside the else branch, so nothing is
        written and the function returns None; confirm intended behaviour.
        """
        savestr='['
        for edit in self._processList:
            savestr=savestr+edit.getParamStr()+',\n'
        savestr=savestr[:-2]+']'
        saveSetting = self.settingpage.getSetting()
        if filename != '':
            path='./testfile/'+filename+'/fater.tes'
        else:
            path = QFileDialog.getSaveFileName(self,'Save File',"./testfile", "Test Files(*.tes)")
            if path[0] == '':
                return False
            isExists = os.path.exists(path[0])
            if not isExists:
                print(path[0] + 'creat ok')
                os.makedirs(path[0])
            savefilename = path[0] + '/fater.tes'
            saveSetname = path[0] + '/setting.tes'
            if savefilename != '':
                outfile = open(savefilename, 'w')
                outfile.write(savestr)
                outfile.close()
                outsetfile = open(saveSetname, 'w+')
                outsetfile.write(saveSetting)
                outsetfile.close()
                for edit in self._processList:
                    edit.saveTmplImg(path[0]+'/')
                QMessageBox.information(self, 'FATER', 'Save file OK!')
                return True
            else:
                return False
    def openSettingDlg(self):
        """Show the (persistent) settings dialog."""
        # self.settingpage = settingdlg.SettingDlg()
        self.settingpage.show()
    def add_subwidows(self):
        """Create the initial default edit widget (typo kept: 'subwidows')."""
        self.addEditWidget()
        print('curdir',os.path.abspath(os.curdir))
    def initCamera(self):
        """Initialise the camera manager (currently not called from __init__)."""
        common.camera.cameramanage.CameraManage.initCamera()
    def delAllEdit(self):
        """Remove every edit widget (used before loading a test file)."""
        print('processlist',self._processList)
        while len(self._processList)>0:
            edit=self._processList[-1]
            self.deleteEditWidget(edit)
        print('processlist', self._processList)
    def deleteEditWidget(self,widget):
        """Remove one edit widget plus its MDI sub-window, then renumber."""
        print(self._widgetDict)
        imageview=widget.getShowWidget()
        if imageview in self._widgetDict:
            self._widgetDict[imageview].hide()
            del self._widgetDict[imageview]
        if widget in self._processList:
            self._processList.remove(widget)
        self.editlayout1.removeWidget(widget)
        widget.close()
        self.refreshIdx()
    def addDefaultWideget(self):
        """Toolbar handler: append a default edit widget (typo kept: 'Wideget')."""
        self.addEditWidget()
    def addEditWidget(self,ahead=None,father='Source',name='Camera',param=''):
        """Create an EditWidget; append it, or insert it right after `ahead`."""
        if ahead==None:
            edit = editwidget.EditWidget(father,name,param)
            self.editlayout1.addWidget(edit)
            edit.showSignal.connect(self._show_widget)
            edit.addSingal.connect(self.addEditWidget)
            edit.deleteSingal.connect(self.deleteEditWidget)
            self._processList.append(edit)
            self.refreshIdx()
            print(self._processList)
        else:
            edit = editwidget.EditWidget(father,name,param)
            edit.showSignal.connect(self._show_widget)
            edit.addSingal.connect(self.addEditWidget)
            edit.deleteSingal.connect(self.deleteEditWidget)
            # Insert directly after the widget that requested the add.
            idx = self._processList.index(ahead)+1
            self._processList.insert(idx,edit)
            self.editlayout1.insertWidget(idx,edit)
            self.refreshIdx()
            print(self._processList)
    def startProcess(self):
        """Run the process chain on a worker thread (one run at a time)."""
        #Todo peocess
        # NOTE(review): Thread.isAlive() was removed in Python 3.9; use
        # is_alive() if this ever moves off Python 2 -- confirm interpreter.
        if hasattr(self,'_processThread') and self._processThread.isAlive():
            QMessageBox.warning(self, 'FATER', 'The process is running!')
        else:
            self._processThread = threading.Thread(target=self.run)
            self._processThread.start()
    def refreshIdx(self):
        """Renumber the edit widgets 1..N after add/remove."""
        idx = 1
        for edit in self._processList:
            edit.setEditId(idx)
            idx += 1
    def run(self):
        """Worker-thread body: initialise, then execute each step in order."""
        if self.initRun():
            for i in self._processList:
                i.run()
    def initRun(self):
        """Prepare the first step; return False when there is nothing to run."""
        if len(self._processList)>0:
            self._processList[0].initProcess()
            return True
        return False
    def _show_widget(self,widget):
        """Toggle the MDI sub-window that displays an edit widget's image view."""
        print(self._widgetDict)
        # print 'iddd' ,id(widget)
        if self._widgetDict.get(widget):
            if self._widgetDict[widget].isHidden():
                self._widgetDict[widget].setWindowTitle(widget.windowTitle())
                self._widgetDict[widget].show()
            else:
                self._widgetDict[widget].hide()
        else:
            # First request: wrap the widget in a sub-window without a close button.
            self._widgetDict[widget] = self.mdiview.addSubWindow(widget)
            flags=Qt.CustomizeWindowHint | Qt.WindowMinMaxButtonsHint | Qt.WindowTitleHint
            self._widgetDict[widget].setWindowFlags(flags)
            self._widgetDict[widget].setWindowTitle(widget.windowTitle())
            self._widgetDict[widget].setWindowIcon(widget.windowIcon())
            self._widgetDict[widget].show()
if __name__ == '__main__':
    # Manual launch for testing the main window on its own.
    app = QApplication(sys.argv)
    app.setStyle('Fusion')
    faterwin = FaterWin()
    faterwin.show()
    app.exec_()
| StarcoderdataPython |
5171925 | <filename>tests/sketchingdev/image_to_ascii/test_converter.py
import unittest
import ntpath
import os
from os import path
from parameterized import parameterized
from sketchingdev.image_to_ascii.converter import format_image
def get_current_directory():
    """Return the absolute directory that contains this test module."""
    this_file = os.path.realpath(__file__)
    return os.path.dirname(this_file)
def resolve_data_path(filename):
    """Return the absolute path of *filename* inside the test data directory."""
    return path.join(path.join(get_current_directory(), "data/"), filename)
def read_test_file(file_path):
    """Read and return the whole contents of a file in the test data directory."""
    with open(resolve_data_path(file_path), "r") as handle:
        return handle.read()
def custom_name_func(testcase_func, param_num, param):
    """Build a readable parameterized-test name: <func>_<WxH>_of_<image-file>."""
    width, height = param.args[0]
    image_filename = ntpath.basename(param.args[1])
    size_label = "%sx%s" % (width, height)
    safe_name = parameterized.to_safe_name(image_filename)
    return "%s_%s_of_%s" % (testcase_func.__name__, size_label, safe_name)
class TestConsoleDisplayWithImages(unittest.TestCase):
    """Golden-file tests: format_image output must match stored ASCII baselines."""
    # Each case: (console (width, height), input image, expected ASCII file).
    TEST_CASES = [
        ((10, 10),
         resolve_data_path("24bit-without-transparency.png"),
         resolve_data_path("24bit-without-transparency.png.10x10.txt")),
        ((10, 10),
         resolve_data_path("32bit-with-transparency.png"),
         resolve_data_path("32bit-with-transparency.png.txt")),
        ((12, 12),
         resolve_data_path("24bit-without-transparency.png"),
         resolve_data_path("24bit-without-transparency.png.12x12.txt")),
        ((10, 10),
         resolve_data_path("rgb-with-transparency.png"),
         resolve_data_path("rgb-with-transparency.png.10x10.txt")),
        ((20, 20),
         resolve_data_path("16bit-with-transparency.png"),
         resolve_data_path("16bit-with-transparency.png.20x20.txt")),
        ((1, 1),
         resolve_data_path("16bit-with-transparency.png"),
         resolve_data_path("16bit-with-transparency.png.1x1.txt")),
        ((10, 10),
         resolve_data_path("rgb-with-transparency.gif"),
         resolve_data_path("rgb-with-transparency.gif.10x10.txt")),
        ((10, 10),
         resolve_data_path("rgb.jpg"),
         resolve_data_path("rgb.jpg.10x10.txt")),
    ]
    @parameterized.expand(TEST_CASES, testcase_func_name=custom_name_func)
    def test_conversion(self, console_size, input_image, expected_ascii_output_file):
        """Convert one image and compare against its stored baseline."""
        ascii_output = format_image(console_size, input_image)
        expected_ascii_output = read_test_file(expected_ascii_output_file)
        self.assertEqual(expected_ascii_output, ascii_output)
    def generate_ascii_baseline_files(self):
        """Regenerate all baseline files from current output (maintenance
        helper; call manually -- it overwrites the expected files)."""
        for test in self.TEST_CASES:
            image_path = test[1]
            ascii_image_size = test[0]
            ascii_image_path = test[2]
            output = format_image(ascii_image_size, image_path)
            with open(ascii_image_path, "w") as text_file:
                text_file.write(output)
if __name__ == "__main__":
    # Run the parameterized conversion tests directly.
    unittest.main()
| StarcoderdataPython |
from math import floor
def calculate_fuel(mass: int) -> int:
    """
    Calculate the fuel required to launch the given mass.

    Fuel is mass divided by 3, rounded down, minus 2 (Advent of Code 2019,
    day 1). May be negative for very small masses; the caller filters those.

    Args:
        mass (int): Mass to calculate fuel for.

    Returns:
        int: The required fuel.
    """
    # fix: docstring claimed "Returns: None" and the annotation said float.
    # Integer floor-division also avoids float rounding for very large masses
    # (floor(mass / 3) goes through a float).
    return mass // 3 - 2
def main():
    """Read module masses from input.txt.txt and print the total fuel,
    including the fuel needed to carry the fuel itself (AoC day 1 part 2)."""
    with open('input.txt.txt', 'r') as source:
        remaining = [int(line) for line in source.readlines()]
    total_fuel = 0
    # Repeatedly compute fuel-for-fuel until every contribution is <= 0.
    while any(amount > 0 for amount in remaining):
        remaining = [calculate_fuel(mass=mass) for mass in remaining]
        remaining = [amount for amount in remaining if amount > 0]
        total_fuel += sum(remaining)
    print(total_fuel)
if __name__ == '__main__':
    # Advent of Code 2019 day 1 (part 2) entry point.
    main()
| StarcoderdataPython |
3565135 | <reponame>datosgobar/series-tiempo-ar-api
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-02-08 18:09
from __future__ import unicode_literals
from django.db import migrations
# No-op kept for migration-history consistency
def delete_all_but_last_periodicity(*_):
    """Forward migration step: intentionally a no-op (kept for history consistency)."""
    pass
def revert(*_):
    """Reverse migration step: intentionally a no-op."""
    pass
class Migration(migrations.Migration):
    """No-op data migration kept so existing databases replay the same history."""
    # Depends on prior migrations of all three apps to preserve ordering.
    dependencies = [
        ('management', '0006_auto_20190208_1509'),
        ('contenttypes', '0001_initial'),
        ('django_datajsonar', '0020_auto_20190131_1213')
    ]
    operations = [
        migrations.RunPython(delete_all_but_last_periodicity, revert)
    ]
| StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides methods for accessing the interface eAE API.
"""
import json
import stat
import os
import zipfile
import http.client as http
from subprocess import call
from uuid import uuid4
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, <NAME>"
__credits__ = []
__license__ = "Apache 2"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Dev"
__all__ = ['eAE']
class eAE(object):
    """Thin HTTPS client for the eAE interface API."""
    def __init__(self, interface_ip, interface_port):
        # Address of the eAE interface server.
        self.interface_ip = str(interface_ip)
        self.interface_port = int(interface_port)
        # NOTE(review): one persistent HTTPSConnection shared by all calls;
        # it is not thread-safe and is never re-opened after an error.
        self.connection = http.HTTPSConnection(self.interface_ip, self.interface_port)
    def __str__(self):
        return "\rThe interface ip is set to: {0}\r The interface port is set to: {1}".format(self.interface_ip,
                                                                                              self.interface_port)
    def _create_eae_zipfile(self, zip_file_name, main_file_path, data_files=None):
        """Build /tmp/<zip_file_name> containing the main script plus data files;
        returns the zip path (world read/writable)."""
        to_zip = []
        if data_files is None:
            data_files = []
        # Handle main script
        to_zip.append(main_file_path)
        # Prepare the zip file
        zip_path = "/tmp/" + zip_file_name
        zipf = zipfile.ZipFile(zip_path, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True)
        for f in to_zip:
            zipf.write(f)
        zipf.close()
        # Handle other files & dirs
        # NOTE(review): shells out to `zip` with an unquoted path and
        # shell=True; paths containing spaces/metacharacters will break (or
        # allow injection). Consider subprocess.run with an argument list.
        for f in data_files:
            zipCommand = "zip -r -u -0 " + zip_path + " " + f
            call([zipCommand], shell=True)
        # Chmod 666 the zip file so it can be accessed
        os.chmod(zip_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
        return zip_path
    def is_eae_alive(self):
        """Retrieve the status of the eAE (server's integer reply)."""
        self.connection.request('GET', '/interfaceEAE/utilities/isAlive')
        res = self.connection.getresponse()
        return int(res.read())
    def retrieve_clusters(self):
        """Retrieve the list of all available clusters (JSON-decoded)."""
        self.connection.request('GET', '/interfaceEAE/EAEManagement/retrieveClusters')
        res = self.connection.getresponse()
        str_response = res.read().decode('utf-8')
        clusters = json.loads(str_response)
        return clusters
    def submit_jobs(self, parameters_set, cluster, computation_type, main_file, data_files, host_ip, ssh_port="22"):
        """Submit jobs to the eAE backend

        This method is called when a specific task needs to be deployed on a cluster.
        Packs the main script + data files into a zip keyed by a fresh UUID and
        POSTs the job description as JSON; returns the raw server response body.
        """
        uuid = uuid4()
        zip_file_name = "{0}.zip".format(uuid)
        configs = parameters_set
        zip_file = self._create_eae_zipfile(zip_file_name, main_file, data_files)
        data = {'id': str(uuid), 'host_ip': host_ip, 'ssh_port': ssh_port, 'zip': zip_file, 'configs': configs,
                'cluster': cluster, 'clusterType': computation_type, 'mainScriptExport': main_file}
        data_str = json.dumps(data)
        self.connection.request('POST', '/interfaceEAE/OpenLava/submitJob', data_str)
        res = self.connection.getresponse()
        # NOTE(review): local name 'submit_sucess' is misspelled ("success").
        submit_sucess = res.read()
        return submit_sucess
def test_methods():
    """Live smoke test against the production eAE interface (needs network).

    The job-submission section uses placeholder paths/IPs and must be edited
    before it can succeed.
    """
    # Setting up the connection to interface
    ip = "interfaceeae.doc.ic.ac.uk"
    port = 443
    eae = eAE(ip, port)
    # Testing if the interface is Alive
    is_alive = eae.is_eae_alive()
    print(is_alive)
    # We retrieve the list of Clusters
    clusters = eae.retrieve_clusters()
    print(clusters)
    # We submit a dummy job
    parameters_set = "0\n 1\n 2\n"
    cluster = "python_large"
    computation_type = "Python"
    main_file = "/PATH/TO/FILE/Demo.py"
    data_files = ['']
    host_ip = "X.X.X.X"
    ssh_port = "22"
    job = eae.submit_jobs(parameters_set, cluster, computation_type, main_file, data_files, host_ip, ssh_port)
    print(job)
if __name__ == '__main__':
    # Manual smoke test; requires network access to the eAE interface.
    test_methods()
| StarcoderdataPython |
3216360 | <reponame>yeshess/cloudify-manager
#########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from .... import rest_utils, rest_decorators
from .. import base
class ARIAService(base.BaseARIAEndpoints):
    """REST endpoints for a single ARIA service (get / delete by id)."""
    @rest_decorators.exceptions_handled
    def get(self, service_id, **kwargs):
        """
        Get Service by id
        """
        return self.model.service.get(service_id).to_dict()
    # NOTE(review): unlike get(), delete() is not wrapped in
    # @rest_decorators.exceptions_handled -- confirm whether that is intended.
    def delete(self, service_id, force=False, **kwargs):
        """
        delete Service by id
        """
        # Fetch first so the (now-deleted) service can be echoed back.
        service = self.model.service.get(service_id)
        self.core.delete_service(service_id, force)
        return service, 200
class ARIAServices(base.BaseARIAEndpoints):
    """Collection endpoints for ARIA services (list, create)."""
    @rest_decorators.create_filters()
    def get(self,
            _include=None,
            filters=None, pagination=None, sort=None,
            **kwargs):
        """
        Get a Service list
        """
        return self._respond_list(
            self.model.service.list(
                include=_include,
                filters=filters,
                pagination=pagination,
                sort=sort,
                **kwargs
            )
        )
    @rest_decorators.exceptions_handled
    def put(self, **kwargs):
        """
        create a Service template
        """
        # NOTE: `basestring` below makes this module Python-2 only.
        request_dict = rest_utils.get_json_and_verify_params(
            dict(
                service_template_id={},
                service_name={'optional': True, 'type': basestring},
                inputs={'optional': True, 'type': dict}
            )
        )
        service = self.core.create_service(
            request_dict['service_template_id'],
            inputs=request_dict.get('inputs', {}),
            service_name=request_dict['service_name'])
        # 201 Created; timestamp fields are stripped from the response body.
        return service.to_dict(
            service.fields() - {'created_at', 'updated_at'}), \
            201
| StarcoderdataPython |
3222968 | <reponame>Arfey/aiohttp_admin2<filename>tests/test_connection_injectors.py<gh_stars>10-100
from aiohttp_admin2.connection_injectors import ConnectionInjector
from aiohttp import web
async def test_connection_injector(aiohttp_client):
    """
    Verify correct behaviour of ConnectionInjector:

    1. successful init inside the aiohttp cleanup context, and
    2. successful injection into a decorated class.
    """
    injector = ConnectionInjector()
    conn_str = "db_connection_string"

    async def setup_connection(_):
        # 1. successful init in aiohttp context
        injector.init(conn_str)
        yield

    @injector.inject
    class Controller:
        pass

    app = web.Application()
    app.cleanup_ctx.append(setup_connection)
    await aiohttp_client(app)

    # 2. successful injection into the decorated class
    assert isinstance(Controller.connection_injector, ConnectionInjector)
    assert Controller.connection_injector.connection == conn_str
| StarcoderdataPython |
import os
#import argparse
import torch
import mixed_precision
from stats import StatTracker
from datasets import Dataset, build_dataset, get_dataset, get_encoder_size
from model import Model
from checkpoint import Checkpointer
from task_self_supervised import train_self_supervised
from task_classifiers import train_classifiers
# Static run configuration (stands in for the commented-out argparse CLI).
args = {'dataset': 'C10',  # dataset key for get_dataset (C10 = CIFAR-10)
        'batch_size': 200,
        'learning_rate': 0.0002,
        'seed': 1,  # RNG seed for torch / torch.cuda
        'amp': False,  # enable mixed-precision training
        'classifiers': False,  # True = train evaluation classifiers instead
        'decoder': False, # Nawid - Used to train the decoder
        'ndf': 128,  # encoder feature width
        'n_rkhs': 1024,  # dimension of the RKHS embedding
        'tclip': 20.0,  # NCE score clipping value
        'n_depth': 3,
        'use_bn':0,  # 1 = use batch norm in the encoder
        'output_dir' : '/runs',  # checkpoints + tensorboard logs
        'input_dir': '/mnt/imagenet',  # dataset root
        'cpt_load_path': None,  # optional checkpoint to resume from
        'cpt_name' : 'amdim_cpt.pth',
        'run_name' : 'default_run'
        }
def main():
    """Entry point: build dataset/model, then run the selected training task.

    All configuration comes from the module-level ``args`` dict.
    """
    # create target output dir if it doesn't exist yet
    if not os.path.isdir(args['output_dir']):
        os.mkdir(args['output_dir'])

    # enable mixed-precision computation if desired
    if args['amp']:
        mixed_precision.enable_mixed_precision()

    # set the RNG seeds (probably more hidden elsewhere...)
    torch.manual_seed(args['seed'])
    torch.cuda.manual_seed(args['seed'])

    # get the dataset
    dataset = get_dataset(args['dataset'])
    encoder_size = get_encoder_size(dataset)

    # get a helper object for tensorboard logging
    log_dir = os.path.join(args['output_dir'], args['run_name'])
    stat_tracker = StatTracker(log_dir=log_dir)

    # get dataloaders for training and testing
    train_loader, test_loader, num_classes = \
        build_dataset(dataset=dataset,
                      batch_size=args['batch_size'],
                      input_dir=args['input_dir'],
                      labeled_only=args['classifiers'])

    torch_device = torch.device('cuda')
    checkpointer = Checkpointer(args['output_dir'])
    if args['cpt_load_path']:
        model = checkpointer.restore_model_from_checkpoint(
            args['cpt_load_path'],
            training_classifier=args['classifiers'])
    else:
        # create new model with random parameters
        model = Model(ndf=args['ndf'], n_classes=num_classes, n_rkhs=args['n_rkhs'],
                      tclip=args['tclip'], n_depth=args['n_depth'], encoder_size=encoder_size,
                      use_bn=(args['use_bn'] == 1))
        model.init_weights(init_scale=1.0)
        checkpointer.track_new_model(model)
    model = model.to(torch_device)

    # select which type of training to do
    # fix: the original assigned `task` twice in a row; only the if/elif
    # chain is kept -- its result always overrode the first assignment.
    if args['classifiers']:
        task = train_classifiers
    elif args['decoder']:
        # NOTE(review): train_decoder is not among this module's visible
        # imports -- confirm it exists before enabling args['decoder'].
        task = train_decoder
    else:
        task = train_self_supervised

    task(model, args['learning_rate'], dataset, train_loader,
         test_loader, stat_tracker, checkpointer, args['output_dir'], torch_device)
'''
if __name__ == "__main__":
print(args)
main()
'''
| StarcoderdataPython |
313745 |
# Symmetric 8-node adjacency matrix: matrix[i][j] == 1 iff nodes i and j
# share an edge (column guide in the first comment row).
matrix = [
    #0, 1, 2, 3, 4, 5, 6, 7], #0
    [0, 1, 1, 0, 0, 0, 0, 0], #0
    [1, 0, 1, 1, 0, 0, 0, 0], #1
    [1, 1, 0, 0, 0, 0, 0, 0], #2
    [0, 1, 0, 0, 1, 1, 0, 0], #3
    [0, 0, 0, 1, 0, 0, 1, 0], #4
    [0, 0, 0, 1, 0, 0, 1, 0], #5
    [0, 0, 0, 0, 1, 1, 0, 1], #6
    [0, 0, 0, 0, 0, 0, 1, 0], #7
]
def convert_to_adjacency(adj_matrix=None):
    """Convert an adjacency matrix into an adjacency-list dictionary.

    Args:
        adj_matrix: Square 0/1 matrix (list of row lists). Defaults to the
            module-level ``matrix`` for backward compatibility with existing
            zero-argument callers.

    Returns:
        dict: node index -> list of neighbour indices.
    """
    if adj_matrix is None:
        adj_matrix = matrix
    return {i: [j for j, v in enumerate(row) if v == 1]
            for i, row in enumerate(adj_matrix)}
import pprint

output = convert_to_adjacency()
# fix: `pprint` is a module, so the original `pprint(output)` raised
# TypeError (module object is not callable); the function is pprint.pprint.
pprint.pprint(output)
9712219 | <reponame>6RiverSystems/pure_pursuit
#!/usr/bin/env python
import rospy
from std_msgs.msg import String, Header
from nav_msgs.msg import Odometry, Path
from geometry_msgs.msg import Twist, PoseStamped
import math
import sys
import time
import tf
from srslib_framework.msg import MsgUpdateToteLights
def messageCreation(message, cmd, startColor, endColor, startSegment, endSegment, freq):
    """Populate a MsgUpdateToteLights message in place.

    Colors are (r, g, b, a) sequences, segments are (x, y, z) sequences,
    `cmd` is the light command id and `freq` the blink frequency.
    """
    message.lightCmd = cmd
    # Copy both RGBA colours field by field.
    for target, rgba in ((message.startColor, startColor), (message.endColor, endColor)):
        target.r = rgba[0]
        target.g = rgba[1]
        target.b = rgba[2]
        target.a = rgba[3]
    # Copy both segment coordinates field by field.
    for target, xyz in ((message.startSegment, startSegment), (message.endSegment, endSegment)):
        target.x = xyz[0]
        target.y = xyz[1]
        target.z = xyz[2]
    message.frequency = freq
class StraightLine:
    """ROS node helper: publishes a straight-line path to `endPoint` and
    alternates the tote light colours while velocity commands arrive."""
    VEL_COM_TOPIC = "/sensors/odometry/velocity/cmd"
    MAP_POSE_TOPIC = "/map_pose"
    GOAL_TOPIC = "/path_segment"
    LIGHT_TOPIC = "/drivers/brainstem/cmd/update_tote_lights"
    # NOTE(review): `global rospy` at class scope has no effect -- rospy is
    # already a module-level import; confirm it can be removed.
    global rospy
    def __init__(self, endPoint):
        # endPoint: [x, y] goal in the map frame.
        self.endPoint = endPoint
        self.map_pose_sub = rospy.Subscriber(self.MAP_POSE_TOPIC, PoseStamped, self.mapPoseCallback)
        #self.vel_sub = rospy.Subscriber(self.VEL_COM_TOPIC, Twist, self.velocityCmdCallback)
        self.goal_pub = rospy.Publisher(self.GOAL_TOPIC, Path, queue_size=2)
        self.light_pub = rospy.Publisher(self.LIGHT_TOPIC, MsgUpdateToteLights, queue_size=5)
        # Seconds between light-colour swaps (odd literal `1.#0` == 1.0 with
        # a trailing comment; kept as-is).
        self.timeChange = 1.#0
        self.redTop = True
        self.changeLights = 0
    def velocityCmdCallback(self, msg):
        """Alternate red/green on the two light segments every timeChange seconds."""
        if(rospy.get_time() - self.changeLights > self.timeChange):
            if(self.redTop):
                lightMsg1 = MsgUpdateToteLights()
                lightMsg2 = MsgUpdateToteLights()
                messageCreation(lightMsg1,1,[255,0,0,0],[255,0,0,0],[0,0,0],[26,1,0],1)
                self.light_pub.publish(lightMsg1)
                messageCreation(lightMsg2,1,[0,255,0,0],[0,255,0,0],[0,0,1],[26,1,1],1)
                self.light_pub.publish(lightMsg2)
            else:
                lightMsg1 = MsgUpdateToteLights()
                lightMsg2 = MsgUpdateToteLights()
                messageCreation(lightMsg1,1,[0,255,0,0],[0,255,0,0],[0,0,0],[26,1,0],1)
                self.light_pub.publish(lightMsg1)
                messageCreation(lightMsg2,1,[255,0,0,0],[255,0,0,0],[0,0,1],[26,1,1],1)
                self.light_pub.publish(lightMsg2)
            self.redTop = not self.redTop
            self.changeLights = rospy.get_time()
    def mapPoseCallback(self, msg):
        """Cache the latest map-frame pose; sendGoalFunc starts from it."""
        self.currentPose = msg.pose
    def sendGoalFunc(self):
        """Publish a 99-pose straight-line Path from the current pose to endPoint."""
        path = Path()
        path.header.frame_id = 'map'
        for i in range(99):
            newPose = PoseStamped()
            newPose.header.seq = i
            newPose.header.frame_id = 'map'
            # Interpolate i/100 of the way toward the goal.
            newPose.pose.position.x = self.currentPose.position.x + i*(self.endPoint[0] - self.currentPose.position.x)/100.0
            newPose.pose.position.y = self.currentPose.position.y + i*(self.endPoint[1] - self.currentPose.position.y)/100.0
            newPose.pose.position.z = 0
            # NOTE(review): math.tan(dy/dx) looks like it should be
            # math.atan2(dy, dx) for a heading angle -- confirm before reuse.
            newQuaternion = tf.transformations.quaternion_from_euler(0, 0, math.tan((self.endPoint[1] - self.currentPose.position.y)/( .0001 + self.endPoint[0] - self.currentPose.position.x)))
            newPose.pose.orientation.x = 0
            newPose.pose.orientation.y = 0
            newPose.pose.orientation.z = newQuaternion[2]
            newPose.pose.orientation.w = newQuaternion[3]
            path.poses.append(newPose)
        self.goal_pub.publish(path)
if __name__ == '__main__':
    # Stand-alone demo: drive the robot in a straight line to (13, 42).
    # Note the Python-2 print statement below -- this file targets Python 2.
    rospy.init_node('straight_line', anonymous=True)
    endPoint = []
    endPoint.append(13) # x
    endPoint.append(42) # y
    hs = StraightLine(endPoint)
    # Give publishers/subscribers time to register before publishing.
    time.sleep(1.0)
    rate = rospy.Rate(20) # 20 Hz (previous comment said 10hz; value is 20)
    rate.sleep()
    print "Here we go"
    hs.sendGoalFunc()
    rospy.spin()
276686 | <filename>rpmrepo/util.py
"""rpmrepo - utilities
A module with a wide array of different utility functions that extend the
standard library.
"""
# pylint: disable=invalid-name
import contextlib
import errno
import os
@contextlib.contextmanager
def suppress_oserror(*errnos):
    """Suppress OSError Exceptions

    Extension of `contextlib.suppress()` from the standard library: it
    catches `OSError` exceptions, but swallows only those whose `errno`
    matches one of the given error numbers.

    Parameters
    ----------
    errnos
        Error numbers to match on. With no arguments nothing is suppressed.
    """
    try:
        yield
    except OSError as err:
        if err.errno in errnos:
            return  # matched: swallow the exception
        raise err
@contextlib.contextmanager
def open_tmpfile(dirpath, mode=0o777):
    """Open O_TMPFILE and optionally link it

    This opens a new temporary file in the specified directory. As part of the
    context-manager, a dictionary is returned as the context object. An opened
    stream to the temporary file is accessible as `ctx["stream"]`.

    If the caller sets `ctx["name"]` to something else than `None`, the file
    will be attempted to be linked as that name once the context is exited. If
    `ctx["replace"]` is changed to `True`, a possible previous file is
    replaced. If it is set to `False`, the operation fails if there is a
    previous file with the same name.

    Parameters
    ----------
    dirpath
        A path to a directory where to open the temporary file in.
    mode
        The file mode to use when opening the temporary file. Note that this is
        subject to the OS `umask`.
    """
    # NOTE: O_TMPFILE and the /proc/self/fd link trick are Linux-specific.
    ctx = {"name": None, "replace": False, "stream": None}
    dirfd = None
    fd = None
    try:
        # Directory handle used only as an anchor for *at-style syscalls.
        dirfd = os.open(dirpath, os.O_PATH | os.O_CLOEXEC)
        # Unnamed (anonymous) file created inside dirpath.
        fd = os.open(".", os.O_RDWR | os.O_TMPFILE | os.O_CLOEXEC, mode, dir_fd=dirfd)
        # closefd=False: the raw fd is closed in the finally block below.
        with os.fdopen(fd, "rb+", closefd=False) as stream:
            ctx["stream"] = stream
            yield ctx
        if ctx["name"] is not None:
            if ctx["replace"]:
                # We would want to call:
                #
                #     os.replace(f"/proc/self/fd/{fd}", ctx["name"], dst_dir_fd=dirfd)
                #
                # ..but the underlying linux syscall `renameat2(2)` does not
                # support `AT_SYMLINK_FOLLOW` nor `AT_EMPTY_PATH`, hence we
                # cannot combine it with `O_TMPFILE`. We accept the race for
                # now and wait for the kernel to provide the extended flags.
                with suppress_oserror(errno.ENOENT):
                    os.unlink(ctx["name"], dir_fd=dirfd)
                os.link(f"/proc/self/fd/{fd}", ctx["name"], dst_dir_fd=dirfd)
            else:
                # Without replace, link() fails with EEXIST if the name exists.
                os.link(f"/proc/self/fd/{fd}", ctx["name"], dst_dir_fd=dirfd)
    finally:
        if fd is not None:
            os.close(fd)
        if dirfd is not None:
            os.close(dirfd)
| StarcoderdataPython |
11224853 | <reponame>bavodenys/KitesurfApp<filename>KiteSurfApp.py<gh_stars>0
from kivymd.app import MDApp
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.boxlayout import BoxLayout
from kivymd.uix.list import OneLineListItem
import SpeedMeter
from MeetnetVlaamseBanken import VlaamseMeetbank
from GenFunctions import *
# Meetnet Vlaamse Banken login:
# NOTE(review): credentials are read from a plaintext file next to the app;
# consider an environment variable or OS keyring instead. Line 1 = username,
# line 2 = password. The file handle is never closed.
f = open("login.txt", "r")
login = f.read().split('\n')
username = login[0]
password = login[1]
# Make object Vlaamse Meetbank
# NOTE(review): this rebinding shadows the imported VlaamseMeetbank class
# with an instance -- no second instance can be created afterwards.
VlaamseMeetbank = VlaamseMeetbank(username, password)
# Resize window to smartphone format
Window.size = (300, 500)
Builder.load_string("""
<HomeScreen>:
name:'HomeScreen'
MDNavigationLayout:
ScreenManager:
Screen:
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: 'Belgian Wind'
left_action_items: [["menu", lambda x: nav_drawer.set_state()]]
right_action_items: [["images/BelgianKitesurfLogo.png", lambda x:app.go_home()]]
elevation:8
ScrollView:
MDList:
id:'locations_id'
MDNavigationDrawer:
id:nav_drawer
ContentNavigationDrawer:
orientation: 'vertical'
padding: "8dp"
spacing: "8dp"
MDLabel:
text:'Menu'
size_hint_y: None
height: self.texture_size[1]
ScrollView:
MDList:
OneLineIconListItem:
text:'Home'
on_release: root.manager.current = 'HomeScreen'
IconLeftWidget:
icon: 'home'
OneLineIconListItem:
text:'About'
on_release: root.manager.current = 'AboutScreen'
IconLeftWidget:
icon:'information-outline'
OneLineIconListItem:
text:'Exit'
on_release: app.exit()
IconLeftWidget:
icon:'logout'
<DataScreen>:
name:'DataScreen'
MDNavigationLayout:
ScreenManager:
Screen:
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: 'Belgian Wind'
left_action_items: [["menu", lambda x: nav_drawer.set_state()]]
right_action_items: [["images/BelgianKitesurfLogo.png", lambda x: app.go_home()]]
elevation:8
MDLabel:
id:location_id
text: 'DataScreen'
halign: 'center'
padding_x: 5
padding_y: 5
SpeedMeter:
id: windspeed_id
max: 50
tick: 200
start_angle: -90
end_angle: 90
subtick: 100
label: 'Kts'
value: 20
cadran_color: '#ffffff'
needle_color: '#000000'
sectors: (0, '#6699cc', 3, '#87cefa', 6, '#87CEEB', 10, '#0d98ba', 16, '#00FF00', 21, '#FFA500', 27, '#cb4154', 33, '#AE0700', 40, '#800080', 47, '#8F00FF', 50)
sector_width: 10
MDLabel:
id:windspeed_id_old
text: 'Windspeed: 10 kt'
halign: 'center'
padding_x: 5
padding_y: 5
SpeedMeter:
id: winddirection_id
max: 360
tick: 500
start_angle: 0
end_angle: 0
subtick: 400
label: ''
value: 20
sectors: (0, '#00ff00', 50, '#ff0000', 230, '#00ff00' ,360)
sector_width: 10
display_first: False
cadran_color: '#ffffff'
needle_color: '#000000'
MDLabel:
id:winddirection_id_old
text: 'Winddirection: NNW'
halign: 'center'
padding_x: 5
padding_y: 5
MDNavigationDrawer:
id:nav_drawer
ContentNavigationDrawer:
orientation: 'vertical'
padding: "8dp"
spacing: "8dp"
MDLabel:
text:'Menu'
size_hint_y: None
height: self.texture_size[1]
ScrollView:
MDList:
OneLineIconListItem:
text:'Home'
on_release: root.manager.current = 'HomeScreen'
IconLeftWidget:
icon: 'home'
OneLineIconListItem:
text:'About'
on_release: root.manager.current = 'AboutScreen'
IconLeftWidget:
icon:'information-outline'
OneLineIconListItem:
text:'Exit'
on_release: app.exit()
IconLeftWidget:
icon:'logout'
<AboutScreen>:
name:'AboutScreen'
MDNavigationLayout:
ScreenManager:
Screen:
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: 'Belgian Wind'
left_action_items: [["menu", lambda x: nav_drawer.set_state()]]
right_action_items: [["images/BelgianKitesurfLogo.png", lambda x:app.go_home()]]
elevation:8
MDLabel:
text: "This is an application created by a belgian kitesurfer for kitersurfers in Belgium"
halign: "center"
MDNavigationDrawer:
id:nav_drawer
ContentNavigationDrawer:
orientation: 'vertical'
padding: "8dp"
spacing: "8dp"
MDLabel:
text:'Menu'
size_hint_y: None
height: self.texture_size[1]
ScrollView:
MDList:
OneLineIconListItem:
text:'Home'
on_release: root.manager.current = 'HomeScreen'
IconLeftWidget:
icon: 'home'
OneLineIconListItem:
text:'About'
on_release: root.manager.current = 'AboutScreen'
IconLeftWidget:
icon:'information-outline'
OneLineIconListItem:
text:'Exit'
on_release: app.exit()
IconLeftWidget:
icon:'logout'
""")
# Home screen
class HomeScreen(Screen):
    """Landing screen: scrollable list of measurement locations (kv rule <HomeScreen>)."""
    pass
# Data screen
class DataScreen(Screen):
    """Per-location screen with wind speed/direction gauges (kv rule <DataScreen>)."""
    pass
# About screen
class AboutScreen(Screen):
    """Static about page (kv rule <AboutScreen>)."""
    pass
# Sensor ids per coastal station. Ids ending in WVC appear to be the wind
# speed channel and WRS the wind direction channel -- confirm against the
# Meetnet Vlaamse Banken catalogue.
Locations = {'Blankenberge': {'WindMagnitude':"BL7WVC",'WindDirection':"BL7WRS"}, \
             'Nieuwpoort': {'WindMagnitude':"NP7WVC",'WindDirection':"NP7WRS"}, \
             'Oostende': {'WindMagnitude':"OMPWVC",'WindDirection':"OMPWRS"}, \
             'Zeebrugge': {'WindMagnitude':"ZDIWVC",'WindDirection':"ZDIWRS"}}
class DemoApp(MDApp):
    """KivyMD application: home list of stations + per-station wind data screen."""
    def build(self):
        self.sm = ScreenManager()
        self.sm.add_widget(HomeScreen(name='HomeScreen'))
        self.sm.add_widget(DataScreen(name='DataScreen'))
        self.sm.add_widget(AboutScreen(name='AboutScreen'))
        self.theme_cls.primary_palette = "Green"
        # For loop over all locations with wind information and add item
        # (the quoted "'locations_id'" key matches the kv id:'locations_id').
        for location in sorted(Locations):
            self.sm.screens[0].ids["'locations_id'"].add_widget(OneLineListItem(text=location, on_release=self.select_location))
        return self.sm
    # NOTE(review): this class is nested inside DemoApp while the kv markup
    # references a top-level ContentNavigationDrawer rule -- confirm intent.
    class ContentNavigationDrawer(BoxLayout):
        pass
    def navigation_draw(self):
        """Placeholder navigation-drawer callback (no behaviour yet)."""
        pass
    # Go to home screen
    def go_home(self):
        self.sm.current = 'HomeScreen'
    # Go to about screen
    def go_about(self):
        self.sm.current = 'AboutScreen'
    # Exit the application
    def exit(self):
        self.stop()
    # Selection of location
    def select_location(self, obj):
        """Fetch the last 2h of wind data for the tapped station and show it."""
        # Set DataScreen
        self.sm.current = 'DataScreen'
        Data = VlaamseMeetbank.getDataLastXhours(2, [Locations[obj.text]['WindDirection'], Locations[obj.text]['WindMagnitude']])
        WindDirectionTab = []
        WindSpeedTab = []
        # Split the mixed response by sensor-id suffix (WRS = direction,
        # WVC = speed -- see Locations note).
        for Entry in Data['Values']:
            if Entry['ID'].find("WRS") != -1:
                for sample in Entry['Values']:
                    WindDirectionTab.append(sample['Value'])
            if Entry['ID'].find("WVC") != -1:
                for sample in Entry['Values']:
                    WindSpeedTab.append(sample['Value'])
        # Wind direction processing (a2 presumably the mean -- TODO confirm
        # against GenFunctions.ProcessWindDirection).
        a1, a2, a3, a4, a5 = ProcessWindDirection(WindDirectionTab)
        # Wind speed processing
        b1, b2, b3, b4, b5 = ProcessWindSpeed(WindSpeedTab, 1)
        self.sm.current_screen.ids['location_id'].text = obj.text
        self.sm.current_screen.ids['windspeed_id'].value = round(b2)
        self.sm.current_screen.ids['windspeed_id_old'].text = f"Windspeed: {round(b2)} kt"
        self.sm.current_screen.ids['winddirection_id'].value = round(a2)
        self.sm.current_screen.ids['winddirection_id_old'].text = f"Winddirection: {convert_to_winddirection(a2)}"
if __name__ == "__main__":
    # Launch the Kivy/KivyMD application.
    DemoApp().run()
| StarcoderdataPython |
# Distributed DL Client runs on the master node
# @author: <NAME>
# @created date: 2021-06-28
# @last modified date:
# @note:
from ddlf.cluster import *
code1 = '''
def f(self):
self.data['total'] = 1000
setattr(Worker, 'ff', f)
self.ff()
'''
code2 = '''
self.data['price'] = 1000000
'''
async def g(self, a, b):
    """Compute a unit price of ``10000 + a - b``, store it on ``self.data``
    under 'unit_price' and return it."""
    unit_price = 10000 + a - b
    self.data['unit_price'] = unit_price
    return unit_price
async def main():
    """Connect to the DDLF cluster, push both code snippets, run the remote
    method, print the results and close the connection."""
    cluster = Cluster()
    await cluster.connect()
    # Execute both snippets on the workers in order.
    for snippet in (code1, code2):
        await cluster.run_code(snippet)
    outcome = await cluster.run_method(g, a=4000, b=2000)
    print(f"result: {outcome}")
    outcome = await cluster.show_data()
    print(f"result: {outcome}")
    # await cluster.clean()
    await cluster.close()
# Script entry point. `asyncio` is brought in by the star import from
# ddlf.cluster. Guarding prevents the workflow from running on import.
if __name__ == "__main__":
    asyncio.run(main())
| StarcoderdataPython |
1772929 | """test_mainmodel.py - tests the mainmodel module
<NAME> (TRI/Austin, Inc.)
"""
__author__ = '<NAME>'
import models.mainmodel as model
import models.dataio as dataio
import models.abstractplugin as abstractplugin
import models.config as config
import models.ultrasonicgate as ultrasonicgate
import controllers.pathfinder as pathfinder
from utils.skiptest import skipIfModuleNotInstalled
import h5py
import numpy as np
import logging
import multiprocessing
import os
import random
import shutil
import sys
import tempfile
import unittest
def deleted_user_path():
    """Utility function to delete empty folders in the user data folders,
    used to verify that MainModel will recreate missing folders as required.
    Returns a list of folders successfully deleted or None if no folders
    were deleted."""
    data_folders = [pathfinder.user_path(), pathfinder.data_path(), pathfinder.thumbnails_path(),
                    pathfinder.plugins_path(), pathfinder.podmodels_path(), pathfinder.adamodels_path(),
                    pathfinder.colormaps_path()]
    deleted_folders = []
    for folder in data_folders:
        # Only remove folders that exist and are empty.
        exists_and_empty = os.path.exists(folder) and not os.listdir(folder)
        if exists_and_empty:
            try:
                os.rmdir(folder)
                deleted_folders.append(folder)
            # OSError covers WindowsError (folder in use by Explorer, cmd,
            # etc.) and also exists on non-Windows platforms, where the bare
            # name WindowsError is undefined.
            except OSError:
                pass
    if deleted_folders:
        return deleted_folders
    return None
# Define a mock plugin to inspect results of calling plugin classes
class MockPlugin(abstractplugin.TRIPlugin):
    """Mock NDIToolbox plugin used to check plugin_wrapper.

    Records the keyword arguments, assigned data and active config in its
    internal dict so tests can assert on what plugin_wrapper passed in.
    """

    def __init__(self, **kwargs):
        abstractplugin.TRIPlugin.__init__(self, **kwargs)
        self.config = {'a': 'b'}
        # Capture constructor kwargs for later inspection by tests.
        self._data = {'kwargs': kwargs}

    @property
    def data(self):
        """Dictionary recording everything handed to this plugin."""
        return self._data

    @data.setter
    def data(self, new_data):
        # Stash assigned data under its own key instead of replacing the dict.
        self._data['data'] = new_data

    def run(self):
        # Record the configuration that was active at execution time.
        self._data['config'] = self.config
# A MockPlugin that raises an Exception on execution
class ExceptionPlugin(MockPlugin):
    """Raises an Exception on run() - used to verify
    exception Queue messaging"""

    def run(self):
        # Unconditional failure lets tests assert on the propagated exception.
        raise Exception("Wuh-oh.")
class TestMainModel(unittest.TestCase):
    """Tests the main model.

    Cleanup paths catch OSError (of which WindowsError is the Windows-only
    alias) so the suite also runs on non-Windows hosts, where the name
    WindowsError is undefined and would itself raise NameError.
    """

    def setUp(self):
        self.sample_data = np.array(self.random_data())
        self.sample_data_basename = "sample.dat"
        self.sample_data_file = os.path.join(os.path.dirname(__file__),
                                             self.sample_data_basename)
        with h5py.File(self.sample_data_file, 'w') as fidout:
            fidout.create_dataset(self.sample_data_basename, data=self.sample_data)
        self.mock_controller = ""
        self.model = model.MainModel(self.mock_controller)
        cfg = config.Configure(pathfinder.config_path())
        self.original_loglevel = cfg.get_app_option("log level")

    def random_data(self):
        """Returns a list of random data"""
        return [random.uniform(-100, 100) for i in range(25)]

    @unittest.skipIf(deleted_user_path() is None,
                     "User data folders in use")
    def test_check_user_path(self):
        """Verify main model creates the user data folders if not
        already in existence."""
        self.check_user_path()

    def check_user_path(self):
        """Verify user data folders were created"""
        data_folders = [pathfinder.user_path(), pathfinder.data_path(),
                        pathfinder.thumbnails_path(), pathfinder.gates_path(),
                        pathfinder.plugins_path(), pathfinder.podmodels_path(),
                        pathfinder.adamodels_path(), pathfinder.colormaps_path(),
                        pathfinder.batchoutput_path()]
        self.model.check_user_path()
        for folder in data_folders:
            self.assertTrue(os.path.exists(folder))

    def test_copy_system_files(self):
        """Verify main model copies dynamic modules to the specified
        folder."""
        test_module_folder = os.path.dirname(__file__)
        test_modules = []
        temp_dest_folder = tempfile.mkdtemp()
        module_files = os.listdir(test_module_folder)
        for module_file in module_files:
            module_name, module_extension = os.path.splitext(module_file)
            if module_name.startswith("test_") and \
               module_extension == os.extsep + "py":
                test_modules.append(module_file)
        self.model.copy_system_files(test_module_folder, temp_dest_folder)
        for module_file in test_modules:
            dest_module = os.path.join(temp_dest_folder, module_file)
            self.assertTrue(os.path.exists(dest_module))
        try:
            shutil.rmtree(temp_dest_folder)
        except OSError:  # folder in use (Windows); OSError covers WindowsError
            pass

    def test_copy_system_plugins(self):
        """Verify main model copies system plugins to the user's
        plugins folder."""
        self.copy_system_plugins()

    def test_copy_system_gates(self):
        """Verify main model copies system ultrasonic gate plugins to the user's
        gates folder."""
        self.copy_system_gates()

    def test_copy_system_colormaps(self):
        """Verify main model copies colormaps to the user's colormaps folder."""
        self.copy_system_colormaps()

    def copy_system_plugins(self):
        """Verify system plugins are copied to the user's plugins folder"""
        # Sample of system plugins to install
        system_plugins = ['medfilter_plugin.py', 'normalize_plugin.py', '__init__.py']
        self.remove_system_files(system_plugins, pathfinder.plugins_path())
        self.model.copy_system_plugins()
        for plugin in system_plugins:
            installed_plugin = os.path.join(pathfinder.plugins_path(), plugin)
            self.assertTrue(os.path.exists(installed_plugin))

    def copy_system_gates(self):
        """Verify system ultrasonic gate plugins are copied to user's
        gates folder"""
        gate_plugins = ['predefined_gates.py', 'additional_gates.py', '__init__.py']
        self.remove_system_files(gate_plugins, pathfinder.gates_path())
        self.model.copy_system_gates()
        for gate in gate_plugins:
            installed_gate = os.path.join(pathfinder.gates_path(), gate)
            self.assertTrue(os.path.exists(installed_gate))

    def copy_system_colormaps(self):
        """Verify system colormaps are copied to user's folder"""
        colormaps_folder = os.path.join(pathfinder.app_path(), 'colormaps')
        colormaps = os.listdir(colormaps_folder)
        self.remove_system_files(colormaps, pathfinder.colormaps_path())
        self.model.copy_system_colormaps()
        for cmap in colormaps:
            installed_cmap = os.path.join(pathfinder.colormaps_path(), cmap)
            self.assertTrue(os.path.exists(installed_cmap))

    def remove_system_files(self, file_list, dest):
        """Attempts to remove every file in file_list found in dest folder.
        Used to verify copying system files to user's local data folder."""
        for each_file in file_list:
            dest_path = os.path.join(dest, each_file)
            if os.path.exists(dest_path):
                try:
                    os.remove(dest_path)
                except OSError:  # file in use (Windows); OSError covers WindowsError
                    pass

    def test_migrate_user_path(self):
        """Verify migration of the user's data folder"""
        current_user_path = pathfinder.user_path()
        temp_user_path = tempfile.mkdtemp()
        self.model.migrate_user_path(temp_user_path)
        self.check_user_path()
        self.copy_system_plugins()
        self.model.migrate_user_path(current_user_path)
        try:
            shutil.rmtree(temp_user_path)
        except OSError:  # folder in use; OSError covers WindowsError
            pass

    def test_load_dynamic_modules(self):
        """Verify the main model's dynamic module loading"""
        plugin_list = model.load_dynamic_modules(pathfinder.plugins_path(), abstractplugin.AbstractPlugin)
        for plugin in plugin_list:
            plugin_instance = plugin[1]
            self.assertTrue(issubclass(plugin_instance, abstractplugin.AbstractPlugin))

    def test_load_plugins(self):
        """Verify the main model loads available plugins"""
        plugin_list = model.load_plugins()
        for plugin in plugin_list:
            plugin_instance = plugin[1]
            self.assertTrue(issubclass(plugin_instance, abstractplugin.AbstractPlugin))

    def test_load_gates(self):
        """Verify the main model loads available gates"""
        gate_list = model.load_gates()
        for gate in gate_list:
            gate_instance = gate[1]
            self.assertTrue(issubclass(gate_instance, ultrasonicgate.UltrasonicGate))

    def test_plugin_wrapper(self):
        """Verify the plugin_wrapper function properly configures and runs a plugin"""
        plugin_queue = multiprocessing.Queue()
        plugin_exception_queue = multiprocessing.Queue()
        plugin_data = np.array(self.random_data())
        plugin_cfg = {'a': 'c'}
        kwargs = {'name': 'Mock Plugin', 'description': 'Mock plugin used to test plugin_wrapper'}
        model.plugin_wrapper(plugin_exception_queue, MockPlugin, plugin_data, plugin_queue, plugin_cfg,
                             **kwargs)
        returned_data = plugin_queue.get()
        self.assertTrue(isinstance(returned_data, dict))
        self.assertDictEqual(returned_data['config'], plugin_cfg)
        self.assertDictEqual(returned_data['kwargs'], kwargs)
        self.assertTrue(np.array_equal(returned_data['data'], plugin_data))

    def test_plugin_wrapper_exceptions(self):
        """Verify the plugin_wrapper function properly returns Exception info"""
        plugin_queue = multiprocessing.Queue()
        plugin_exception_queue = multiprocessing.Queue()
        plugin_data = np.array(self.random_data())
        model.plugin_wrapper(exception_queue=plugin_exception_queue,
                             plugin_cls=ExceptionPlugin,
                             plugin_data=plugin_data,
                             plugin_queue=plugin_queue)
        exc_type, exc = plugin_exception_queue.get(block=True)
        self.assertTrue(isinstance(exc, Exception))

    @skipIfModuleNotInstalled("tcunittest")
    def test_run_plugin(self):
        """Verify the main model can run a loaded plugin"""
        plugin_data = np.array(self.random_data())
        plugin_config = {'pi': 3.141592654}
        plugin_cls = self.get_normalize_plugin()
        plugin_process, plugin_queue, exception_queue = model.run_plugin(plugin_cls,
                                                                         data=plugin_data, config=plugin_config)
        self.assertTrue(isinstance(plugin_process, multiprocessing.Process))
        returned_data = plugin_queue.get()
        expected_data = plugin_data / np.max(plugin_data)
        self.assertTrue(np.array_equal(expected_data, returned_data))

    @skipIfModuleNotInstalled("tcunittest")
    def test_run_plugin_exceptions(self):
        """Verify run_plugin returns exception messages in Queue"""
        plugin_data = np.zeros(5)  # Use division by zero exception in NormalizePlugin
        plugin_config = {'pi': 3.141592654}
        plugin_cls = self.get_normalize_plugin()
        plugin_process, plugin_queue, exception_queue = model.run_plugin(plugin_cls,
                                                                         data=plugin_data, config=plugin_config)
        exc_type, exc = exception_queue.get(block=True)
        self.assertTrue(isinstance(exc, Exception))

    def get_normalize_plugin(self):
        """Returns NDIToolbox's NormalizePlugin plugin"""
        normalize_plugin_name = "NormalizePlugin"
        plugin_list = model.load_plugins()
        plugin_names = [plugin[0] for plugin in plugin_list]
        plugin_classes = [plugin[1] for plugin in plugin_list]
        # Ensure that the normalize plugin was found
        self.assertTrue(normalize_plugin_name in plugin_names)
        return plugin_classes[plugin_names.index(normalize_plugin_name)]

    def test_get_config(self):
        """Verify returning the application's configuration"""
        expected_configuration = config.Configure(pathfinder.config_path()).config
        expected_configuration.read(pathfinder.config_path())
        returned_configuration = model.get_config().config
        returned_configuration.read(pathfinder.config_path())
        for section in expected_configuration.sections():
            self.assertListEqual(expected_configuration.items(section), returned_configuration.items(section))

    def test_copy_data(self):
        """Verify copying of sample data file to data folder"""
        self.model.copy_data(self.sample_data_file)
        copied_data_file = os.path.join(pathfinder.data_path(),
                                        self.sample_data_basename)
        self.assertTrue(os.path.exists(copied_data_file))
        os.remove(copied_data_file)

    def test_remove_data(self):
        """Verify removal of a data file from the data folder"""
        self.model.copy_data(self.sample_data_file)
        copied_data_file = os.path.join(pathfinder.data_path(),
                                        self.sample_data_basename)
        self.assertTrue(os.path.exists(copied_data_file))
        self.model.remove_data(copied_data_file)
        self.assertFalse(os.path.exists(copied_data_file))

    def test_remove_thumbs(self):
        """Verify remove_thumbs method deletes all files in the thumbnail
        folder"""
        shutil.copy(__file__, pathfinder.thumbnails_path())
        self.assertTrue(len(os.listdir(pathfinder.thumbnails_path())) > 0)
        self.model.remove_thumbs()
        self.assertListEqual(os.listdir(pathfinder.thumbnails_path()), [])

    def test_get_preview_state(self):
        """Verify returning the current setting for displaying plot thumbnails"""
        cfg = config.Configure(pathfinder.config_path())
        preview_state = cfg.get_app_option_boolean("Enable Preview")
        self.assertEqual(preview_state, self.model.get_preview_state())

    def test_set_preview_state(self):
        """Verify setting the current setting for displaying plot thumbnails"""
        cfg = config.Configure(pathfinder.config_path())
        original_preview_state = cfg.get_app_option_boolean("Enable Preview")
        new_preview_state = not original_preview_state
        self.assertEqual(original_preview_state, self.model.get_preview_state())
        self.model.set_preview_state(new_preview_state)
        self.assertEqual(new_preview_state, self.model.get_preview_state())
        self.model.set_preview_state(original_preview_state)

    def test_get_coords(self):
        """Verify returning the UL corner of the main app window set in config"""
        cfg = config.Configure(pathfinder.config_path())
        str_coords = cfg.get_app_option_list("Coordinates")
        # Default must be a list: assertListEqual rejects tuples.
        expected_coords = [0, 0]
        if str_coords is not None:
            expected_coords = [int(coord) for coord in str_coords]
        self.assertListEqual(expected_coords, self.model.get_coords())

    def test_set_coords(self):
        """Verify setting the UL corner of the main app window in config"""
        cfg = config.Configure(pathfinder.config_path())
        str_coords = cfg.get_app_option_list("Coordinates")
        # Default must be a list: assertListEqual rejects tuples.
        original_coords = [0, 0]
        if str_coords is not None:
            original_coords = [int(coord) for coord in str_coords]
        self.assertListEqual(original_coords, self.model.get_coords())
        new_coords_int = [3, 5]
        self.model.set_coords(new_coords_int)
        self.assertListEqual(new_coords_int, self.model.get_coords())
        new_coords_str = ["9", "-1"]
        self.model.set_coords(new_coords_str)
        self.assertListEqual([int(coord) for coord in new_coords_str], self.model.get_coords())
        self.model.set_coords(original_coords)

    def test_get_size(self):
        """Verify returning the size of the main app window set in config"""
        cfg = config.Configure(pathfinder.config_path())
        str_win_size = cfg.get_app_option_list("Window Size")
        expected_win_size = [300, 600]
        if str_win_size is not None:
            expected_win_size = [int(dimsize) for dimsize in str_win_size]
        self.assertListEqual(expected_win_size, self.model.get_window_size())

    def test_set_size(self):
        """Verify setting the size of the main app window in config"""
        cfg = config.Configure(pathfinder.config_path())
        str_win_size = cfg.get_app_option_list("Window Size")
        original_win_size = [300, 600]
        if str_win_size is not None:
            original_win_size = [int(dimsize) for dimsize in str_win_size]
        self.assertListEqual(original_win_size, self.model.get_window_size())
        new_win_size = [800, 1024]
        self.model.set_window_size(new_win_size)
        self.assertListEqual(new_win_size, self.model.get_window_size())
        self.model.set_window_size(original_win_size)

    def test_get_loglevel(self):
        """Verify returning the log level from config"""
        cfg = config.Configure(pathfinder.config_path())
        log_level = cfg.get_app_option("log level")
        available_log_levels = {'debug': logging.DEBUG,
                                'info': logging.INFO,
                                'warning': logging.WARNING,
                                'error': logging.ERROR,
                                'critical': logging.CRITICAL}
        log_level = available_log_levels.get(log_level, logging.WARNING)
        self.assertEqual(log_level, model.get_loglevel())

    def test_set_loglevel(self):
        """Verify setting the log level in config"""
        cfg = config.Configure(pathfinder.config_path())
        log_levels = ['debug', 'info', 'warning', 'error', 'critical', None, 'abc']
        acceptable_log_levels = {'debug': logging.DEBUG,
                                 'info': logging.INFO,
                                 'warning': logging.WARNING,
                                 'error': logging.ERROR,
                                 'critical': logging.CRITICAL}
        for level in log_levels:
            model.set_loglevel(level)
            if level in acceptable_log_levels:
                self.assertEqual(acceptable_log_levels[level], model.get_loglevel())

    def test_get_loglevels(self):
        """Verify returning a list of available log levels"""
        available_log_levels = {'debug': logging.DEBUG,
                                'info': logging.INFO,
                                'warning': logging.WARNING,
                                'error': logging.ERROR,
                                'critical': logging.CRITICAL}
        self.assertDictEqual(available_log_levels, model.available_log_levels)

    def test_get_logger(self):
        """Verify returning a logger instance"""
        logger = model.get_logger(__name__)
        self.assertTrue(isinstance(logger, logging.Logger))
        expected_logger = logging.getLogger(name='.'.join(['nditoolbox', __name__]))
        self.assertEqual(expected_logger.name, logger.name)
        acceptable_log_levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
        for level in acceptable_log_levels:
            self.assertEqual(expected_logger.isEnabledFor(level), logger.isEnabledFor(level))

    def test_clear_log(self):
        """Verify deleting the log file"""
        log_file = pathfinder.log_path()
        if os.path.exists(log_file):
            try:
                model.clear_log()
                self.assertFalse(os.path.exists(log_file))
            except OSError:  # file in use (Windows); OSError covers WindowsError
                pass

    def test_get_windows_version(self):
        """Verify get_windows_version function returns correct version
        information."""
        if sys.platform == 'win32':
            win_ver = sys.getwindowsversion()
            major, minor = model.get_windows_version()
            self.assertEqual(win_ver.major, major)
            self.assertEqual(win_ver.minor, minor)
        else:
            self.assertIsNone(model.get_windows_version())

    def get_win_ver(self):
        """Returns the major, minor version of the Windows OS"""
        if sys.platform == 'win32':
            win_ver = sys.getwindowsversion()
            return win_ver.major, win_ver.minor
        return None

    def test_iswin7(self):
        """Verify is_win7 function returns True if running on Windows 7."""
        is_windows7 = False
        if sys.platform == 'win32':
            major, minor = self.get_win_ver()
            is_windows7 = major == 6 and minor == 1
        self.assertEqual(is_windows7, model.is_win7())

    def test_iswinvista(self):
        """Verify is_winvista function returns True if running on Windows Vista."""
        is_winvista = False
        if sys.platform == 'win32':
            major, minor = self.get_win_ver()
            is_winvista = major == 6 and minor == 0
        self.assertEqual(is_winvista, model.is_winvista())

    def test_iswinxp(self):
        """Verify is_winxp function returns True if running on Windows XP."""
        is_winxp = False
        if sys.platform == 'win32':
            major, minor = self.get_win_ver()
            is_winxp = major == 5 and minor == 1
        self.assertEqual(is_winxp, model.is_winxp())

    def test_iswinxp64(self):
        """Verify is_winxp64 function returns True if running on Windows XP x64."""
        is_winxp64 = False
        if sys.platform == 'win32':
            major, minor = self.get_win_ver()
            is_winxp64 = major == 5 and minor == 2
        self.assertEqual(is_winxp64, model.is_winxp64())

    def test_iswin2k(self):
        """Verify is_winxp function returns True if running on Windows 2000."""
        is_win2k = False
        if sys.platform == 'win32':
            major, minor = self.get_win_ver()
            is_win2k = major == 5 and minor == 0
        self.assertEqual(is_win2k, model.is_win2k())

    def test_get_data_info(self):
        """Verify get_data_info returns info about a data file"""
        file_size = int(os.stat(self.sample_data_file).st_size)
        data = dataio.get_data(self.sample_data_file)
        data_info = self.model.get_data_info(self.sample_data_file)
        ndim = data.ndim
        shape = data.shape
        numpoints = data.size
        dtype = str(data.dtype)
        self.assertEqual(file_size, data_info['filesize'])
        self.assertEqual(ndim, data_info['ndim'])
        self.assertEqual(shape, data_info['shape'])
        self.assertEqual(numpoints, data_info['numpoints'])
        self.assertEqual(dtype, data_info['dtype'])

    def tearDown(self):
        try:
            if os.path.exists(self.sample_data_file + ".hdf5"):
                os.remove(self.sample_data_file + ".hdf5")
            if os.path.exists(self.sample_data_file):
                os.remove(self.sample_data_file)
        except OSError:  # file in use; OSError covers WindowsError
            pass
        model.set_loglevel(self.original_loglevel)
if __name__ == "__main__":
    # Required on Windows when spawned child processes import this module.
    multiprocessing.freeze_support()
    random.seed()
    unittest.main()
| StarcoderdataPython |
12818712 | # SPDX-License-Identifier: MIT
# Copyright (C) 2018-present iced project and contributors
# ⚠️This file was generated by GENERATOR!🦹♂️
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
"""
Operand, register and memory access
"""
import typing
# At type-checking time OpAccess is the real enum type from the extension
# module; at runtime the constants below are plain ints (cheap, no import).
if typing.TYPE_CHECKING:
    from ._iced_x86_py import OpAccess
else:
    OpAccess = int

NONE: OpAccess = 0 # type: ignore
"""
Nothing is read and nothing is written
"""
READ: OpAccess = 1 # type: ignore
"""
The value is read
"""
COND_READ: OpAccess = 2 # type: ignore
"""
The value is sometimes read and sometimes not
"""
WRITE: OpAccess = 3 # type: ignore
"""
The value is completely overwritten
"""
COND_WRITE: OpAccess = 4 # type: ignore
"""
Conditional write, sometimes it's written and sometimes it's not modified
"""
READ_WRITE: OpAccess = 5 # type: ignore
"""
The value is read and written
"""
READ_COND_WRITE: OpAccess = 6 # type: ignore
"""
The value is read and sometimes written
"""
NO_MEM_ACCESS: OpAccess = 7 # type: ignore
"""
The memory operand doesn't refer to memory (eg. ``LEA`` instruction) or it's an instruction that doesn't read the data to a register or doesn't write to the memory location, it just prefetches/invalidates it, eg. ``INVLPG``, ``PREFETCHNTA``, ``VGATHERPF0DPS``, etc. Some of those instructions still check if the code can access the memory location.
"""
| StarcoderdataPython |
245535 | import argparse
import math
import os
import torch
import torch.nn.functional as F
import torchvision
from torch import optim
from tqdm import tqdm
from src.models.stylegan import Generator
def conv_warper(layer, input, style, noise):
    """Apply a StyleConv layer using an explicitly supplied style vector.

    Re-implements the modulated convolution + noise + activation forward pass
    so callers can inject edited style-space vectors instead of the ones the
    layer would compute itself.

    Args:
        layer (nn.Module): StyleConv layer whose weights/noise/activation are used.
        input: input feature map -- assumes (batch, in_channel, H, W); TODO confirm.
        style: per-sample, per-input-channel modulation vector.
        noise: noise tensor forwarded to the layer's noise injection.
    """
    conv = layer.conv
    batch, in_channel, height, width = input.shape
    # Modulate: scale conv weights per sample by the style vector.
    style = style.view(batch, 1, in_channel, 1, 1)
    weight = conv.scale * conv.weight * style
    if conv.demodulate:
        # Demodulate: renormalize each output channel's weights (eps avoids /0).
        demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
        weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
    weight = weight.view(
        batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
    )
    if conv.upsample:
        # Fold batch into channels so grouped (transposed) conv applies each
        # sample's own modulated weights.
        input = input.view(1, batch * in_channel, height, width)
        weight = weight.view(
            batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
        )
        weight = weight.transpose(1, 2).reshape(
            batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
        )
        out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
        out = conv.blur(out)
    elif conv.downsample:
        input = conv.blur(input)
        _, _, height, width = input.shape
        input = input.view(1, batch * in_channel, height, width)
        out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
    else:
        # Same-resolution path.
        input = input.view(1, batch * in_channel, height, width)
        out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
    out = layer.noise(out, noise=noise)
    out = layer.activate(out)
    return out
def encoder(G, noise):
    """Map a z latent to (style-space vectors, extended w latent, noise list).

    Returns one modulation vector per conv layer of the generator, the
    broadcast w latent, and the generator's fixed per-layer noise tensors.
    """
    w = G.style(noise)
    noises = [getattr(G.noises, f'noise_{i}') for i in range(G.num_layers)]
    # Broadcast the single w vector across all latent injection points.
    latent = w.unsqueeze(1).repeat(1, G.n_latent, 1)
    style_space = [G.conv1.conv.modulation(latent[:, 0])]
    layer_idx = 1
    for first, second in zip(G.convs[::2], G.convs[1::2]):
        style_space.append(first.conv.modulation(latent[:, layer_idx]))
        style_space.append(second.conv.modulation(latent[:, layer_idx + 1]))
        layer_idx += 2
    return style_space, latent, noises
def decoder(G, style_space, latent, noise):
    """Render an image from explicit style-space vectors (inverse of encoder)."""
    out = G.input(latent)
    out = conv_warper(G.conv1, out, style_space[0], noise[0])
    skip, _ = G.to_rgb1(out, latent[:, 1])
    idx = 1
    layer_groups = zip(G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs)
    for first, second, noise_a, noise_b, to_rgb in layer_groups:
        out = conv_warper(first, out, style_space[idx], noise=noise_a)
        out = conv_warper(second, out, style_space[idx + 1], noise=noise_b)
        # Progressive skip connection accumulates the RGB output.
        skip, _ = to_rgb(out, latent[:, idx + 2], skip)
        idx += 2
    return skip
def main():
    """Demonstrate style-space editing: generate a face with StyleGAN2-FFHQ,
    then tweak single style-space channels known to control eyes, hair,
    mouth and lips, saving each result under ./results/."""
    # Map from latent layer index to style-space entry (to_rgb layers share
    # the latent of the preceding conv layer, hence the repeated values).
    index = [0, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16]
    LOAD_PATH = '../checkpoint/stylegan2-ffhq-config-f.pt'
    model = Generator(
        size=1024,
        style_dim=512,
        n_mlp=8,
        channel_multiplier=2
    )
    model.load_state_dict(torch.load(LOAD_PATH)['g_ema'])
    model.eval()
    model.cuda()

    def _save(tensor, filename):
        """Save a generated image tensor under ./results/ (values in [-1, 1])."""
        torchvision.utils.save_image(tensor.detach().cpu(),
                                     os.path.join("./results/" + filename),
                                     normalize=True,
                                     scale_each=True,
                                     range=(-1, 1))

    test_input = torch.randn(1, 512).cuda()
    output, _ = model([test_input], return_latents=False)
    _save(output, "stylespace_origin.jpg")

    # (latent layer, style channel, offset, label) -- channels that control
    # specific facial attributes.
    edits = [
        (9, 409, 10, "eye"),
        (12, 330, -50, "hair"),
        (6, 259, -20, "mouth"),
        (15, 45, -3, "lip"),
    ]
    for layer, channel, offset, label in edits:
        # Re-encode each time so every edit starts from the unedited latent.
        style_space, latent, noise = encoder(model, test_input)
        style_space[index[layer]][:, channel] += offset
        image = decoder(model, style_space, latent, noise)
        _save(image, f"stylespace_{label}.jpg")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11277541 | #!/usr/bin/env/python3
# -*- coding:utf-8 -*-
"""
@project: apiAutoTest
@author: zy7y
@file: run.py
@ide: PyCharm
@time: 2020/12/16
@github: https://github.com/zy7y
@site: https://cnblogs.com/zy7y
@desc: 运行文件
"""
import os
import shutil
from test.conftest import pytest
from tools import logger
from tools.read_file import ReadFile
# Log and report output locations, loaded once from the project config file.
file_path = ReadFile.read_config('$.file_path')
def run():
    """Entry point: clear any previous report, configure logging, run the API
    tests with pytest and build an Allure HTML report."""
    # Remove the report from a previous run, if present.
    if os.path.exists('report/'):
        shutil.rmtree(path='report/')
    logger.add(file_path['log'], enqueue=True, encoding='utf-8')
    logger.info("""
             _ _         _        _____         _
  __ _ _ __ (_)         / \\  _   _| |_ __|_ _|__  ___| |_
 / _` | '_ \\| |        / _ \\| | | | __/ _ \\| |/ _ \\/ __| __|
| (_| | |_) | |       / ___ \\ |_| | || (_) | |  __/\\__ \\ |_
 \\__,_| .__/|_|      /_/   \\_\\__,_|\\__\\___/|_|\\___||___/\\__|
      |_|
        Starting ... ... ...
        """)
    pytest.main(
        args=[
            'test/test_api.py',
            f'--alluredir={file_path["report"]}/data'])
    # Automatically serve the report (opens it via a local Allure server):
    # os.system(f'allure serve {report}/data')
    # Generate a static HTML report locally.
    os.system(
        f'allure generate {file_path["report"]}/data -o {file_path["report"]}/html --clean')
    logger.success('报告已生成')
# Allow running the suite directly: `python run.py`.
if __name__ == '__main__':
    run()
| StarcoderdataPython |
224082 | <reponame>abought/zorp
import pytest
from zorp import parsers
##
# Sample preconfigured parsers that work with the data in these test suites
# These were defined at a time before optional columns were introduced, so in practice many of our tests have the
# same layout and order of fields
@pytest.fixture(scope='module')
def standard_gwas_parser():
    """Parser for the full standard test-file layout: chrom, pos, ref, alt,
    -log10 p-value, beta, stderr(beta) and alt allele frequency, tab-delimited."""
    return parsers.GenericGwasLineParser(chrom_col=1, pos_col=2, ref_col=3, alt_col=4,
                                         pvalue_col=5, is_neg_log_pvalue=True,
                                         beta_col=6, stderr_beta_col=7,
                                         allele_freq_col=8,
                                         is_alt_effect=True,
                                         delimiter='\t')
@pytest.fixture(scope='module')
def standard_gwas_parser_basic():
    """Minimal parser: only variant coordinates and -log10 p-value columns."""
    return parsers.GenericGwasLineParser(chrom_col=1, pos_col=2, ref_col=3, alt_col=4,
                                         pvalue_col=5, is_neg_log_pvalue=True,
                                         delimiter='\t')
| StarcoderdataPython |
3357078 | #!/usr/bin/env python
# encoding: utf-8
"""Azkaban CLI: a lightweight command line interface for Azkaban.
Usage:
azkaban build [-cp PROJECT] [-a ALIAS | -u URL | [-r] ZIP] [-o OPTION ...]
azkaban info [-p PROJECT] [-f | -o OPTION ... | [-i] JOB ...]
azkaban log [-a ALIAS | -u URL] EXECUTION [JOB]
azkaban run [-bkp PROJECT] [-a ALIAS | -u URL] [-e EMAIL ...]
[-o OPTION ...] FLOW [JOB ...]
azkaban schedule [-bkp PROJECT] [-a ALIAS | -u URL] [-e EMAIL ...]
[-o OPTION ...] [-s SPAN] (-d DATE) (-t TIME)
FLOW [JOB ...]
azkaban upload [-cp PROJECT] [-a ALIAS | -u URL] ZIP
azkaban -h | --help | -l | --log | -v | --version
Commands:
build* Build project and upload to Azkaban or save
locally the resulting archive.
info* View information about jobs or files.
log View workflow or job execution logs.
run Run jobs or workflows. If no job is specified,
the entire workflow will be executed.
schedule Schedule a workflow to be run at a specified
date and time.
upload Upload archive to Azkaban server.
Arguments:
EXECUTION Execution ID.
JOB Job name.
FLOW Workflow name. Recall that in the Azkaban world
this is simply a job without children.
ZIP For `upload` command, the path to an existing
project zip archive. For `build`, the path
where the output archive will be built. If it
points to a directory, the archive will be
named after the project name (and version, if
present) and created in said directory.
Options:
-a ALIAS --alias=ALIAS Alias to saved URL and username. Will also try
to reuse session IDs for later connections.
-b --bounce Skip execution if workflow is already running.
-c --create Create the project if it does not exist.
-d DATE --date=DATE Date used for first run of a schedule. It must
be in the format `MM/DD/YYYY`.
-e EMAIL --email=EMAIL Email address to be notified when the workflow
finishes (can be specified multiple times).
-f --files List project files instead of jobs. The first
column is the local path of the file, the
second the path of the file in the archive.
-h --help Show this message and exit.
-i --include-properties Include project properties with job options.
-k --kill Kill worfklow on first job failure.
-l --log Show path to current log file and exit.
-o OPTION --option=OPTION Azkaban properties. Can either be the path to
a properties file or a single parameter
formatted as `key=value`, e.g. `-o
user.to.proxy=foo`. For the `build` and `run`
commands, these will be added to the project's
or run's properties respectively (potentially
overriding existing ones). For the `info`
command, this will cause only jobs with these
exact parameters to be displayed.
-p PROJECT --project=PROJECT Azkaban project. Can either be a project name
or a path to a python module/package defining
an `azkaban.Project` instance. Commands which
are followed by an asterisk will only work in
the latter case. If multiple projects are
registered, you can disambiguate as follows:
`--project=module:project_name`.
-r --replace Overwrite any existing file.
-s SPAN --span=SPAN Period to repeat the scheduled flow. Must be
in format `1d`, a combination of magnitude and
unit of repetition. If not specified, the flow
will be run only once.
-t TIME --time=TIME Time when a schedule should be run. Must be of
the format `hh,mm,(AM|PM),(PDT|UTC|..)`.
-u URL --url=URL Azkaban endpoint (with protocol, and optionally
a username): '[user@]protocol:endpoint'. E.g.
'http://azkaban.server'. The username defaults
to the current user, as determined by `whoami`.
If you often use the same url, consider using
the `--alias` option instead.
-v --version Show version and exit.
Azkaban CLI returns with exit code 1 if an error occurred and 0 otherwise.
"""
from azkaban import __version__, CLI_ARGS
from azkaban.project import Project
from azkaban.remote import Execution, Session
from azkaban.util import (AzkabanError, Config, catch, flatten, human_readable,
temppath, read_properties, write_properties)
from docopt import docopt
from requests.exceptions import HTTPError
import logging as lg
import os
import os.path as osp
import sys
# Module-level logger for the CLI.
_logger = lg.getLogger(__name__)
def _forward(args, names):
"""Forward subset of arguments from initial dictionary.
:param args: Dictionary of parsed arguments (output of `docopt.docopt`).
:param names: List of names that will be included.
"""
names = set(names)
return dict(
('_%s' % (k.lower().lstrip('-').replace('-', '_'), ), v)
for (k, v) in args.items() if k in names
)
def _parse_option(_option):
  """Parse `--option` argument.

  :param _option: `--option` argument.

  Entries without an `=` are treated as property-file paths and read first;
  `key=value` entries are then applied on top (overriding file values).
  Returns a dictionary.
  """
  property_files = [entry for entry in _option if '=' not in entry]
  parsed = read_properties(*property_files)
  inline = [entry for entry in _option if '=' in entry]
  try:
    for entry in inline:
      key, value = entry.split('=', 1)
      parsed[key] = value
  except ValueError:
    raise AzkabanError('Invalid `--option` flag.')
  return parsed
def _parse_project(_project, require_project=False):
  """Parse `--project` argument into `(name, project)`.

  :param _project: `--project` argument.
  :param require_project: Fail if we fail to load the project.

  Note that `name` is guaranteed to be non-`None` (this function will throw an
  exception otherwise) but `project` can be.

  The rules are as follows:

  + If at least one `':'` is found in `_project` then the rightmost one is
    interpreted as delimitor between the path to the module and the project
    name.

  + Else:

    + We first try to interpret `_project` as a module path and find a unique
      project inside.

    + If the above attempt raises an `ImportError`, we interpret it as a name.

  """
  # fallback module name when no path is given (configurable via ~/.azkabanrc)
  default_module = Config().get_option('azkaban', 'project', 'jobs')
  projects = {}
  _project = _project or default_module
  if ':' in _project:
    # unambiguous case
    path, name = _project.rsplit(':', 1)
    try:
      projects = Project.load(path or default_module)
      # adding the default here lets options like `-p :name` work as intended
    except ImportError:
      # keep projects empty; the name-only branches below handle this
      pass
  else:
    # the option could be a name or module
    try:
      # try first as a module
      projects = Project.load(_project)
    except ImportError:
      # if that fails, try as a name: load the default module and look there
      name = _project
      try:
        projects = Project.load(default_module)
      except ImportError:
        pass
    else:
      # module loaded successfully: no explicit name was given
      name = None
  if name:
    # a project name was specified (possibly with an explicit module path)
    if name in projects:
      return name, projects[name]
    elif projects:
      # harder consistency requirement
      raise AzkabanError(
        'Project %r not found. Available projects: %s\n'
        'You can also specify another location using the `--project` option.'
        % (name, ', '.join(projects))
      )
    elif require_project:
      raise AzkabanError(
        'This command requires a project configuration module.\n'
        'You can specify another location using the `--project` option.'
      )
    else:
      # name known but no configuration module loaded; caller gets name only
      return name, None
  else:
    # no name given: the loaded module must contain exactly one project
    if not projects:
      raise AzkabanError(
        'No registered project found in %r.\n'
        'You can also specify another location using the `--project` option.'
        % (_project, )
      )
    elif len(projects) > 1:
      raise AzkabanError(
        'Multiple registered projects found: %s\n'
        'You can use the `--project` option to disambiguate.'
        % (', '.join(projects), )
      )
    else:
      return projects.popitem()
def _get_project_name(_project):
  """Return project name.

  :param _project: `--project` argument.
  """
  name, _ = _parse_project(_project)
  return name
def _load_project(_project):
  """Resolve project from CLI argument.

  :param _project: `--project` argument.

  Raises `AzkabanError` when no project configuration module can be found.
  """
  try:
    project = _parse_project(_project, require_project=True)[1]
  except ImportError:
    raise AzkabanError(
      'This command requires a project configuration module which was not '
      'found.\nYou can specify another location using the `--project` option.'
    )
  return project
def _upload_callback(cur_bytes, tot_bytes, file_index, _stdout=sys.stdout):
"""Callback for streaming upload.
:param cur_bytes: Total bytes uploaded so far.
:param tot_bytes: Total bytes to be uploaded.
:param file_index: (0-based) index of the file currently uploaded.
:param _stdout: Performance caching.
"""
if cur_bytes != tot_bytes:
_stdout.write(
'Uploading project: %.1f%%\r'
% (100. * cur_bytes / tot_bytes, )
)
else:
_stdout.write('Validating project... \r')
_stdout.flush()
def view_info(project, _files, _option, _job, _include_properties):
  """List jobs in project.

  :param project: `azkaban.project.Project` instance to inspect.
  :param _files: `--files` flag; list project files instead of jobs.
  :param _option: `--option` argument used to filter jobs by exact options.
  :param _job: List of job names whose definitions should be printed.
  :param _include_properties: Also print the project-level properties.
  """
  if _job:
    # print the full definition of each requested job (and optionally the
    # project properties) in java-properties format
    if _include_properties:
      write_properties(
        flatten(project.properties),
        header='project.properties'
      )
    for name in _job:
      project.jobs[name].build(header='%s.job' % (name, ))
  elif _files:
    # one `path<TAB>archive_path` line per file registered in the project
    for path, archive_path in sorted(project.files):
      sys.stdout.write('%s\t%s\n' % (osp.relpath(path), archive_path))
  else:
    # list jobs matching all `--option` key/value pairs exactly
    options = _parse_option(_option).items()
    jobs = sorted(project.jobs.items())
    # names that appear in some job's `dependencies` option
    # (note: jobs without dependencies contribute an empty string entry)
    dependencies = set(
      dep
      for _, job in jobs
      for dep in job.options.get('dependencies', '').split(',')
    )
    for name, job in jobs:
      if all(job.options.get(k) == v for k, v in options):
        # 'J' presumably marks jobs other jobs depend on, 'F' flow
        # endpoints -- TODO confirm intended legend
        sys.stdout.write(
          '%s\t%s\n'
          % ('J' if name in dependencies else 'F', name, )
        )
def view_log(_execution, _job, _url, _alias):
  """View workflow or job execution logs.

  :param _execution: Azkaban execution id.
  :param _job: Optional list of job names; only the first is used.
  :param _url: `--url` argument.
  :param _alias: `--alias` argument.
  """
  session = Session(_url, _alias)
  exc = Execution(session, _execution)
  # job logs when a job was named, otherwise the whole workflow log
  logs = exc.job_logs(_job[0]) if _job else exc.logs()
  try:
    for line in logs:
      # NOTE(review): writing `line.encode('utf-8')` assumes Python 2
      # (under Python 3 this would print a bytes repr) -- confirm the
      # supported interpreter before changing.
      sys.stdout.write('%s\n' % (line.encode('utf-8'), ))
  except HTTPError:
    # Azkaban responds with 500 if the execution or job isn't found
    if _job:
      raise AzkabanError(
        'Execution %s and/or job %s not found.', _execution, _job
      )
    else:
      raise AzkabanError('Execution %s not found.', _execution)
def run_workflow(project_name, _flow, _job, _url, _alias, _bounce, _kill,
                 _email, _option):
  """Run workflow.

  Submits `_flow` (optionally restricted to `_job`) for immediate execution
  and prints the resulting execution id and URL.
  """
  session = Session(_url, _alias)
  failure_mode = 'cancel' if _kill else 'finish'
  response = session.run_workflow(
    name=project_name,
    flow=_flow,
    jobs=_job,
    concurrent=not _bounce,
    on_failure=failure_mode,
    emails=_email,
    properties=_parse_option(_option),
  )
  exec_id = response['execid']
  if _job:
    job_names = ', jobs: %s' % (', '.join(_job), )
  else:
    job_names = ''
  message = (
    'Flow %s successfully submitted (execution id: %s%s).\n'
    'Details at %s/executor?execid=%s\n'
    % (_flow, exec_id, job_names, session.url, exec_id)
  )
  sys.stdout.write(message)
def schedule_workflow(project_name, _date, _time, _span, _flow, _job, _url,
                      _alias, _bounce, _kill, _email, _option):
  """Schedule workflow.

  Registers `_flow` to run at `_date`/`_time`, optionally repeating every
  `_span`, then prints a confirmation.
  """
  session = Session(_url, _alias)
  failure_mode = 'cancel' if _kill else 'finish'
  session.schedule_workflow(
    name=project_name,
    flow=_flow,
    date=_date,
    time=_time,
    period=_span,
    jobs=_job,
    concurrent=not _bounce,
    on_failure=failure_mode,
    emails=_email,
    properties=_parse_option(_option),
  )
  sys.stdout.write('Flow %s scheduled successfully.\n' % (_flow, ))
def upload_project(project_name, _zip, _url, _alias, _create):
  """Upload project.

  :param project_name: Name of the remote Azkaban project.
  :param _zip: Path to the zip archive to upload.
  :param _url: `--url` argument.
  :param _alias: `--alias` argument.
  :param _create: Create the remote project before retrying the upload if it
    doesn't exist yet.
  """
  session = Session(_url, _alias)
  while True:
    try:
      res = session.upload_project(
        name=project_name,
        path=_zip,
        callback=_upload_callback
      )
    except AzkabanError as err:
      # Only create-and-retry when the server reports a missing project
      # (same check as `build_project`). Previously any AzkabanError
      # triggered a retry when `_create` was set, which could loop forever
      # on unrelated failures (e.g. an invalid archive).
      if _create and str(err).endswith("doesn't exist."):
        session.create_project(project_name, project_name)
      else:
        raise err
    else:
      break
  sys.stdout.write(
    'Project %s successfully uploaded (id: %s, size: %s, version: %s).\n'
    'Details at %s/manager?project=%s\n'
    % (
      project_name,
      res['projectId'],
      human_readable(osp.getsize(_zip)),
      res['version'],
      session.url,
      project_name,
    )
  )
def build_project(project, _zip, _url, _alias, _replace, _create, _option):
  """Build project.

  :param project: `azkaban.project.Project` instance to build.
  :param _zip: Destination archive path or directory (build-only mode); when
    falsy, the archive is built to a temporary path and uploaded instead.
  :param _url: `--url` argument.
  :param _alias: `--alias` argument.
  :param _replace: Overwrite an existing archive.
  :param _create: Create the remote project before retrying the upload if it
    doesn't exist yet.
  :param _option: `--option` argument (extra properties overriding the
    project's own).
  """
  if _option:
    project.properties = flatten(project.properties)
    # to make sure we properly override nested options, we flatten first
    project.properties.update(_parse_option(_option))
  if _zip:
    if osp.isdir(_zip):
      # a directory was given: derive the archive name from the project
      _zip = osp.join(_zip, '%s.zip' % (project.versioned_name, ))
    project.build(_zip, overwrite=_replace)
    sys.stdout.write(
      'Project %s successfully built and saved as %r (size: %s).\n'
      % (project, _zip, human_readable(osp.getsize(_zip)))
    )
  else:
    # no destination given: build to a temporary archive and upload it
    with temppath() as _zip:
      project.build(_zip)
      archive_name = '%s.zip' % (project.versioned_name, )
      session = Session(_url, _alias)
      while True:
        try:
          res = session.upload_project(
            name=project.name,
            path=_zip,
            archive_name=archive_name,
            callback=_upload_callback
          )
        except AzkabanError as err:
          # retry once after creating the remote project, but only when the
          # server reports that the project doesn't exist
          if _create and str(err).endswith("doesn't exist."):
            session.create_project(project.name, project.name)
          else:
            raise err
        else:
          break
      sys.stdout.write(
        'Project %s successfully built and uploaded '
        '(id: %s, size: %s, upload: %s).\n'
        'Details at %s/manager?project=%s\n'
        % (
          project,
          res['projectId'],
          human_readable(osp.getsize(_zip)),
          res['version'],
          session.url,
          project,
        )
      )
@catch(AzkabanError)
def main(argv=None):
  """Entry point.

  :param argv: Optional list of CLI arguments; defaults to `sys.argv[1:]`.
  """
  # enable general logging
  logger = lg.getLogger()
  logger.setLevel(lg.DEBUG)
  handler = Config().get_file_handler('azkaban')
  if handler:
    logger.addHandler(handler)
  # parse arguments
  argv = argv or sys.argv[1:]
  _logger.debug('Running command %r from %r.', ' '.join(argv), os.getcwd())
  # BUG FIX: forward `argv` to docopt; previously docopt always parsed
  # `sys.argv`, silently ignoring the `argv` parameter of this function.
  args = docopt(__doc__, argv=argv, version=__version__)
  CLI_ARGS.update(args)
  # do things
  if args['--log']:
    if handler:
      sys.stdout.write('%s\n' % (handler.baseFilename, ))
    else:
      raise AzkabanError('No log file active.')
  elif args['build']:
    build_project(
      _load_project(args['--project']),
      **_forward(
        args,
        ['ZIP', '--url', '--alias', '--replace', '--create', '--option']
      )
    )
  elif args['log']:
    view_log(
      **_forward(args, ['EXECUTION', 'JOB', '--url', '--alias'])
    )
  elif args['info']:
    view_info(
      _load_project(args['--project']),
      **_forward(args, ['--files', '--option', 'JOB', '--include-properties'])
    )
  elif args['run']:
    run_workflow(
      _get_project_name(args['--project']),
      **_forward(
        args,
        [
          'FLOW', 'JOB', '--bounce', '--url', '--alias', '--kill', '--email',
          '--option',
        ]
      )
    )
  elif args['schedule']:
    schedule_workflow(
      _get_project_name(args['--project']),
      **_forward(
        args,
        [
          'FLOW', 'JOB', '--bounce', '--url', '--alias', '--kill',
          '--email', '--option', '--date', '--time', '--span'
        ]
      )
    )
  elif args['upload']:
    upload_project(
      _get_project_name(args['--project']),
      **_forward(args, ['ZIP', '--create', '--url', '--alias'])
    )

if __name__ == '__main__':
  main()
| StarcoderdataPython |
3401641 | <filename>Project Euler Qusetions 51 - 60/Project Euler Question 57.py<gh_stars>1-10
# Project Euler Question 57 -- Square root convergents.
#
# Count the expansions of sqrt(2) = 1 + 1/(2 + 1/(2 + ...)), among the
# first thousand, whose numerator has more digits than the denominator.
fraction_list = 0  # tally of qualifying expansions
numerator, denominator = 3, 2  # first expansion: 3/2
for _ in range(1, 1000):
    # next expansion: n/d -> (n + 2d)/(n + d)
    numerator, denominator = numerator + 2 * denominator, numerator + denominator
    if len(str(numerator)) > len(str(denominator)):
        fraction_list += 1
print(fraction_list)
3405926 | <filename>Oplevering-2/geslachtsverandering.py
# klopt niet
def vertaal(word, vertaling):
    """Translate a single word with the given mapping, preserving case.

    :param word: The word to translate.
    :param vertaling: Dict mapping lowercase source words to replacements.

    Words missing from the mapping are returned unchanged. All-uppercase and
    capitalized words keep their casing (e.g. 'HIJ' -> 'ZIJ', 'Hij' -> 'Zij').

    Fixes the original, which looped over every dict entry concatenating keys
    and values (instead of looking up *word*) and read the global
    `vertalingen` instead of the `vertaling` parameter.
    """
    replacement = vertaling.get(word.lower())
    if replacement is None:
        # no translation known: leave the word untouched
        return word
    if word.isupper():
        return replacement.upper()
    if word.istitle():
        return replacement.capitalize()
    return replacement
def geslachtsverandering(zin, vertaling):
    """Replace every mapped word (key -> value) in the sentence.

    :param zin: Sentence to rewrite.
    :param vertaling: Dict mapping lowercase source words to replacements.

    Punctuation attached to a word and the word's capitalization are both
    preserved, e.g. 'Hij is mijn broer.' -> 'Zij is mijn zus.'.

    Fixes the original, which crashed on an `apppend` typo and appended a
    (mis-cased) token for every dict entry per word.
    """
    def _vervang(woord):
        # strip surrounding punctuation so 'broer.' matches 'broer'
        kern = woord.strip('.,!?;:')
        nieuw = vertaling.get(kern.lower())
        if nieuw is None:
            return woord
        if kern.isupper():
            nieuw = nieuw.upper()
        elif kern.istitle():
            nieuw = nieuw.capitalize()
        return woord.replace(kern, nieuw)

    return ' '.join(_vervang(woord) for woord in zin.split())
def geslachtsherstel(zin, vertaling):
    """Replace every mapped word (value -> key): inverse of geslachtsverandering.

    :param zin: Sentence to rewrite.
    :param vertaling: Dict mapping lowercase source words to replacements
        (the same mapping used by `geslachtsverandering`; it is inverted
        here).

    Punctuation and capitalization are preserved, e.g.
    'Zij is mijn zus.' -> 'Hij is mijn broer.'.

    Fixes the original, which crashed on an `apppend` typo and appended a
    token for every dict entry per word.
    """
    omgekeerd = dict((v, k) for k, v in vertaling.items())

    def _vervang(woord):
        # strip surrounding punctuation so 'zus.' matches 'zus'
        kern = woord.strip('.,!?;:')
        oud = omgekeerd.get(kern.lower())
        if oud is None:
            return woord
        if kern.isupper():
            oud = oud.upper()
        elif kern.istitle():
            oud = oud.capitalize()
        return woord.replace(kern, oud)

    return ' '.join(_vervang(woord) for woord in zin.split())
if __name__ == "__main__":
    # Demo: translate single words and whole sentences in both directions.
    vertalingen = {'hij': 'zij', 'broer':'zus'}
    # single-word translation, exercising case preservation
    print(vertaal('hij', vertalingen))
    print(vertaal('HIJ', vertalingen))
    print(vertaal('Hij', vertalingen))
    print(vertaal('broer', vertalingen))
    # unmapped word: expected to pass through unchanged
    print(vertaal('mijn', vertalingen))
    # sentence-level translation and its inverse
    print(geslachtsverandering('Hij is mijn broer.', vertalingen))
    print(geslachtsherstel('Zij is mijn zus.', vertalingen))
| StarcoderdataPython |
148346 | #!/usr/bin/python3
''' Summary: Script to process images and update the database '''
import datetime
import os
import sqlite3
from pathlib import Path
from sqlite3 import Error
import cv2
import numpy as np
from numpy import array
from shapely.geometry import Polygon, asPoint
import mrcnn.config
from mrcnn.model import MaskRCNN
# SQLite database location, resolved relative to this script
DB_PATH = os.path.join(os.path.dirname(__file__), 'db/park.db')
try:
    # Open database connection
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row  # rows become dict-like (access by column name)
    # prepare a cursor object using cursor() method
    cursor = conn.cursor()
except Error as ex:
    # fatal: nothing useful can be done without the database
    print("Error in connection: {}".format(ex))
    exit()
# Configuration that will be used by the Mask-RCNN library
class MaskRCNNConfig(mrcnn.config.Config):
    """Inference configuration for the COCO-pretrained Mask R-CNN model."""
    NAME = "coco_pretrained_model_config"
    IMAGES_PER_GPU = 1  # one frame per inference batch
    GPU_COUNT = 1
    NUM_CLASSES = 1 + 80  # COCO dataset has 80 classes + one background class
    DETECTION_MIN_CONFIDENCE = 0.5  # drop detections below 50% confidence
# Filter a list of Mask R-CNN detection results to get only the detected cars / trucks
def get_car_boxes(boxes, class_ids):
    # class ids treated as vehicles (car / truck / bus in the COCO ordering
    # used by this model)
    vehicle_ids = (3, 8, 6)
    kept = [
        box
        for position, box in enumerate(boxes)
        if class_ids[position] in vehicle_ids
    ]
    return np.array(kept)
def query_database(sql):
    """Execute *sql* on the module-level connection and return the cursor.

    :param sql: SQL statement to run.
    :return: The shared module-level cursor, positioned on the results.

    Non-SELECT statements are committed so the data is persisted.
    """
    with conn:
        # execute SQL query using execute() method.
        cursor.execute(sql)
        # Commit on CREATE, INSERT, UPDATE, and DELETE
        if not sql.lower().startswith("select"):
            # BUG FIX: was `conn.commit` (no parentheses), which only
            # referenced the method and never committed. The `with conn:`
            # block also commits on a clean exit; the explicit call keeps
            # the original intent visible.
            conn.commit()
    return cursor
# Root directory of the project
ROOT_DIR = Path(__file__).resolve().parent
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    print("Missing the mask_rcnn_coco.h5 dataset! Downloading now...")
    # NOTE(review): only `mrcnn.config` and `mrcnn.model` are imported at the
    # top of this file -- `mrcnn.utils` may be unbound here; confirm an
    # `import mrcnn.utils` exists before relying on this branch.
    mrcnn.utils.download_trained_weights(COCO_MODEL_PATH)
# Get database version
db_vers = query_database("SELECT sqlite_version();").fetchone()
print("Connected. Database version: {}".format(db_vers[0]))
# Get source data
source = query_database("SELECT * FROM Source").fetchall()
if len(source) == 0:
    print("No feeds found! Exiting now...")
    exit()
else:
    # Create a Mask-RCNN model in inference mode
    model = MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=MaskRCNNConfig())
    # Load pre-trained model
    model.load_weights(COCO_MODEL_PATH, by_name=True)
    # Get UTC time before loop
    # (both timestamps share the machine's local timezone, resolved once)
    local_timezone = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
    timestamp = datetime.datetime.now(local_timezone).strftime("%Y-%m-%d %H:%M:%S %Z")
    smalltimestamp = datetime.datetime.now(local_timezone).strftime("%Y%m%d")
def main():
    """Process one frame per active source, count vehicles per zone, and log
    the occupancy counts to the database."""
    for s in source:
        if s['Active'] == True:
            # Source password decryption code would go here
            # Video file or camera feed to process
            FRAME_SOURCE = s['URI']
            # Load the source we want to run detection on
            video_capture = cv2.VideoCapture(FRAME_SOURCE)
            success, frame = video_capture.read()
            if success:
                # Clone image instead of using original
                frame_copy = frame.copy()
                # Convert the image from BGR color (which OpenCV uses) to RGB color
                rgb_image = frame_copy[:, :, ::-1]
                print("Starting Mask R-CNN segmentation and detection...")
                # Run the image through the Mask R-CNN model to get results.
                results = model.detect([rgb_image], verbose=0)
                # Mask R-CNN assumes we are running detection on multiple images.
                # We only passed in one image to detect, so only grab the first result.
                r = results[0]
                # The r variable will now have the results of detection:
                # - r['rois'] are the bounding box of each detected object
                # - r['class_ids'] are the class id (type) of each detected object
                # - r['scores'] are the confidence scores for each detection
                # - r['masks'] are the object masks for each detected object (which gives you the object outline)
                print("Starting vehicle localization...")
                # Filter the results to only grab the car / truck bounding boxes
                car_boxes = get_car_boxes(r['rois'], r['class_ids'])
                # Get zone data
                sql = "SELECT Zone.*, Type.Description FROM Zone JOIN Type USING(TypeID) WHERE SourceID = {}".format(s['SourceID'])
                zone = query_database(sql).fetchall()
                if len(zone) == 0:
                    print("There are no zones defined for this source!")
                    break
                print("Cars found in frame: {}".format(len(car_boxes)))
                print("Counting vehicles in zones...")
                for z in zone:
                    # Convert string representation of list to list
                    # NOTE(review): eval of DB-stored text -- safe only if the
                    # Zone table is fully trusted; ast.literal_eval would be safer.
                    poly_coords = eval(z['PolyCoords'])
                    # Hold count of cars in zone
                    count = 0
                    # Draw each box on the frame
                    for box in car_boxes:
                        y1, x1, y2, x2 = box
                        # NOTE(review): the vertex order (x1,y1),(x2,y1),(x1,y2),(x2,y2)
                        # traces a self-intersecting "bowtie" rather than the box
                        # perimeter -- confirm the centroid test still behaves as intended.
                        if(((Polygon([(x1, y1), (x2, y1), (x1, y2), (x2, y2)])).centroid).intersects(Polygon(asPoint(array(poly_coords))))):
                            # Display the box coordinates in the console
                            print("Car: ", box)
                            # Count cars in zone
                            count += 1
                            # Delete the car to avoid double counting
                            # NOTE(review): np.delete returns a new array and the
                            # result is discarded, so this line is a no-op as
                            # written -- confirm intended de-duplication behavior.
                            np.delete(car_boxes, box)
                    # Make sure the number counted is not more than the number of spaces
                    count = count if count <= z['TotalSpaces'] else z['TotalSpaces']
                    print("Total cars in zone {} ({}): {}.".format(z['ZoneID'], z['Description'], count))
                    # Insert count into database
                    sql = "INSERT INTO OccupancyLog (ZoneID, LotID, TypeID, Timestamp, OccupiedSpaces, TotalSpaces) VALUES ({}, {}, {}, {}, {}, {})".format(z['ZoneID'], z['LotID'], z['TypeID'], "'{}'".format(timestamp), count, z['TotalSpaces'])
                    query_database(sql)
                    print("Database updated...")
                # Clean up everything when finished
                video_capture.release()
                cv2.destroyAllWindows()
            else:
                print("Cannot access source {} vic {}!".format(s['SourceID'], s['Location']))
    cursor.close()
    conn.close()
    print("Job complete. Have an excellent day.")

if __name__ == '__main__':
    main()
| StarcoderdataPython |
11203213 | # Time: O(m + n), excluding ctor of result
# Space: O(1)
# optimized from Solution2 since we can find next i, j pair without nested loops
class Solution(object):
    def restoreMatrix(self, rowSum, colSum):
        """
        :type rowSum: List[int]
        :type colSum: List[int]
        :rtype: List[List[int]]

        Two-pointer greedy fill: each cell takes the largest value that
        neither its remaining row sum nor column sum forbids; an exhausted
        row/column is never revisited, so this runs in O(m + n).
        """
        rows, cols = len(rowSum), len(colSum)
        result = [[0] * cols for _ in range(rows)]
        r = c = 0
        while r < rows and c < cols:
            used = min(rowSum[r], colSum[c])  # greedy assignment
            result[r][c] = used
            rowSum[r] -= used
            colSum[c] -= used
            # advance past whichever line(s) just ran out of budget
            if rowSum[r] == 0:
                r += 1
            if colSum[c] == 0:
                c += 1
        return result
# Time: O(m * n)
# Space: O(1)
class Solution2(object):
    def restoreMatrix(self, rowSum, colSum):
        """
        :type rowSum: List[int]
        :type colSum: List[int]
        :rtype: List[List[int]]

        O(m * n) variant: walk every cell in row-major order and greedily
        assign the most that the remaining row and column budgets allow.
        """
        result = []
        for r in range(len(rowSum)):
            row = []
            for c in range(len(colSum)):
                used = min(rowSum[r], colSum[c])  # greedy assignment
                rowSum[r] -= used
                colSum[c] -= used
                row.append(used)
            result.append(row)
        return result
| StarcoderdataPython |
8152014 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import eventlet
from oslo.config import cfg
import six
from stevedore import enabled
from climate.db import api as db_api
from climate.db import exceptions as db_ex
from climate import exceptions as common_ex
from climate import manager
from climate.manager import exceptions
from climate.notification import api as notification_api
from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging
from climate.utils import service as service_utils
from climate.utils import trusts
# Configuration options registered under the [manager] group.
manager_opts = [
    cfg.ListOpt('plugins',
                default=['dummy.vm.plugin'],
                help='All plugins to use (one for every resource type to '
                     'support.)'),
    cfg.IntOpt('notify_hours_before_lease_end',
               default=48,
               help='Number of hours prior to lease end in which a '
                    'notification of lease close to expire will be sent. If '
                    'this is set to 0, then this notification will '
                    'not be sent.')
]

CONF = cfg.CONF
CONF.register_opts(manager_opts, 'manager')
LOG = logging.getLogger(__name__)

# Datetime format expected for lease start/end dates supplied via the API.
LEASE_DATE_FORMAT = "%Y-%m-%d %H:%M"
class ManagerService(service_utils.RPCServer):
    """Service class for the climate-manager service.

    Responsible for working with Climate DB, scheduling logic, running events,
    working with plugins, etc.

    """

    def __init__(self):
        target = manager.get_target()
        super(ManagerService, self).__init__(target)
        # resource_type -> plugin instance, from the [manager]/plugins option
        self.plugins = self._get_plugins()
        # resource_type -> {'on_start': fn, 'on_end': fn}
        self.resource_actions = self._setup_actions()

    def start(self):
        super(ManagerService, self).start()
        # poll the DB for pending events every 10 seconds
        self.tg.add_timer(10, self._event)

    def _get_plugins(self):
        """Return dict of resource-plugin class pairs."""
        config_plugins = CONF.manager.plugins
        plugins = {}
        # only load stevedore extensions explicitly listed in the config
        extension_manager = enabled.EnabledExtensionManager(
            check_func=lambda ext: ext.name in config_plugins,
            namespace='climate.resource.plugins',
            invoke_on_load=False
        )
        for ext in extension_manager.extensions:
            try:
                plugin_obj = ext.plugin()
            except Exception as e:
                # a broken plugin is skipped, not fatal
                LOG.warning("Could not load {0} plugin "
                            "for resource type {1} '{2}'".format(
                                ext.name, ext.plugin.resource_type, e))
            else:
                if plugin_obj.resource_type in plugins:
                    msg = ("You have provided several plugins for "
                           "one resource type in configuration file. "
                           "Please set one plugin per resource type.")
                    raise exceptions.PluginConfigurationError(error=msg)
                plugins[plugin_obj.resource_type] = plugin_obj
        return plugins

    def _setup_actions(self):
        """Setup actions for each resource type supported.

        BasePlugin interface provides only on_start and on_end behaviour now.
        If there are some configs needed by plugin, they should be returned
        from get_plugin_opts method. These flags are registered in
        [resource_type] group of configuration file.
        """
        actions = {}
        for resource_type, plugin in six.iteritems(self.plugins):
            plugin = self.plugins[resource_type]
            # plugin-specific options live in a group named after the resource
            CONF.register_opts(plugin.get_plugin_opts(), group=resource_type)

            actions[resource_type] = {}
            actions[resource_type]['on_start'] = plugin.on_start
            actions[resource_type]['on_end'] = plugin.on_end
            plugin.setup(None)
        return actions

    @service_utils.with_empty_context
    def _event(self):
        """Tries to commit event.

        If there is an event in Climate DB to be done, do it and change its
        status to 'DONE'.
        """
        LOG.debug('Trying to get event from DB.')
        # oldest pending event first; only one event is handled per tick
        event = db_api.event_get_first_sorted_by_filters(
            sort_key='time',
            sort_dir='asc',
            filters={'status': 'UNDONE'}
        )

        if not event:
            return

        if event['time'] < datetime.datetime.now():
            # claim the event before dispatching so it isn't picked up twice
            db_api.event_update(event['id'], {'status': 'IN_PROGRESS'})
            event_type = event['event_type']
            # dispatch to self.start_lease / self.end_lease /
            # self.before_end_lease by event type name
            event_fn = getattr(self, event_type, None)
            if event_fn is None:
                raise exceptions.EventError(error='Event type %s is not '
                                                  'supported' % event_type)
            try:
                # handle the event asynchronously in a greenthread
                eventlet.spawn_n(service_utils.with_empty_context(event_fn),
                                 event['lease_id'], event['id'])
                lease = db_api.lease_get(event['lease_id'])
                with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
                    self._send_notification(lease,
                                            ctx,
                                            events=['event.%s' % event_type])
            except Exception:
                db_api.event_update(event['id'], {'status': 'ERROR'})
                LOG.exception(_('Error occurred while event handling.'))

    def _date_from_string(self, date_string, date_format=LEASE_DATE_FORMAT):
        # parse a user-supplied date, converting ValueError into an API error
        try:
            date = datetime.datetime.strptime(date_string, date_format)
        except ValueError:
            raise exceptions.InvalidDate(date=date_string,
                                         date_format=date_format)

        return date

    def get_lease(self, lease_id):
        return db_api.lease_get(lease_id)

    def list_leases(self, project_id=None):
        return db_api.lease_list(project_id)

    def create_lease(self, lease_values):
        """Create a lease with reservations.

        Return either the model of created lease or None if any error.
        """
        try:
            trust_id = lease_values.pop('trust_id')
        except KeyError:
            raise exceptions.MissingTrustId()

        # Remove and keep reservation values
        reservations = lease_values.pop("reservations", [])

        # Create the lease without the reservations
        start_date = lease_values['start_date']
        end_date = lease_values['end_date']

        # truncate "now" to minute precision to match LEASE_DATE_FORMAT
        now = datetime.datetime.now()
        now = datetime.datetime(now.year,
                                now.month,
                                now.day,
                                now.hour,
                                now.minute)
        if start_date == 'now':
            start_date = now
        else:
            start_date = self._date_from_string(start_date)
        end_date = self._date_from_string(end_date)

        if start_date < now:
            raise common_ex.NotAuthorized(
                'Start date must later than current date')

        # all DB work happens inside the user's trust-scoped context
        with trusts.create_ctx_from_trust(trust_id) as ctx:
            lease_values['user_id'] = ctx.user_id
            lease_values['project_id'] = ctx.project_id
            lease_values['start_date'] = start_date
            lease_values['end_date'] = end_date

            if not lease_values.get('events'):
                lease_values['events'] = []

            # lifecycle events consumed later by the _event poller
            lease_values['events'].append({'event_type': 'start_lease',
                                           'time': start_date,
                                           'status': 'UNDONE'})
            lease_values['events'].append({'event_type': 'end_lease',
                                           'time': end_date,
                                           'status': 'UNDONE'})

            before_end_date = lease_values.get('before_end_notification', None)
            if before_end_date:
                # incoming param. Validation check
                try:
                    before_end_date = self._date_from_string(
                        before_end_date)
                    self._check_date_within_lease_limits(before_end_date,
                                                         lease_values)
                except common_ex.ClimateException as e:
                    LOG.error("Invalid before_end_date param. %s" % e.message)
                    raise e
            elif CONF.manager.notify_hours_before_lease_end > 0:
                # default: notify N hours before the lease ends
                delta = datetime.timedelta(
                    hours=CONF.manager.notify_hours_before_lease_end)
                before_end_date = lease_values['end_date'] - delta

            if before_end_date:
                event = {'event_type': 'before_end_lease',
                         'status': 'UNDONE'}
                lease_values['events'].append(event)
                # sets event['time'], clamped to the lease start
                self._update_before_end_event_date(event, before_end_date,
                                                   lease_values)

            try:
                if trust_id:
                    lease_values.update({'trust_id': trust_id})
                lease = db_api.lease_create(lease_values)
                lease_id = lease['id']
            except db_ex.ClimateDBDuplicateEntry:
                LOG.exception('Cannot create a lease - duplicated lease name')
                raise exceptions.LeaseNameAlreadyExists(
                    name=lease_values['name'])
            except db_ex.ClimateDBException:
                LOG.exception('Cannot create a lease')
                raise
            else:
                try:
                    for reservation in reservations:
                        reservation['lease_id'] = lease['id']
                        reservation['start_date'] = lease['start_date']
                        reservation['end_date'] = lease['end_date']
                        resource_type = reservation['resource_type']
                        if resource_type in self.plugins:
                            self.plugins[resource_type].create_reservation(
                                reservation)
                        else:
                            raise exceptions.UnsupportedResourceType(
                                resource_type)
                except (exceptions.UnsupportedResourceType,
                        common_ex.ClimateException):
                    # any reservation failure rolls back the whole lease
                    LOG.exception("Failed to create reservation for a lease. "
                                  "Rollback the lease and associated "
                                  "reservations")
                    db_api.lease_destroy(lease_id)
                    raise

                else:
                    lease = db_api.lease_get(lease['id'])
                    self._send_notification(lease, ctx, events=['create'])
                    return lease

    def update_lease(self, lease_id, values):
        if not values:
            return db_api.lease_get(lease_id)

        if len(values) == 1 and 'name' in values:
            # rename-only fast path: no date/reservation/event updates needed
            db_api.lease_update(lease_id, values)
            return db_api.lease_get(lease_id)

        lease = db_api.lease_get(lease_id)
        # missing dates default to the lease's current values
        start_date = values.get(
            'start_date',
            datetime.datetime.strftime(lease['start_date'], LEASE_DATE_FORMAT))
        end_date = values.get(
            'end_date',
            datetime.datetime.strftime(lease['end_date'], LEASE_DATE_FORMAT))
        before_end_date = values.get('before_end_notification', None)

        # truncate "now" to minute precision, same as create_lease
        now = datetime.datetime.now()
        now = datetime.datetime(now.year,
                                now.month,
                                now.day,
                                now.hour,
                                now.minute)
        if start_date == 'now':
            start_date = now
        else:
            start_date = self._date_from_string(start_date)
        end_date = self._date_from_string(end_date)

        values['start_date'] = start_date
        values['end_date'] = end_date

        if (lease['start_date'] < now and
                values['start_date'] != lease['start_date']):
            raise common_ex.NotAuthorized(
                'Cannot modify the start date of already started leases')

        if (lease['start_date'] > now and
                values['start_date'] < now):
            raise common_ex.NotAuthorized(
                'Start date must later than current date')

        if lease['end_date'] < now:
            raise common_ex.NotAuthorized(
                'Terminated leases can only be renamed')

        if (values['end_date'] < now or
           values['end_date'] < values['start_date']):
            raise common_ex.NotAuthorized(
                'End date must be later than current and start date')

        with trusts.create_ctx_from_trust(lease['trust_id']):
            if before_end_date:
                try:
                    before_end_date = self._date_from_string(before_end_date)
                    self._check_date_within_lease_limits(before_end_date,
                                                         values)
                except common_ex.ClimateException as e:
                    LOG.error("Invalid before_end_date param. %s" % e.message)
                    raise e

            # TODO(frossigneux) rollback if an exception is raised
            for reservation in (
                    db_api.reservation_get_all_by_lease_id(lease_id)):
                reservation['start_date'] = values['start_date']
                reservation['end_date'] = values['end_date']
                resource_type = reservation['resource_type']
                self.plugins[resource_type].update_reservation(
                    reservation['id'],
                    reservation)

        # keep the scheduled start event in sync with the new start date
        event = db_api.event_get_first_sorted_by_filters(
            'lease_id',
            'asc',
            {
                'lease_id': lease_id,
                'event_type': 'start_lease'
            }
        )
        if not event:
            raise common_ex.ClimateException(
                'Start lease event not found')
        db_api.event_update(event['id'], {'time': values['start_date']})

        # likewise for the end event
        event = db_api.event_get_first_sorted_by_filters(
            'lease_id',
            'asc',
            {
                'lease_id': lease_id,
                'event_type': 'end_lease'
            }
        )
        if not event:
            raise common_ex.ClimateException(
                'End lease event not found')
        db_api.event_update(event['id'], {'time': values['end_date']})

        notifications = ['update']
        self._update_before_end_event(lease, values, notifications,
                                      before_end_date)

        db_api.lease_update(lease_id, values)

        lease = db_api.lease_get(lease_id)
        with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
            self._send_notification(lease, ctx, events=notifications)

        return lease

    def delete_lease(self, lease_id):
        lease = self.get_lease(lease_id)
        # only leases that are not currently active may be deleted
        if (datetime.datetime.now() < lease['start_date'] or
                datetime.datetime.now() > lease['end_date']):
            with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
                for reservation in lease['reservations']:
                    plugin = self.plugins[reservation['resource_type']]
                    try:
                        plugin.on_end(reservation['resource_id'])
                    except (db_ex.ClimateDBException, RuntimeError):
                        LOG.exception("Failed to delete a reservation "
                                      "for a lease.")
                        raise
                db_api.lease_destroy(lease_id)
                self._send_notification(lease, ctx, events=['delete'])
        else:
            raise common_ex.NotAuthorized(
                'Already started lease cannot be deleted')

    def start_lease(self, lease_id, event_id):
        lease = self.get_lease(lease_id)
        with trusts.create_ctx_from_trust(lease['trust_id']):
            self._basic_action(lease_id, event_id, 'on_start', 'active')

    def end_lease(self, lease_id, event_id):
        lease = self.get_lease(lease_id)
        with trusts.create_ctx_from_trust(lease['trust_id']):
            self._basic_action(lease_id, event_id, 'on_end', 'deleted')

    def before_end_lease(self, lease_id, event_id):
        # notification itself is sent by _event; just mark the event handled
        db_api.event_update(event_id, {'status': 'DONE'})

    def _basic_action(self, lease_id, event_id, action_time,
                      reservation_status=None):
        """Commits basic lease actions such as starting and ending."""
        lease = self.get_lease(lease_id)

        event_status = 'DONE'
        for reservation in lease['reservations']:
            resource_type = reservation['resource_type']
            try:
                # action_time is 'on_start' or 'on_end'
                self.resource_actions[resource_type][action_time](
                    reservation['resource_id']
                )
            except common_ex.ClimateException:
                # one failed reservation marks the whole event as ERROR but
                # the remaining reservations are still processed
                LOG.exception("Failed to execute action %(action)s "
                              "for lease %(lease)s"
                              % {
                                  'action': action_time,
                                  'lease': lease_id,
                              })
                event_status = 'ERROR'
                db_api.reservation_update(reservation['id'],
                                          {'status': 'error'})
            else:
                if reservation_status is not None:
                    db_api.reservation_update(reservation['id'],
                                              {'status': reservation_status})

        db_api.event_update(event_id, {'status': event_status})

    def _send_notification(self, lease, ctx, events=[]):
        # NOTE(review): mutable default argument `events=[]` -- harmless here
        # because it is never mutated, but `events=None` would be safer.
        payload = notification_api.format_lease_payload(lease)

        for event in events:
            notification_api.send_lease_notification(ctx, payload,
                                                     'lease.%s' % event)

    def _check_date_within_lease_limits(self, date, lease):
        if not lease['start_date'] < date < lease['end_date']:
            raise common_ex.NotAuthorized(
                'Datetime is out of lease limits')

    def _update_before_end_event_date(self, event, before_end_date, lease):
        # set the notification time, clamped so it never precedes lease start
        event['time'] = before_end_date
        if event['time'] < lease['start_date']:
            LOG.warning("New start_date greater than before_end_date. "
                        "Setting before_end_date to %s for lease %s"
                        % (lease['start_date'], lease.get('id',
                           lease.get('name'))))
            event['time'] = lease['start_date']

    def _update_before_end_event(self, old_lease, new_lease,
                                 notifications, before_end_date=None):
        event = db_api.event_get_first_sorted_by_filters(
            'lease_id',
            'asc',
            {
                'lease_id': old_lease['id'],
                'event_type': 'before_end_lease'
            }
        )
        if event:
            # NOTE(casanch1) do nothing if the event does not exist.
            # This is for backward compatibility
            update_values = {}
            if not before_end_date:
                # before_end_date needs to be calculated based on
                # previous delta
                prev_before_end_delta = old_lease['end_date'] - event['time']
                before_end_date = new_lease['end_date'] - prev_before_end_delta

            self._update_before_end_event_date(update_values, before_end_date,
                                               new_lease)
            if event['status'] == 'DONE':
                # re-arm an already-fired notification for the new end date
                update_values['status'] = 'UNDONE'
                notifications.append('event.before_end_lease.stop')

            db_api.event_update(event['id'], update_values)

    def __getattr__(self, name):
        """RPC Dispatcher for plugins methods.

        Names of the form `'<resource_type>:<method>'` are forwarded to the
        matching plugin; anything else raises AttributeError.
        """

        fn = None
        try:
            resource_type, method = name.rsplit(':', 1)
        except ValueError:
            # NOTE(sbauza) : the dispatcher needs to know which plugin to use,
            #  raising error if consequently not
            raise AttributeError(name)
        try:
            try:
                fn = getattr(self.plugins[resource_type], method)
            except KeyError:
                LOG.error("Plugin with resource type %s not found",
                          resource_type)
                raise exceptions.UnsupportedResourceType(resource_type)
        except AttributeError:
            LOG.error("Plugin %s doesn't include method %s",
                      self.plugins[resource_type], method)
        if fn is not None:
            return fn
        raise AttributeError(name)
| StarcoderdataPython |
5040884 | from __future__ import absolute_import
from typing import Tuple, List, Callable, Any
import numpy as np # type: ignore
from sklearn.utils import check_random_state # type: ignore
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
def iter_shuffled(X, columns_to_shuffle=None, pre_shuffle=True,
                  random_state=None):
    """
    Return an iterator of X matrices which have one or more columns shuffled.
    After each iteration yielded matrix is mutated inplace, so
    if you want to use multiple of them at the same time, make copies.
    ``columns_to_shuffle`` is a sequence of column numbers to shuffle.
    By default, all columns are shuffled once, i.e. columns_to_shuffle
    is ``range(X.shape[1])``.
    If ``pre_shuffle`` is True, a copy of ``X`` is shuffled once, and then
    result takes shuffled columns from this copy. If it is False,
    columns are shuffled on fly. ``pre_shuffle = True`` can be faster
    if there is a lot of columns, or if columns are used multiple times.
    """
    rng = check_random_state(random_state)
    if columns_to_shuffle is None:
        columns_to_shuffle = range(X.shape[1])
    if pre_shuffle:
        # Shuffle whole rows once; shuffled columns are later copied from
        # this single pre-shuffled matrix.
        X_shuffled = X.copy()
        rng.shuffle(X_shuffled)
    X_res = X.copy()
    for columns in columns_to_shuffle:
        # ``columns`` may be a single index or (as used by the caller below)
        # a list of indices that should be shuffled together.
        if pre_shuffle:
            X_res[:, columns] = X_shuffled[:, columns]
        else:
            # NOTE(review): when ``columns`` is a *list*, fancy indexing makes
            # X_res[:, columns] a copy, so this shuffle is a no-op; grouped
            # columns are only really shuffled on the pre_shuffle=True path.
            rng.shuffle(X_res[:, columns])
        yield X_res
        # Restore the original column values before the next iteration,
        # since X_res is mutated in place.
        X_res[:, columns] = X[:, columns]
def get_score_importances(
        score_func,  # type: Callable[[Any, Any], float]
        X,
        y,
        n_iter=5,  # type: int
        columns_to_shuffle=None,
        random_state=None
    ):
    # type: (...) -> Tuple[float, List[np.ndarray]]
    """
    Return ``(base_score, score_decreases)`` tuple with the base score and
    score decreases when a feature is not available.
    ``base_score`` is ``score_func(X, y)``; ``score_decreases``
    is a list of length ``n_iter`` with feature importance arrays
    (each array is of shape ``n_features``); feature importances are computed
    as score decrease when a feature is not available.
    ``n_iter`` iterations of the basic algorithm is done, each iteration
    starting from a different random seed.
    If you just want feature importances, you can take a mean of the result::
        import numpy as np
        from eli5.permutation_importance import get_score_importances
        base_score, score_decreases = get_score_importances(score_func, X, y)
        feature_importances = np.mean(score_decreases, axis=0)
    """
    rng = check_random_state(random_state)
    base_score = score_func(X, y)
    # Each iteration reuses ``rng``, so runs differ while staying
    # reproducible for a fixed random_state.
    scores_decreases = [
        base_score - _get_scores_shufled(
            score_func, X, y, columns_to_shuffle=columns_to_shuffle,
            random_state=rng)
        for _ in range(n_iter)
    ]
    return base_score, scores_decreases
def _get_scores_shufled(score_func, X, y, columns_to_shuffle=None,
                        random_state=None):
    """Score every shuffled-column variant of X once; returns an ndarray.

    (Name misspelling kept for backward compatibility with callers.)
    """
    scores = []
    for X_shuffled in iter_shuffled(X, columns_to_shuffle,
                                    random_state=random_state):
        scores.append(score_func(X_shuffled, y))
    return np.array(scores)
# -*- coding: utf-8 -*-
from functools import partial
from typing import List
import numpy as np # type: ignore
from sklearn.model_selection import check_cv # type: ignore
from sklearn.utils.metaestimators import if_delegate_has_method # type: ignore
from sklearn.utils import check_array, check_random_state # type: ignore
from sklearn.base import ( # type: ignore
BaseEstimator,
MetaEstimatorMixin,
clone,
is_classifier
)
from sklearn.metrics.scorer import check_scoring # type: ignore
# from eli5.permutation_importance import get_score_importances
#from eli5.sklearn.utils import pandas_available
import pandas as pd # type: ignore
pandas_available = True
# Caveat texts shown alongside computed importances; which one applies
# depends on the ``cv`` mode used when fitting PermutationImportance.
CAVEATS_CV_NONE = """
Feature importances are computed on the same data as used for training,
i.e. feature importances don't reflect importance of features for
generalization.
"""
CAVEATS_CV = """
Feature importances are not computed for the final estimator;
they are computed for a sequence of estimators trained and evaluated
on train/test splits. So they tell you about importances of features
for generalization, but not feature importances of a particular trained model.
"""
CAVEATS_PREFIT = """
If feature importances are computed on the same data as used for training,
they don't reflect importance of features for generalization. Use a held-out
dataset if you want generalization feature importances.
"""
class PermutationImportance(BaseEstimator, MetaEstimatorMixin):
    """Meta-estimator which computes ``feature_importances_`` attribute
    based on permutation importance (also known as mean score decrease).
    :class:`~PermutationImportance` instance can be used instead of
    its wrapped estimator, as it exposes all estimator's common methods like
    ``predict``.
    There are 3 main modes of operation:
    1. cv="prefit" (pre-fit estimator is passed). You can call
       PermutationImportance.fit either with training data, or
       with a held-out dataset (in the latter case ``feature_importances_``
       would be importances of features for generalization). After the fitting
       ``feature_importances_`` attribute becomes available, but the estimator
       itself is not fit again. When cv="prefit",
       :meth:`~PermutationImportance.fit` must be called
       directly, and :class:`~PermutationImportance` cannot be used with
       ``cross_val_score``, ``GridSearchCV`` and similar utilities that clone
       the estimator.
    2. cv=None. In this case :meth:`~PermutationImportance.fit` method fits
       the estimator and computes feature importances on the same data, i.e.
       feature importances don't reflect importance of features for
       generalization.
    3. all other ``cv`` values. :meth:`~PermutationImportance.fit` method
       fits the estimator, but instead of computing feature importances for
       the concrete estimator which is fit, importances are computed for
       a sequence of estimators trained and evaluated on train/test splits
       according to ``cv``, and then averaged. This is more resource-intensive
       (estimators are fit multiple times), and importances are not computed
       for the final estimator, but ``feature_importances_`` show importances
       of features for generalization.
    Mode (1) is most useful for inspecting an existing estimator; modes
    (2) and (3) can be also used for feature selection, e.g. together with
    sklearn's SelectFromModel or RFE.
    Currently :class:`~PermutationImportance` works with dense data.
    Parameters
    ----------
    estimator : object
        The base estimator. This can be both a fitted
        (if ``prefit`` is set to True) or a non-fitted estimator.
    scoring : string, callable or None, default=None
        Scoring function to use for computing feature importances.
        A string with scoring name (see scikit-learn docs) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    n_iter : int, default 5
        Number of random shuffle iterations. Decrease to improve speed,
        increase to get more precise estimates.
    random_state : integer or numpy.random.RandomState, optional
        random state
    cv : int, cross-validation generator, iterable or "prefit"
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
            - None, to disable cross-validation and compute feature importances
              on the same data as used for training.
            - integer, to specify the number of folds.
            - An object to be used as a cross-validation generator.
            - An iterable yielding train/test splits.
            - "prefit" string constant (default).
        If "prefit" is passed, it is assumed that ``estimator`` has been
        fitted already and all data is used for computing feature importances.
    refit : bool
        Whether to fit the estimator on the whole data if cross-validation
        is used (default is True).
    Attributes
    ----------
    feature_importances_ : array
        Feature importances, computed as mean decrease of the score when
        a feature is permuted (i.e. becomes noise).
    feature_importances_std_ : array
        Standard deviations of feature importances.
    results_ : list of arrays
        A list of score decreases for all experiments.
    scores_ : array of float
        A list of base scores for all experiments (with no features permuted).
    estimator_ : an estimator
        The base estimator from which the :class:`~PermutationImportance`
        instance  is built. This is stored only when a non-fitted estimator
        is passed to the :class:`~PermutationImportance`, i.e when ``cv`` is
        not "prefit".
    rng_ : numpy.random.RandomState
        random state
    """

    def __init__(self, estimator, scoring=None, n_iter=5, random_state=None,
                 cv='prefit', refit=True):
        # type: (...) -> None
        if isinstance(cv, str) and cv != "prefit":
            raise ValueError("Invalid cv value: {!r}".format(cv))
        self.refit = refit
        self.estimator = estimator
        self.scoring = scoring
        self.n_iter = n_iter
        self.random_state = random_state
        self.cv = cv
        self.rng_ = check_random_state(random_state)

    def _wrap_scorer(self, base_scorer, pd_columns):
        # Rebuild a DataFrame around the (ndarray) X so estimators fitted on
        # DataFrames keep seeing named columns during scoring.
        def pd_scorer(model, X, y):
            X = pd.DataFrame(X, columns=pd_columns)
            return base_scorer(model, X, y)
        return pd_scorer

    def fit(self, X, y, groups=None, columns_to_shuffle=None, **fit_params):
        # type: (...) -> PermutationImportance
        """Compute ``feature_importances_`` attribute and optionally
        fit the base estimator.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.
        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.
        columns_to_shuffle : list of lists
            Each element represents the columns to be shuffled simultaneously
        **fit_params : Other estimator specific parameters
        Returns
        -------
        self : object
            Returns self.
        """
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        if pandas_available and isinstance(X, pd.DataFrame):
            self.scorer_ = self._wrap_scorer(self.scorer_, X.columns)
        if self.cv != "prefit" and self.refit:
            self.estimator_ = clone(self.estimator)
            self.estimator_.fit(X, y, **fit_params)
        if columns_to_shuffle is not None and hasattr(X, 'columns'):
            # Translate column-name groups into positional indices *before*
            # check_array turns X into a plain ndarray.
            column_inds_to_shuffle = get_column_inds_from_names(
                X.columns, columns_to_shuffle)
        else:
            # Pass through unchanged: either there is nothing to translate
            # (None => shuffle every column once) or X is already a plain
            # array and ``columns_to_shuffle`` must contain indices.
            column_inds_to_shuffle = columns_to_shuffle
        X = check_array(X)
        if self.cv not in (None, "prefit"):
            si = self._cv_scores_importances(
                X, y, groups=groups,
                columns_to_shuffle=column_inds_to_shuffle, **fit_params)
        else:
            si = self._non_cv_scores_importances(
                X, y, columns_to_shuffle=column_inds_to_shuffle)
        scores, results = si
        self.scores_ = np.array(scores)
        self.results_ = results
        self.feature_importances_ = np.mean(results, axis=0)
        self.feature_importances_std_ = np.std(results, axis=0)
        return self

    def _cv_scores_importances(self, X, y, groups=None,
                               columns_to_shuffle=None, **fit_params):
        """Fit one estimator per CV split and collect per-split importances."""
        assert self.cv is not None
        cv = check_cv(self.cv, y, is_classifier(self.estimator))
        feature_importances = []  # type: List
        base_scores = []  # type: List[float]
        for train, test in cv.split(X, y, groups):
            est = clone(self.estimator).fit(X[train], y[train], **fit_params)
            score_func = partial(self.scorer_, est)
            _base_score, _importances = self._get_score_importances(
                score_func, X[test], y[test], columns_to_shuffle)
            base_scores.extend([_base_score] * len(_importances))
            feature_importances.extend(_importances)
        return base_scores, feature_importances

    def _non_cv_scores_importances(self, X, y, columns_to_shuffle):
        """Compute importances with the (already fitted) wrapped estimator."""
        score_func = partial(self.scorer_, self.wrapped_estimator_)
        base_score, importances = self._get_score_importances(
            score_func, X, y, columns_to_shuffle)
        return [base_score] * len(importances), importances

    def _get_score_importances(self, score_func, X, y, columns_to_shuffle):
        return get_score_importances(score_func, X, y, n_iter=self.n_iter,
                                     columns_to_shuffle=columns_to_shuffle,
                                     random_state=self.rng_)

    @property
    def caveats_(self):
        # type: () -> str
        if self.cv == 'prefit':
            return CAVEATS_PREFIT
        elif self.cv is None:
            return CAVEATS_CV_NONE
        return CAVEATS_CV

    # ============= Exposed methods of a wrapped estimator:

    @if_delegate_has_method(delegate='wrapped_estimator_')
    def score(self, X, y=None, *args, **kwargs):
        return self.wrapped_estimator_.score(X, y, *args, **kwargs)

    @if_delegate_has_method(delegate='wrapped_estimator_')
    def predict(self, X):
        return self.wrapped_estimator_.predict(X)

    @if_delegate_has_method(delegate='wrapped_estimator_')
    def predict_proba(self, X):
        return self.wrapped_estimator_.predict_proba(X)

    @if_delegate_has_method(delegate='wrapped_estimator_')
    def predict_log_proba(self, X):
        return self.wrapped_estimator_.predict_log_proba(X)

    @if_delegate_has_method(delegate='wrapped_estimator_')
    def decision_function(self, X):
        return self.wrapped_estimator_.decision_function(X)

    @property
    def wrapped_estimator_(self):
        # "prefit" / no-refit modes delegate to the user-supplied estimator;
        # otherwise to the clone fitted in :meth:`fit`.
        if self.cv == "prefit" or not self.refit:
            return self.estimator
        return self.estimator_

    @property
    def _estimator_type(self):
        return self.estimator._estimator_type

    @property
    def classes_(self):
        return self.wrapped_estimator_.classes_
def get_column_inds_from_names(df_column_names, names_to_replace):
    """Translate groups of column names into groups of positional indices.

    Parameters
    ----------
    df_column_names : pandas.Index
        Column labels of the DataFrame (``.get_loc`` is used for lookup).
    names_to_replace : list of lists of str, or None
        Each inner list is a group of column names to be treated together.

    Returns
    -------
    list of lists of int, or None when *names_to_replace* is None
        (None means "shuffle every column individually" downstream).
    """
    if names_to_replace is None:
        # Previously this crashed on None; pass the sentinel through instead.
        return None
    return [[df_column_names.get_loc(name) for name in group]
            for group in names_to_replace]
def variable_importance_plot(feature_names, feat_importances, err=None, keep_top = None):
    """
    Plot a horizontal bar chart of feature importances, sorted ascending.

    Parameters
    ----------
    feature_names : sequence of str
        One label per feature (same length/order as *feat_importances*).
    feat_importances : sequence of float
        Importance value per feature.
    err : sequence of float, optional
        Horizontal error bars; defaults to zeros when omitted.
    keep_top : int, optional
        If given, only the *keep_top* largest importances are plotted.

    Returns
    -------
    (ax, fig)
        The matplotlib Axes and Figure of the plot.
    """
    # index = np.arange(len(names_index))
    # importance_desc = sorted(importance)
    # feature_space = []
    # for i in range(indices.shape[0] - 1, -1, -1):
    #     feature_space.append(names_index[indices[i]])
    fig, ax = plt.subplots(figsize=(7.5, 12))
    if err is None:
        err = np.zeros(len(feat_importances))
    # Two-row frame (row 0 = importances, row 1 = errors), one column per
    # feature; sorting by row 0 then transposing yields ascending bars.
    feature_importances = pd.DataFrame([feat_importances, err], columns=feature_names)
    importances_df = feature_importances.sort_values(by=0, axis=1, ascending=True, inplace=False, kind='quicksort', na_position='last').T
    importances_df.columns = ['imps', 'err']
    if keep_top is not None:
        # After the ascending sort the largest values sit at the end.
        importances_df = importances_df.iloc[(-1*keep_top):]
    # ax.set_axis_bgcolor('#fafafa')
    ax.barh(importances_df.index,
            importances_df.imps,
            xerr=importances_df.err,
            alpha = 0.9,
            edgecolor = "black",
            zorder=3,
            color='lightblue'
            )
    # align="center",
    # color = '#875FDB')
    # plt.yticks(index,
    #            feature_space)
    # plt.ylim(-1, 30)
    # plt.xlim(0, max(importance_desc) + 0.01)
    ax.set_ylabel('Feature')
    fig.subplots_adjust(left=0.3)
    fig.tight_layout()
    return ax, fig
#names_of_feats_all = []
#for feat_group in feature_space.columns:
# for feat_dict in PATIENT_FEATURES_CONFIG:
# if feat_dict['name'] == feat_group:
# names_of_feats_all.append(feat_dict['formatted_name'])
# break
#feat_list = [['agebl'],
#['female'],
#['race'],
#['hdlchol'],
#['totchol'],
#['systolic'],
#['t2d_history'],
#['bp_antihtn'],
#['cursmk_ever'],
#['ldlchol'],
#['diastolic'],
#['wt'],
#['ht'],
#['medhousincome'],
#['primarycarevsts'],
#['otherservicevsts'],
#['specialtycarevsts'],
#['total_medications'],
#['education5'],
#['education3'],
#['education4'],
#['education6'],
#['education1'],
#['education2'],
#['normal_tests'],
#['abnormal_tests'],
#['CCS_158'],
#['CCS_98'],
#['MONO_1'],
#['CCS_5'],
#['PSA_0'],
#['LYMPH_1'],
#['CCS_79'],
#['MED_4799'],
#['MED_3320'],
#['MED_1630'],
#['EOS_0'],
#['CCS_102'],
#['CCS_8'],
#['MED_3615'],
#['CCS_96'],
#['MED_9646'],
#['MED_6205'],
#['CALCIUM_0'],
#['MED_8672'],
#['MED_6410'],
#['EOS_1'],
#['CCS_33'],
#['BASO_0'],
#['CCS_63'],
#['GLU_1'],
#['CCS_59'],
#['GFR_1'],
#['CRP_1'],
#['CCS_51'],
#['CCS_204'],
#['CCS_95'],
#['CCS_653'],
#['CCS_64'],
#['CCS_244'],
#['CCS_97'],
#['MED_3999'],
#['U_ACR_1'],
#['MED_8625'],
#['K_0'],
#['MED_4630'],
#['U_PROT_1'],
#['MED_4155'],
#['BILI_0'],
#['CCS_83'],
#['BILI_1'],
#['CCS_2'],
#['MED_1220'],
#['MED_0310'],
#['MED_5940'],
#['CCS_11'],
#['CCS_660'],
#['MED_9066'],
#['CCS_104'],
#['MED_3720'],
#['MED_7710'],
#['MED_4240'],
#['CCS_115'],
#['AST_0'],
#['CCS_216'],
#['MED_3760'],
#['CCS_211'],
#['MED_0700'],
#['T4_1'],
#['FIBRINOGEN_1'],
#['BUN_1'],
#['MED_8230'],
#['CCS_152'],
#['CCS_49'],
#['CCS_50'],
#['CCS_651'],
#['CCS_199'],
#['MED_3610'],
#['CCS_99'],
#['MED_4920'],
#['MED_0199'],
#['MED_4650'],
#['Emphysema'],
#['MED_3940'],
#['MED_0230'],
#['MED_9940'],
#['MED_7813'],
#['U_MICALB24_1']]
#
#feat_names = ['agebl',
#'female',
#'race',
#'hdlchol',
#'totchol',
#'systolic',
#'t2d_history',
#'bp_antihtn',
#'cursmk_ever',
#'ldlchol',
#'diastolic',
#'wt',
#'ht',
#'medhousincome',
#'primarycarevsts',
#'otherservicevsts',
#'specialtycarevsts',
#'total_medications',
#'education5',
#'education3',
#'education4',
#'education6',
#'education1',
#'education2',
#'normal_tests',
#'abnormal_tests',
#'CCS_158',
#'CCS_98',
#'MONO_1',
#'CCS_5',
#'PSA_0',
#'LYMPH_1',
#'CCS_79',
#'MED_4799',
#'MED_3320',
#'MED_1630',
#'EOS_0',
#'CCS_102',
#'CCS_8',
#'MED_3615',
#'CCS_96',
#'MED_9646',
#'MED_6205',
#'CALCIUM_0',
#'MED_8672',
#'MED_6410',
#'EOS_1',
#'CCS_33',
#'BASO_0',
#'CCS_63',
#'GLU_1',
#'CCS_59',
#'GFR_1',
#'CRP_1',
#'CCS_51',
#'CCS_204',
#'CCS_95',
#'CCS_653',
#'CCS_64',
#'CCS_244',
#'CCS_97',
#'MED_3999',
#'U_ACR_1',
#'MED_8625',
#'K_0',
#'MED_4630',
#'U_PROT_1',
#'MED_4155',
#'BILI_0',
#'CCS_83',
#'BILI_1',
#'CCS_2',
#'MED_1220',
#'MED_0310',
#'MED_5940',
#'CCS_11',
#'CCS_660',
#'MED_9066',
#'CCS_104',
#'MED_3720',
#'MED_7710',
#'MED_4240',
#'CCS_115',
#'AST_0',
#'CCS_216',
#'MED_3760',
#'CCS_211',
#'MED_0700',
#'T4_1',
#'FIBRINOGEN_1',
#'BUN_1',
#'MED_8230',
#'CCS_152',
#'CCS_49',
#'CCS_50',
#'CCS_651',
#'CCS_199',
#'MED_3610',
#'CCS_99',
#'MED_4920',
#'MED_0199',
#'MED_4650',
#'Emphysema',
#'MED_3940',
#'MED_0230',
#'MED_9940',
#'MED_7813',
#'U_MICALB24_1']
#names_of_feats = []
#for feat_group in feat_list:
# for feat_dict in PATIENT_FEATURES_CONFIG:
# if feat_dict['name'] == feat_group[0]:
# names_of_feats.append(feat_dict['formatted_name'])
# break
#
#names_of_feats[0] = 'Clinic Location'
#names_of_feats[1] = 'Clinic Urban/Rural'
#names_of_feats[2] = 'Ethnicity'
#names_of_feats[3] = 'Insurance Type'
#%%
# Output directory for all figures / pickles produced by this script.
result_dir = '../Results/allvars_pce_pts_0506/'
import os
# makedirs(..., exist_ok=True) also creates missing parent directories and is
# race-free, unlike the previous isdir-then-mkdir pattern.
os.makedirs(os.path.dirname(result_dir), exist_ok=True)
#result_dir = '../Results/allvars_pce_pts_0925/'
#best_model = 'gbm'
#from joblib import dump, load
#result_dir = '../Results/allvars_oldyoung_missing_0913/'
#best_model = 'gbm'
#model = load(result_dir + best_model + '_best_model.joblib')
run_date_str = '0507'  # date tag appended to every output filename
#feat_import_df = pd.read_csv(result_dir + best_model + "_feature_importances.csv")
##%%
#feat_names = [f for f in feat_import_df.feature if '_missing' not in f]
#feat_list = [[f] for f in feat_names]
##%%
#ax, fig = variable_importance_plot(feat_import_df.feature, feat_import_df.importance.values, keep_top = 30)
#ax.set_title('Feature importances for GBM: Impurity')
#ax.set_xlabel('Mean Decrease in Impurity');
#plt.tight_layout()
#plt.savefig(f'{result_dir}feature_importances_{best_model}_impurity_{run_date_str}.png', dpi = 500)
#%%
# --- Data loading and train/test preparation -------------------------------
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from medical_ML import split_cohort
from datetime import datetime
test_ind_col = 'test_ind'
label = 'ascvdany5y'  # outcome column: any ASCVD event within 5 years
# Cohort exclusion flags passed to split_cohort.
to_exclude = {
    'pce_cohort': False,
    'pce_invalid_vars': True,
    'cvd_bl': True,
    'antilpd': True,
    'oldyoung': True}
datafile = 'allvars.csv'
ascvd_est = pd.read_csv('../Data/cohort/' + datafile)
#%%
train_est2, test_est2 = split_cohort(ascvd_est, to_exclude, test_ind_col, drop = 'all')
# One-hot encode every object-dtype (string) column.
test_set_data = pd.get_dummies(test_est2, columns = [c for c in test_est2.columns if test_est2[c].dtype=='O'])
train_set_data = pd.get_dummies(train_est2, columns = [c for c in train_est2.columns if train_est2[c].dtype=='O'])
train_set_features = train_set_data[[f for f in train_set_data.columns if f != label]]
test_set_features = test_set_data[[f for f in test_set_data.columns if f != label]]
train_set_labels = train_est2[label]
test_set_labels = test_est2[label]
# Drop references to the raw frames to free memory.
train_est2 = test_est2 = ascvd_est = None
# --- Missing-value imputation (fit on train, applied to train and test) ----
imp = IterativeImputer(add_indicator=False,
                       estimator=None,
                       imputation_order='ascending',
                       initial_strategy='mean',
                       max_iter=50, max_value=None,
                       min_value=None,
                       missing_values=np.nan,
                       n_nearest_features=10,
                       random_state=None,
                       sample_posterior=False,
                       tol=0.001, verbose=0)
imp.fit(train_set_features)
train_set_imp_features = imp.transform(train_set_features)
train_set_imp_features = pd.DataFrame(train_set_imp_features, columns = train_set_features.columns)
test_set_imp_features = imp.transform(test_set_features)
test_set_imp_features = pd.DataFrame(test_set_imp_features, columns = test_set_features.columns)
train_set_features = test_set_features = None
#%%
#fl2 = [[fl[0]] for fl in feat_list if 'race' not in fl[0]]
#
#fl2.append(['race'])
#%%
#gbm = model.named_steps['predictor']
#gbm.n_features_ = test_set_features.shape[1]
#parms = gbm.get_params()
#model.named_steps['predictor'].n_features = test_set_features.shape[1]
# --- Train the gradient boosting model -------------------------------------
parms = {'n_estimators': 300,
         'learning_rate': 0.01,
         'max_depth': 5,
         'subsample': 0.35,
         'max_features': 0.25}
print('training GBM')
now = datetime.now()
gbm2 = GradientBoostingClassifier(**parms)
print(train_set_imp_features.columns)
gbm2.fit(train_set_imp_features, train_set_labels)
difference = (datetime.now() - now).total_seconds()
print('done, total seconds:', difference)
#%%
# NOTE(review): this plots gbm2.feature_importances_ (impurity-based, per the
# 'gini' filename), but the title/xlabel say "Permutation Importance" /
# "Mean Decrease in AUC" — likely copy-paste from the block below; verify.
ax, fig = variable_importance_plot(train_set_imp_features.columns, gbm2.feature_importances_, keep_top = 30)
ax.set_title('Feature importances for GBM model: Permutation Importance')
ax.set_xlabel('Mean Decrease in AUC')
plt.tight_layout()
plt.savefig(f'{result_dir}feat_imps_gini_{run_date_str}_100.png', dpi = 500)
#dump(gbm2)
#%%
# --- Permutation importance on the held-out test set -----------------------
print('calculating permutation importance')
now = datetime.now()
# Skip '_missing' indicator columns; shuffle each remaining feature alone.
feat_names = [f for f in test_set_imp_features.columns if '_missing' not in f]
feat_list = [[f] for f in feat_names]
perm = PermutationImportance(gbm2, n_iter=5).fit(test_set_imp_features, test_set_labels, columns_to_shuffle = feat_list)
difference = (datetime.now() - now).total_seconds()
print('done, total seconds:', difference)
with open(f'{result_dir}permutation_feat_importances_all_{run_date_str}_5.pkl', "wb") as output_file:
    pickle.dump([perm.results_, perm.feature_importances_, perm.feature_importances_std_], output_file)
#%%
ax, fig = variable_importance_plot(feat_names, perm.feature_importances_, err=perm.feature_importances_std_, keep_top = 30)
ax.set_title('Feature importances for GBM model: Permutation Importance')
ax.set_xlabel('Mean Decrease in AUC')
plt.tight_layout()
plt.savefig(f'{result_dir}feat_imps_permutation_{run_date_str}_100.png', dpi = 500)
## Create horizontal bars
#y_pos = np.arange(len(top_features_union))
#
#fig, ax = plt.subplots(figsize=(10,8))
#ax.xaxis.grid(True, zorder=0)
#width = 0.40
#
#offset_fix = np.zeros(len(top_features_union))
#offset_fix[top_var_imp_red == 0]= -width/2
##top_var_imp/np.max(top_var_imp) * 100 top_var_imp_red/np.max(top_var_imp_red) * 100 , width
#
#plt.barh(y_pos+width/2 + offset_fix, var_imp_df_top['relative importance'] , width, alpha = 0.5, edgecolor = "black", zorder=3, color='tab:grey')
#plt.barh(y_pos-width/2, var_imp_df_red_top['relative importance'] ,width, alpha = 0.5, edgecolor = "black", zorder=3, color='tab:blue')
#
## Create names on the y-axis
#plt.yticks(y_pos, top_features)
#
#plt.xlabel('Relative Importance (%)')
#plt.xlim(0, 100)
#plt.legend([ 'All variables','Bedside variables'])
#plt.tight_layout() | StarcoderdataPython |
1971098 | # -*- coding: utf-8 -*-
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
import sys
import re
# Scrape the saved Libération 2015 listing page and emit Wikidata
# QuickStatements commands for every "Portrait" article.
# NOTE(review): Python 2 script (print statements, .encode on str handling).
# NOTE(review): ``f`` is never closed — consider a ``with`` block; the bare
# ``except:`` below also swallows every error, not just a missing rubric.
f = open("portraits_qs.txt", "w")
counter = 0
raw_html = open('portrait2015.html').read()
html = BeautifulSoup(raw_html, 'html.parser')
for item in html.select('li.live-item'):
    url = item.select("h3 a")[0]['href'].strip().encode("utf8")
    titre = item.select("h3 a")[0].text.strip().encode("utf8")
    date = item.select("p.live-datetime")[0].text.strip().encode("utf8")
    try:
        rubrique = item.select("a.slug")[0].text.strip().encode("utf8")
    except:
        rubrique = "Undefined"
    if rubrique == "Portrait" or rubrique == "portrait":
        counter = counter + 1
        f.write("CREATE\n")
        f.write('LAST\tLfr\t"%s"\n' % titre)
        f.write('LAST\tDfr\t"Portrait paru dans Libération"\n')
        f.write("LAST\tP31\tQ5707594\n") # instance of (P31): press article
        f.write("LAST\tP361\tQ30091381\n") # part of (P361): Libération portrait series
        f.write("LAST\tP407\tQ150\n") # language (P407): French
        f.write("LAST\tP1433\tQ13717\n") # published in (P1433): Libération
        f.write('LAST\tP953\t"%s"\n' % url) # full text available at (P953)
        f.write('LAST\tP1476\tfr:"%s"\n' % titre) # title (P1476)
        # Convert "dd.mm.yy" into the Wikidata day-precision date format.
        dateFormatted = re.sub(r"^(\d{2})\.(\d{2})\.(\d{2})$", r'+20\3-\2-\1T00:00:00Z/11', date)
        f.write('LAST\tP577\t%s\n' % dateFormatted) # publication date (P577)
        f.write("\n")
        print "%s - %s" % (counter, date)
    else:
        print "%s non traité" % rubrique
1777689 | <filename>app/api/tests/test_friend_request_api.py
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from .utils import create_user, create_friend_request
from core.models import FriendRequest
CREATE_FRIEND_REQUEST_URL = reverse('api:friend_request_create')
LIST_FRIEND_REQUEST_URL = reverse('api:friend_request_list')
MANAGE_FRIEND_REQUEST_URL = 'api:friend_request_manage'
class TestPublicFriendRequestAPI(TestCase):
    """Tests for the public (unauthenticated) FriendRequest API."""

    def setUp(self) -> None:
        """Creates two users and an unauthenticated APIClient."""
        self.user_one = create_user(
            email='<EMAIL>',
            password='<PASSWORD>',
            username='test_username_one',
        )
        self.user_two = create_user(
            email='<EMAIL>',
            password='<PASSWORD>',
            username='test_username_two',
        )
        self.client = APIClient()

    def test_create_friend_request_unauthorized(self) -> None:
        """Anonymous Users must not be able to create a FriendRequest."""
        request_data = {'to_user': self.user_one, 'from_user': self.user_two}
        status_code = self.client.post(
            CREATE_FRIEND_REQUEST_URL, request_data
        ).status_code
        self.assertEqual(status_code, status.HTTP_401_UNAUTHORIZED)

    def test_list_friend_request_unauthorized(self) -> None:
        """Anonymous Users must not be able to list FriendRequests."""
        status_code = self.client.get(LIST_FRIEND_REQUEST_URL).status_code
        self.assertEqual(status_code, status.HTTP_401_UNAUTHORIZED)

    def test_manage_friend_request_unauthorized(self) -> None:
        """Anonymous Users must not be able to manage a FriendRequest."""
        friend_request = create_friend_request(
            to_user=self.user_one, from_user=self.user_two
        )
        detail_url = reverse(
            MANAGE_FRIEND_REQUEST_URL, kwargs={'pk': friend_request.pk}
        )
        status_code = self.client.get(detail_url).status_code
        self.assertEqual(status_code, status.HTTP_401_UNAUTHORIZED)
class TestFriendRequestPrivateAPI(TestCase):
    """Tests for the private API for the FriendRequest model"""
    def setUp(self) -> None:
        """Creates client and users for the tests"""
        self.user_one = create_user(
            email='<EMAIL>',
            password='<PASSWORD>',
            username='test_username_one',
        )
        self.user_two = create_user(
            email='<EMAIL>',
            password='<PASSWORD>',
            username='test_username_two',
        )
        self.client = APIClient()
        # All requests in this class run authenticated as user_one.
        self.client.force_authenticate(self.user_one)
    def test_create_friend_request_successfully(self) -> None:
        """Tests if a FriendRequest is created successfully"""
        # The recipient is addressed by their crypto_key, not their pk.
        payload = {
            'crypto_key': self.user_two.crypto_key,
            'from_user': self.user_one.pk,
        }
        response = self.client.post(CREATE_FRIEND_REQUEST_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_create_friend_request_already_exists_sent_by_user(self) -> None:
        """
        Tests if a FriendRequest isn't created when the
        same one has already been sent by User
        """
        create_friend_request(to_user=self.user_two, from_user=self.user_one)
        payload = {
            'crypto_key': self.user_two.crypto_key,
            'from_user': self.user_one.pk,
        }
        response = self.client.post(CREATE_FRIEND_REQUEST_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_friend_request_already_exists_sent_to_user(self) -> None:
        """
        Tests if a FriendRequest isn't created when the
        same one has already been sent to User
        """
        # Same pair as above but the duplicate goes in the reverse direction.
        create_friend_request(to_user=self.user_two, from_user=self.user_one)
        payload = {
            'crypto_key': self.user_one.crypto_key,
            'from_user': self.user_two.pk,
        }
        response = self.client.post(CREATE_FRIEND_REQUEST_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_friend_request_no_crypto_key_provided(self) -> None:
        """
        Tests if a FriendRequest isn't created when 'crypto_key' isn't provided
        """
        payload = {'from_user': self.user_one.pk}
        response = self.client.post(CREATE_FRIEND_REQUEST_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn(b'No \'crypto_key\' provided', response.content)
    def test_list_friend_request_successfully(self) -> None:
        """Tests if a FriendRequest is listed successfully"""
        data = {'to_user': self.user_one, 'from_user': self.user_two}
        create_friend_request(**data)
        response = self.client.get(LIST_FRIEND_REQUEST_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
    def test_manage_friend_request_does_not_exist(self) -> None:
        """
        Tests what happens when User tries to
        access FriendRequest, which doesn't exist
        """
        response = self.client.get(
            reverse(MANAGE_FRIEND_REQUEST_URL, kwargs={'pk': 1})
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_manage_friend_request_forbidden(self) -> None:
        """
        Tests what happens when User tries to
        access FriendRequest, which wasn't meant for them
        """
        # Request addressed TO user_two; the authenticated user_one is only
        # the sender, so managing it must be forbidden.
        data = {'to_user': self.user_two, 'from_user': self.user_one}
        friend_request = create_friend_request(**data)
        response = self.client.get(
            reverse(
                MANAGE_FRIEND_REQUEST_URL, kwargs={'pk': friend_request.pk}
            )
        )
        message = b'You don\'t have permission to manage this FriendRequest'
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertIn(message, response.content)
    def test_retrieve_friend_request_successfully(self) -> None:
        """Tests if a FriendRequest is retrieved successfully"""
        data = {'to_user': self.user_one, 'from_user': self.user_two}
        friend_request = create_friend_request(**data)
        response = self.client.get(
            reverse(
                MANAGE_FRIEND_REQUEST_URL, kwargs={'pk': friend_request.pk}
            )
        )
        expected_data = {
            'from_user': self.user_two.pk,
            'to_user': self.user_one.pk,
            'is_new': True,
            'is_accepted': False,
        }
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected_data)
    def test_update_friend_request_successfully(self) -> None:
        """Tests if a FriendRequest is updated successfully"""
        data = {'to_user': self.user_one, 'from_user': self.user_two}
        friend_request = create_friend_request(**data)
        response = self.client.patch(
            reverse(
                MANAGE_FRIEND_REQUEST_URL, kwargs={'pk': friend_request.pk}
            ),
            {'is_new': False, 'is_accepted': True},
        )
        # Reload from the DB so the assertions see the persisted state.
        friend_request.refresh_from_db()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertFalse(friend_request.is_new)
        self.assertTrue(friend_request.is_accepted)
    def test_delete_friend_request_successfully(self) -> None:
        """Tests if a FriendRequest is deleted successfully"""
        data = {'to_user': self.user_one, 'from_user': self.user_two}
        friend_request = create_friend_request(**data)
        response = self.client.delete(
            reverse(
                MANAGE_FRIEND_REQUEST_URL, kwargs={'pk': friend_request.pk}
            )
        )
        friend_request_exists = FriendRequest.objects.filter(
            pk=friend_request.pk
        ).exists()
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertFalse(friend_request_exists)
1972565 | from time import sleep
from datetime import datetime
import os
import requests
def post_to_discord(msg, timeout=10):
    """Post *msg* to the Discord webhook configured in discord_endpoint.

    Args:
        msg: Text to send as the message content.
        timeout: Seconds to wait for the HTTP request. Without a timeout a
            stalled connection would block the watcher loop below forever.
    """
    # Imported lazily so a missing discord_endpoint module only fails when a
    # post is actually attempted.
    from discord_endpoint import discord_endpoint
    requests.post(discord_endpoint, json={"content": msg}, timeout=timeout)
# Poll the Windows task list once per second and announce on Discord when the
# UnrealLightmass.exe process appears or disappears.
building = False
while True:
    proc_name = "UnrealLightmass.exe"
    listing = os.popen('tasklist /FI "IMAGENAME eq ' + proc_name + '"').read()
    # For Mac: listing = os.popen('ps -ax | grep "' + proc_name + '"').read()
    # tasklist prints more than 3 lines only when a matching process exists.
    running = len(listing.split('\n')) > 3
    if running and not building:
        building = True
        post_to_discord("Your process started at {}. I am watching it for you!".format(datetime.now().strftime("%m/%d/%Y %H:%M:%S")))
    elif building and not running:
        building = False
        post_to_discord("Your process ended at {}. All done!".format(datetime.now().strftime("%m/%d/%Y %H:%M:%S")))
    sleep(1)
| StarcoderdataPython |
3579366 | <filename>collector-agent/CollectorAgent/TCGenerator.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ------------------------------------------------------------------------------
# Copyright 2020. NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# keyWord ={
# 1:'agentId',
# 2:'applicationName',
# 3:'agentStartTime',
# 4:'transactionId',
# 5:'spanId',
# 6:'parentSpanId',
# 7:'spanId',
# 8:'parentSpanId ',
# 9:'startTime',
# 10:'elapsed ',
# 11:'rpc',
# 12:'serviceType',
# 13:'endPoint',
# 14:'remoteAddr',
# 15:'annotations',
# 16:'flag',
# 17:'err',
# 18:'spanEventList',
# 19:'parentApplicationName',
# 20:'parentApplicationType',
# 21:'acceptorHost',
# 25:'apiId',
# 26:'exceptionInfo',
# 30:'pplicationServiceType',
# 31:'loggingTransactionInfo',
# 32:'key',
# 33:'value'
# }
# from Intercept import interceptManger
class TCGenerator(object):
    """Helpers used when assembling trace-component (span) data."""

    @staticmethod
    def fetchIfExist(jMap, name):
        '''
        Return jMap[name] when the key is present, otherwise None.

        :param dict jMap: mapping to look the key up in
        :param name: key to fetch
        :return: the mapped value, or None when the key is absent
        '''
        # dict.has_key() was removed in Python 3; the `in` operator is the
        # equivalent that works on both Python 2 and 3.
        if name in jMap:
            return jMap[name]
        return None

    @staticmethod
    def __transactionIDHelper(interManger, id):
        '''
        Intended to build "<agentId>^<startTime>^<id>" transaction ids.

        :param InterceptManager interManger: owner of agent id / start time
        :param int id: transaction sequence number
        :return: currently None — the implementation is commented out below.
        '''
        # return str(interManger.ac.AgentID)+'^'+str(interManger.startTimeStamp)+'^'+str(id)
class ThriftProtocolUtil(object):
    """String-parsing helpers used when decoding thrift payload fields."""

    @staticmethod
    def _parseStrField(str):
        '''
        Parse a whitespace-separated "key=value" list into a dict.

        :param string str: text such as "a=1 b=2"
        :return dict: mapping of key -> value (both kept as strings)
        '''
        # Unpacking raises ValueError on malformed tokens (e.g. "a=b=c"),
        # matching the strictness of the original implementation.
        pairs = (token.split('=') for token in str.split())
        return {key: value for key, value in pairs}

    @staticmethod
    def _parseDotFormat(time):
        '''
        Convert a "<seconds>.<milliseconds>" string to integer milliseconds.

        :param string time: e.g. "12.345"
        :return: total milliseconds as an int
        '''
        seconds, millis = time.split('.')
        return 1000 * int(seconds) + int(millis)
| StarcoderdataPython |
11284956 | from django.db import models
class Edge(models.Model):
    """
    Django model to hold Edge information

    Links two Nodes and records the SMILES bookkeeping for the excluded
    fragment and the rebuilt molecule.
    """
    # Now parse the relevant CSV file and bulk add the data
    exclude_smiles = models.CharField(max_length=200)
    exclude_type = models.CharField(max_length=200)
    rebuilt_smiles = models.CharField(max_length=200)
    rebuilt_ring_smiles = models.CharField(max_length=200)
    rebuilt_type = models.CharField(max_length=200)
    excluded_ring_smiles = models.CharField(max_length=200)
    # The nodes. Referenced by name ("Nodes") because the Nodes class is
    # declared after Edge in this module — passing the class object directly
    # raised a NameError at import time. Distinct related_name values are
    # required because both foreign keys target the same model.
    # NOTE(review): Django >= 2.0 additionally requires an on_delete argument.
    node_from = models.ForeignKey('Nodes', related_name='edges_from')
    node_to = models.ForeignKey('Nodes', related_name='edges_to')

    class Meta:
        # unique_together only takes effect inside Meta; as a plain class
        # attribute (as it was originally written) Django ignores it.
        unique_together = (("exclude_smiles", "rebuilt_smiles"),)
class Nodes(models.Model):
    """
    Django model to hold Node information
    """
    # Canonical SMILES string for this molecule; unique per node.
    smiles = models.CharField(unique=True,max_length=200)
    heavy_atom_count = models.IntegerField()
    ring_atom_count = models.IntegerField()
    # SMILES of the ring system only.
    ring_smiles = models.CharField(max_length=200)
    # Then add the annotations
    price = models.IntegerField()
    mol_type = models.CharField(max_length=200)
| StarcoderdataPython |
1740244 | <filename>python/GafferRenderManUI/RenderManRenderUI.py
##########################################################################
#
# Copyright (c) 2012-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferRenderMan
Gaffer.Metadata.registerNode(
GafferRenderMan.RenderManRender,
"description",
"""
Performs offline batch rendering using a
RenderMan renderer. This is done in two
phases - first a RIB file is generated and
then the renderer is invoked to render it in
a separate process. Note though that the RIB
file is lightweight, and contains a single
procedural which will invoke Gaffer to generate
the scene on demand at runtime. The RIB therefore
requires very little disk space.
""",
plugs = {
"mode" : [
"description",
"""
When in "Render" mode, a RIB file is generated
and then renderered by running the renderer on
it. In "Generate RIB only" mode, only the RIB
is generated, and a subsequent node could be used
to post-process or launch the render in another
way - a SystemCommand node may be useful for this.
""",
"preset:Render", "render",
"preset:Generate RIB only", "generate",
"nodule:type", "",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"ribFileName" : [
"description",
"""
The name of the RIB file to be generated.
""",
"nodule:type", "",
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"path:leaf", True,
"path:bookmarks", "rib",
"fileSystemPath:extensions", "rib",
],
"command" : [
"description",
"""
The system command used to invoke the renderer - this
can be edited to add any custom flags that are necessary,
or to use a different renderer. The rib filename is
automatically appended to the command before it is invoked.
""",
"nodule:type", "",
],
},
)
| StarcoderdataPython |
351753 | <reponame>easy-electrophysiology/load-heka-python<filename>run_test_load_heka.py<gh_stars>0
from test import test_load_heka
from os.path import join, basename, dirname
import glob
def test_file(test_path, dp_thr=1e-6, info_type="max_dp_match", assert_mode=False):
    """
    Convenience function around test_load_heka.test_heka_reader()

    To test your file, first export the data from Patchmaster. Make sure stimulus is on (Tweak > Export > tick 'Stimulus'),
    Zero offset is subtracted and display filter is off (see README.md for more details).

    Export the series you want to test under the filename (and in the same directory as the test file):
    e.g. if your filename is test_file.dat
        ./.../test_file/test_file.dat
                        test_file_group-1_series-1.asc
                        test_file_group-1_series-2.asc ...

    Group and series indices may have any number of digits (the original
    implementation only handled single-digit indices).

    Args:
        test_path: directory named after the .dat file, containing the exports.
        dp_thr: decimal-place threshold forwarded to test_heka_reader.
        info_type: comparison mode forwarded to test_heka_reader.
        assert_mode: whether mismatches raise, forwarded to test_heka_reader.
    """
    import re

    base_path = dirname(test_path)
    version = basename(test_path)
    # "*" instead of "?" so multi-digit group/series numbers also match.
    ascii_files = glob.glob(join(test_path, "*_group-*_series-*.asc"))
    filenames = list(map(basename, ascii_files))
    if not filenames:
        raise Exception("no files found in " + base_path)
    group_series_to_test = []
    for filename in filenames:
        match = re.search(r"group-(\d+)_series-(\d+)", filename)
        if match is None:
            # Matched the glob but not the expected naming scheme; skip it.
            continue
        group_series_to_test.append([match.group(1), match.group(2)])
    test_load_heka.test_heka_reader(base_path,
                                    version,
                                    group_series_to_test,
                                    dp_thr,
                                    info_type,
                                    assert_mode)
| StarcoderdataPython |
12810729 | # Title: 피보나치 수 5
# Link: https://www.acmicpc.net/problem/10870
import sys
# Raise the recursion limit well beyond CPython's default of 1000 frames so a
# deeply recursive Fibonacci implementation does not hit RecursionError.
sys.setrecursionlimit(10 ** 6)
# Reads one line from stdin and parses it as an int.
read_single_int = lambda: int(sys.stdin.readline().strip())
def get_fibonacci(n: int):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Iterative implementation: the naive double recursion this replaces was
    O(2^n) time and needed a raised recursion limit; this runs in O(n) time
    and O(1) space while returning identical values.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def solution(n: int):
    """Answer for the problem: simply the n-th Fibonacci number."""
    answer = get_fibonacci(n)
    return answer
def main():
    """Read n from stdin and print F(n)."""
    print(solution(read_single_int()))


if __name__ == '__main__':
    main()
3270564 | <filename>src/aoc21/log.py
from logging import getLogger, INFO, Formatter, StreamHandler, WARNING
from sys import stderr
def setup_logging(verbosity: int):
    """Configure the aoc21 loggers from a verbosity count.

    verbosity 0 -> warnings only; 1 -> top-level info; 2+ -> per-day info
    as well. Records are written to stderr with the bare message as format.
    """
    top = getLogger("aoc21")
    days = getLogger("aoc21.days")
    top.setLevel(WARNING if verbosity <= 0 else INFO)
    days.setLevel(WARNING if verbosity <= 1 else INFO)
    stream_handler = StreamHandler(stderr)
    stream_handler.setFormatter(Formatter("%(message)s"))
    top.addHandler(stream_handler)
| StarcoderdataPython |
9775081 | <filename>api/src/app/database/models.py
from sqlalchemy import Boolean, Column, Table, ForeignKey, Integer, BigInteger, String, DateTime
from sqlalchemy.orm import relationship
from sqlalchemy_utils import UUIDType
from app.database.base import Base
from datetime import datetime
import uuid
# Self-referential association table for the User "follows" graph:
# each row states that follower_id follows followee_id.
follow_table = Table('follows', Base.metadata,
                     Column('followee_id', UUIDType(binary=False),
                            ForeignKey('users.id')),
                     Column('follower_id', UUIDType(binary=False),
                            ForeignKey('users.id')),
                     )
# User <-> Post association table recording upvotes.
user_upvotes = Table('user_upvotes', Base.metadata,
                     Column('user_id', UUIDType(binary=False),
                            ForeignKey('users.id')),
                     Column('post_id', UUIDType(binary=False),
                            ForeignKey('posts.id')),
                     )
# User <-> Post association table recording downvotes.
user_downvotes = Table('user_downvotes', Base.metadata,
                       Column('user_id', UUIDType(binary=False),
                              ForeignKey('users.id')),
                       Column('post_id', UUIDType(binary=False),
                              ForeignKey('posts.id')),
                       )
class User(Base):
    """A registered user: owns posts and tokens, votes on posts, and can
    follow other users."""
    __tablename__ = 'users'
    id = Column(UUIDType(binary=False), primary_key=True,
                default=uuid.uuid4, index=True, unique=True)
    username = Column(String(length=32), index=True)
    # NOTE(review): column name kept as-is (misspelling of "avatar_url") for
    # schema compatibility.
    avater_url = Column(String(length=256))
    banned = Column(Boolean, default=False)
    posts = relationship("Post", back_populates="owner")
    token = relationship("Token", back_populates="owner")
    # Many-to-many vote relationships, mirroring Post.upvotes/Post.downvotes.
    # BUG FIX: the previous primaryjoin/secondaryjoin lambdas compared
    # User.id against the association table's post_id and joined User to
    # User, which is not a valid join for a User -> Post relationship.
    # A plain `secondary=` lets SQLAlchemy infer the correct joins from the
    # association tables' foreign keys, exactly as the Post side already does.
    upvoted_posts = relationship(
        "Post",
        secondary=user_upvotes,
        back_populates="upvotes")
    downvoted_posts = relationship(
        "Post",
        secondary=user_downvotes,
        back_populates="downvotes")
    # Self-referential follow graph: `followee` lists the users this user
    # follows; the `follower` backref lists the users following this user.
    followee = relationship(
        "User",
        lambda: follow_table,
        primaryjoin=lambda: User.id == follow_table.c.follower_id,
        secondaryjoin=lambda: User.id == follow_table.c.followee_id,
        backref="follower"
    )
class Post(Base):
    """A code-snippet post: the source, its captured execution output, and
    vote relationships back to User."""
    __tablename__ = 'posts'
    id = Column(UUIDType(binary=False), primary_key=True,
                default=uuid.uuid4, index=True)
    title = Column(String(length=32))
    description = Column(String(length=280))
    # The posted source code itself.
    main = Column(String(length=4000))
    # Captured execution results of the snippet.
    stdout = Column(String(length=3000))
    stderr = Column(String(length=3000))
    exitcode = Column(String(length=64))
    post_at = Column(DateTime, default=datetime.now)
    owner_id = Column(UUIDType(binary=False), ForeignKey("users.id"))
    owner = relationship("User", back_populates="posts")
    posted_images = relationship("PostedImage", back_populates="post")
    generated_images = relationship("GeneratedImage", back_populates="post")
    # Users who up-/down-voted this post, via the association tables above.
    upvotes = relationship(
        "User",
        secondary=user_upvotes,
        back_populates="upvoted_posts"
    )
    downvotes = relationship(
        "User",
        secondary=user_downvotes,
        back_populates="downvoted_posts"
    )
class PostedImage(Base):
    """An image attached to a post by its author; deleted with the post."""
    __tablename__ = 'posted_images'
    id = Column(UUIDType(binary=False), primary_key=True,
                default=uuid.uuid4, index=True)
    url = Column(String(length=256))
    post_id = Column(UUIDType(binary=False), ForeignKey(
        "posts.id", ondelete="CASCADE"))
    post = relationship("Post", back_populates="posted_images")
class GeneratedImage(Base):
    """An image produced for a post (as opposed to uploaded); deleted with
    the post via the CASCADE foreign key."""
    __tablename__ = 'generated_images'
    id = Column(UUIDType(binary=False), primary_key=True,
                default=uuid.uuid4, index=True)
    url = Column(String(length=256))
    post_id = Column(UUIDType(binary=False), ForeignKey(
        "posts.id", ondelete="CASCADE"))
    post = relationship("Post", back_populates="generated_images")
class Token(Base):
    """OAuth token bookkeeping for a user's social login."""
    __tablename__ = 'tokens'
    # Primary key is the id assigned by the external (social) identity
    # provider, not a locally generated UUID.
    social_id = Column(BigInteger, primary_key=True, index=True, unique=True)
    refresh_token = Column(String)
    access_token_expire_at = Column(DateTime)
    refresh_token_expire_at = Column(DateTime)
    owner_id = Column(UUIDType(binary=False), ForeignKey('users.id'))
    owner = relationship("User", back_populates="token")
| StarcoderdataPython |
1729878 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" The interface for workbench views. """
# Standard library imports.
import logging
# Enthought library imports.
from pyface.api import ImageResource
from traits.api import Bool, Enum, Float, Instance, List, provides, Str
from traits.util.camel_case import camel_case_to_words
# Local imports.
from .i_perspective_item import IPerspectiveItem
from .i_workbench_part import IWorkbenchPart, MWorkbenchPart
from .perspective_item import PerspectiveItem
# Logging.
logger = logging.getLogger(__name__)
class IView(IWorkbenchPart, IPerspectiveItem):
    """ The interface for workbench views. """

    # Is the view busy? (i.e., should the busy cursor (often an hourglass) be
    # displayed?).
    busy = Bool(False)

    # The category that the view belongs to (this can used to group views when
    # they are displayed to the user).
    category = Str('General')

    # An image used to represent the view to the user (shown in the view tab
    # and in the view chooser etc).
    image = Instance(ImageResource)

    # Whether the view is visible or not.
    visible = Bool(False)

    ###########################################################################
    # 'IView' interface.
    ###########################################################################

    def activate(self):
        """ Activate the view.

        Implementations delegate to the owning workbench window (see MView).
        """

    def hide(self):
        """ Hide the view.
        """

    def show(self):
        """ Show the view.
        """
@provides(IView)
class MView(MWorkbenchPart, PerspectiveItem):
    """ Mixin containing common code for toolkit-specific implementations. """

    #### 'IView' interface ####################################################

    # Is the view busy? (i.e., should the busy cursor (often an hourglass) be
    # displayed?).
    busy = Bool(False)

    # The category that the view belongs to (this can be used to group views
    # when they are displayed to the user).
    category = Str('General')

    # An image used to represent the view to the user (shown in the view tab
    # and in the view chooser etc).
    image = Instance(ImageResource)

    # Whether the view is visible or not.
    visible = Bool(False)

    ###########################################################################
    # 'IWorkbenchPart' interface.
    ###########################################################################

    def _id_default(self):
        """ Trait initializer: default the Id to "<module>.<class>". """

        id = '%s.%s' % (type(self).__module__, type(self).__name__)
        # logger.warn() is a deprecated alias; logger.warning() is the
        # documented spelling.
        logger.warning('view %s has no Id - using <%s>' % (self, id))

        # If no Id is specified then use the name.
        return id

    def _name_default(self):
        """ Trait initializer: default the name from the class name. """

        name = camel_case_to_words(type(self).__name__)
        logger.warning('view %s has no name - using <%s>' % (self, name))

        return name

    ###########################################################################
    # 'IView' interface.
    ###########################################################################

    def activate(self):
        """ Activate the view: delegates to the owning workbench window. """

        self.window.activate_view(self)

        return

    def hide(self):
        """ Hide the view. """

        self.window.hide_view(self)

        return

    def show(self):
        """ Show the view. """

        self.window.show_view(self)

        return
#### EOF ######################################################################
| StarcoderdataPython |
165222 | <filename>craft2onnx.py<gh_stars>1-10
import io
import numpy as np
from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.onnx
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.init as init
from craft import CRAFT
from collections import OrderedDict
#from torch2trt import torch2trt
import imgproc
import cv2
#import tensorrt as trt
def copyStateDict(state_dict):
    """Return a copy of *state_dict* with any leading "module." prefix removed.

    Checkpoints saved from a DataParallel-wrapped model prefix every key with
    "module"; this is detected from the first key, and when present the first
    dotted component is dropped from every key.
    """
    skip = 1 if list(state_dict.keys())[0].startswith("module") else 0
    copied = OrderedDict()
    for key, value in state_dict.items():
        copied[".".join(key.split(".")[skip:])] = value
    return copied
# Load the pretrained CRAFT text-detector weights, run one forward pass on a
# sample image to materialize the graph, then export the model to ONNX.
net = CRAFT()
net.load_state_dict(copyStateDict(torch.load("weights/craft_mlt_25k.pth")))
#net.load_state_dict(torch.load("weights/craft_mlt_25k.pth"))
#image = imgproc.loadImage('./fig8/Pic_2020_10_06_231541_blockId#38.jpg')
image = imgproc.loadImage('20210507031315.jpg')
# Resize so the longer side is at most 1280 px (the alternative target sizes
# below were used for other export variants).
#img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, 526, interpolation=cv2.INTER_LINEAR, mag_ratio=1)
#img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, 2140, interpolation=cv2.INTER_LINEAR, mag_ratio=1)
img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, 1280, interpolation=cv2.INTER_LINEAR, mag_ratio=1)
#img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, 960, interpolation=cv2.INTER_LINEAR, mag_ratio=1.5)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1)    # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0))                # [c, h, w] to [b, c, h, w]
x = x.cuda()
net = net.cuda()
net = net.eval()
# One tracing forward pass; y is the score map, feature the backbone output.
y, feature = net(x)
torch.onnx.export(
    net,
    x,
    "craft_1280.onnx",
    export_params=True,
    opset_version=11,
    verbose=True)
# The commented code below is from earlier TensorRT (torch2trt) conversion
# experiments at various resolutions / precisions, kept for reference.
#x = torch.randn(1, 3, 928, 1280).cuda()
#model_trt = torch2trt(net, [x], int8_mode=True, int8_calib_algorithm=trt.CalibrationAlgoType.MINMAX_CALIBRATION)
#model_trt = torch2trt(net, [x], int8_mode=True, int8_calib_algorithm=trt.CalibrationAlgoType.MINMAX_CALIBRATION)
#model_trt = torch2trt(net, [x], int8_mode=True, int8_calib_algorithm=trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2)
#model_trt = torch2trt(net, [x], fp16_mode=True)
#model_trt = torch2trt(net, [x])
#torch.save(model_trt.state_dict(), 'craft_trt_int8_min_max_960_.pth')
#    "craft_trt_int8_min_max.onnx",
#torch.onnx.export(
#    model_trt,
#    x,
#    "craft_trt.onnx",
#    export_params=True,
#    )
#torch.save(model_trt.state_dict(), 'craft_trt_526_fp16.pth')
#torch.save(model_trt.state_dict(), 'craft_trt_2140_int8_minmax.pth')
#torch.save(model_trt.state_dict(), 'craft_trt_fp16.pth')
#net = torch.nn.DataParallel(net)
#net = torch.nn.DataParallel(net)
#cudnn.benchmark = False
1862157 | from functools import lru_cache
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count the palindromic substrings of *s*, each occurrence counted.

        Classic O(n^2) interval DP: dp[left][right] is True when
        s[left..right] is a palindrome, filled by increasing right endpoint.
        """
        n = len(s)
        dp = [[False] * n for _ in range(n)]
        count = 0
        for right in range(n):
            for left in range(right, -1, -1):
                if left == right:
                    # Single character: always a palindrome.
                    dp[left][right] = True
                elif right == left + 1:
                    # Two characters: palindrome iff they match.
                    dp[left][right] = s[left] == s[right]
                else:
                    # Matching ends around an inner palindrome.
                    dp[left][right] = (
                        s[left] == s[right] and dp[left + 1][right - 1]
                    )
                if dp[left][right]:
                    count += 1
        return count
| StarcoderdataPython |
4987920 | import sys
def solve1(groups):
    """Part 1: for each group, count questions answered 'yes' by ANYONE,
    and return the total over all groups."""
    return sum(len(set("".join(group))) for group in groups)
def solve2(groups):
    """Part 2: for each group, count questions answered 'yes' by EVERYONE,
    and return the total over all groups."""
    alphabet = {chr(ord('a') + i) for i in range(26)}
    total = 0
    for group in groups:
        # Start from the full alphabet and intersect with each person's answers.
        common = set(alphabet)
        for answers in group:
            common &= set(answers)
        total += len(common)
    return total
if __name__ == "__main__":
inp = [l.rstrip() for l in sys.stdin]
inp.append('')
groups = []
cur = []
for ans in inp:
if len(ans) == 0:
groups.append(cur)
cur = []
continue
cur.append(ans)
print(solve1(groups))
print(solve2(groups)) | StarcoderdataPython |
11301992 | #!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2022 GT4SD team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Run model upload for the GT4SD.
Two steps procedure: check if the folder/model name is already in the database.
If not, upload it.
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import IO, Iterable, Optional, cast
from ..algorithms.registry import ApplicationsRegistry
from ..training_pipelines import TRAINING_PIPELINE_ARGUMENTS_FOR_MODEL_SAVING
from ..training_pipelines.core import TrainingPipelineArguments
from .algorithms import (
AVAILABLE_ALGORITHMS,
AVAILABLE_ALGORITHMS_CATEGORIES,
filter_algorithm_applications,
get_configuration_tuples,
)
from .argument_parser import ArgumentParser, DataClassType
logger = logging.getLogger(__name__)
SUPPORTED_TRAINING_PIPELINES = sorted(
TRAINING_PIPELINE_ARGUMENTS_FOR_MODEL_SAVING.keys()
)
@dataclass
class SavingArguments:
    """Algorithm saving arguments."""

    # Identifier used by SavingArgumentParser to recognise these arguments
    # among the parsed dataclasses.
    __name__ = "saving_base_args"

    training_pipeline_name: str = field(
        metadata={
            "help": f"Training pipeline name, supported pipelines: {', '.join(SUPPORTED_TRAINING_PIPELINES)}."
        },
    )
    target_version: str = field(
        metadata={"help": "Target algorithm version to save."},
    )
    # The four optional filters below narrow down which registered algorithm
    # configuration the saved artifacts belong to.
    algorithm_type: Optional[str] = field(
        default=None,
        metadata={
            "help": f"Inference algorithm type, supported types: {', '.join(AVAILABLE_ALGORITHMS_CATEGORIES['algorithm_type'])}."
        },
    )
    domain: Optional[str] = field(
        default=None,
        metadata={
            "help": f"Domain of the inference algorithm, supported types: {', '.join(AVAILABLE_ALGORITHMS_CATEGORIES['domain'])}."
        },
    )
    algorithm_name: Optional[str] = field(
        default=None,
        metadata={"help": "Inference algorithm name."},
    )
    algorithm_application: Optional[str] = field(
        default=None,
        metadata={"help": "Inference algorithm application."},
    )
    source_version: Optional[str] = field(
        default=None,
        metadata={"help": "Source algorithm version to use for missing artifacts."},
    )
class SavingArgumentParser(ArgumentParser):
    """Argument parser using a custom help logic."""

    def print_help(self, file: Optional[IO[str]] = None) -> None:
        """Print help checking dynamically whether a specific pipeline is passed.

        Args:
            file: an optional I/O stream. Defaults to None, a.k.a., stdout and stderr.
        """
        try:
            help_args_set = {"-h", "--help"}
            if (
                len(set(sys.argv).union(help_args_set)) < len(help_args_set) + 2
            ):  # considering filename
                # Nothing besides the help flag was passed: generic help.
                super().print_help()
                return
            # Re-parse the command line without the help flags to discover
            # which training pipeline (if any) the user selected.
            args = [arg for arg in sys.argv if arg not in help_args_set]
            parsed_arguments = super().parse_args_into_dataclasses(
                args=args, return_remaining_strings=True
            )
            trainer_arguments = None
            for arguments in parsed_arguments:
                if arguments.__name__ == "trainer_base_args":
                    trainer_arguments = arguments
                    break
            if trainer_arguments:
                # Show pipeline-specific saving help, falling back to the
                # generic TrainingPipelineArguments for unknown pipelines.
                # (A dangling no-op expression that read
                # trainer_arguments.training_pipeline_name without using it
                # has been removed here.)
                training_pipeline_arguments = (
                    TRAINING_PIPELINE_ARGUMENTS_FOR_MODEL_SAVING.get(
                        trainer_arguments.training_pipeline_name,
                        TrainingPipelineArguments,
                    )
                )
                parser = ArgumentParser(
                    tuple(
                        [SavingArguments, *training_pipeline_arguments]  # type:ignore
                    )
                )
                parser.print_help()
        except Exception:
            # Any parsing problem degrades to the generic help rather than
            # crashing a --help invocation.
            super().print_help()
def main() -> None:
    """
    Run an algorithm saving pipeline.

    Raises:
        ValueError: in case the provided training pipeline provided is not supported.
    """
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    base_args = SavingArgumentParser(
        cast(DataClassType, SavingArguments)
    ).parse_args_into_dataclasses(return_remaining_strings=True)[0]
    training_pipeline_name = base_args.training_pipeline_name
    if training_pipeline_name not in set(SUPPORTED_TRAINING_PIPELINES):
        # BUG FIX: the ValueError used to be constructed but never raised,
        # silently continuing with an unsupported pipeline name.
        raise ValueError(
            f"Training pipeline {training_pipeline_name} is not supported. Supported types: {', '.join(SUPPORTED_TRAINING_PIPELINES)}."
        )

    training_pipeline_saving_arguments = TRAINING_PIPELINE_ARGUMENTS_FOR_MODEL_SAVING[
        training_pipeline_name
    ]
    parser = SavingArgumentParser(
        cast(
            Iterable[DataClassType],
            tuple([SavingArguments, training_pipeline_saving_arguments]),
        )
    )
    saving_args, training_pipeline_saving_args, _ = parser.parse_args_into_dataclasses(
        return_remaining_strings=True
    )
    # CLI filters narrowing down which algorithm configuration to save into.
    filters = {
        key: saving_args.__dict__[key]
        for key in [
            "algorithm_type",
            "algorithm_application",
            "domain",
            "algorithm_name",
            "source_version",
        ]
    }
    configuration_tuples = get_configuration_tuples(
        filter_algorithm_applications(algorithms=AVAILABLE_ALGORITHMS, filters=filters)
    )
    # too many configurations compatible
    if len(configuration_tuples) > 1:
        logger.info(
            f"Multiple configurations matching the parameters:{os.linesep}"
            f"{os.linesep.join(map(str, configuration_tuples))}{os.linesep}"
            f"Select one by specifying additional algorithms parameters: {','.join('--' + key for key, value in filters.items() if not value)}.",
        )
        return
    # no configurations compatible
    elif len(configuration_tuples) < 1:
        provided_filters = {key: value for key, value in filters.items() if value}
        logger.error(
            "No configurations matching the provided parameters, "
            f"please review the supported configurations:{os.linesep}"
            f"{os.linesep.join(map(str, configuration_tuples))}{os.linesep}"
            f"Please review the parameters provided:{os.linesep}"
            f"{provided_filters}"
        )
        # BUG FIX: without this return the code fell through and indexed
        # configuration_tuples[0] on an empty list, raising IndexError.
        return
    configuration_tuple = configuration_tuples[0]
    logger.info(f"Selected configuration: {configuration_tuple}")
    algorithm_application = ApplicationsRegistry.applications[configuration_tuple]
    configuration_class = algorithm_application.configuration_class
    logger.info(
        f'Saving model version "{saving_args.target_version}" with the following configuration: {configuration_class}'
    )
    # implement upload method using minio and s3.py
    # core.py and configuration.py
    configuration_class.upload_version_from_training_pipeline_arguments(
        training_pipeline_arguments=training_pipeline_saving_args,
        target_version=saving_args.target_version,
        source_version=saving_args.source_version,
    )


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4801903 | #!/usr/bin/env python3
# Benchmark matrix: (RMW implementation key, publishing rate in Hz, number of
# runs recorded for that combination).
tests = [
    # RMW Hz, runs
    ('f', 80, 10),
    ('c', 80, 10),
    ('f', 100, 10),
    ('c', 100, 10),
    ('f', 120, 10),
    ('c', 120, 10),
]
# Human-readable names for the RMW keys used in the result filenames.
rmw_names = {'f': 'FastRTPS', 'c': 'CycloneDDS'}
# One progressively darker line colour per run: blues for FastRTPS,
# greens for CycloneDDS.
rmw_colors = {
    'f': [
        '#0000ff',
        '#0000ef',
        '#0000df',
        '#0000cf',
        '#0000bf',
        '#0000af',
        '#00009f',
        '#00008f',
        '#00007f',
        '#00006f',
    ],
    'c': [
        '#00ff00',
        '#00ef00',
        '#00df00',
        '#00cf00',
        '#00bf00',
        '#00af00',
        '#009f00',
        '#008f00',
        '#007f00',
        '#006f00',
    ]}
# data_by_frequency = {}
# For each (rmw, frequency) combination: parse the publisher/subscriber log
# files of every run, sanity-check their headers, collect the per-second
# sent/received message counts, and plot received counts per run.
for rmw, frequency, runs in tests:
    rmw_name = rmw_names[rmw]
    print(rmw_name, 'with', frequency, 'hz', f'({runs} runs)')
    all_runs_sent = []
    all_runs_received = []
    for run in range(1, runs + 1):
        print(' run', run, '-', 'sent', 'vs', 'received')
        pub_file = f'{rmw}-p-{frequency}-{run}.txt'
        sub_file = f'{rmw}-s-{frequency}-{run}.txt'
        with open(pub_file, 'r') as h:
            pub_lines = h.read().splitlines()
        # Header sanity checks: the publisher log must describe the expected
        # experiment setup.
        assert 'Topic name: Array60k' in pub_lines
        assert f'Publishing rate: {frequency}' in pub_lines
        assert 'Maximum runtime (sec): 30' in pub_lines
        assert 'Number of publishers: 1' in pub_lines
        assert 'Number of subscribers: 0' in pub_lines
        with open(sub_file, 'r') as h:
            sub_lines = h.read().splitlines()
        assert 'Topic name: Array60k' in sub_lines
        assert f'Publishing rate: {frequency}' in sub_lines
        assert 'Maximum runtime (sec): 30' in sub_lines
        assert 'Number of publishers: 0' in sub_lines
        assert 'Number of subscribers: 1' in sub_lines
        # Fixed log layout: line 19 is the column header, lines 20-48 are the
        # per-second samples, line 49 is the termination message.
        assert pub_lines[19].startswith('T_experiment,')
        assert pub_lines[49] == 'Maximum runtime reached. Exiting.'
        assert sub_lines[19].startswith('T_experiment,')
        assert sub_lines[49] == 'Maximum runtime reached. Exiting.'
        run_sent = []
        run_received = []
        for i in range(20, 49):
            pub_cols = pub_lines[i].split(',\t')
            sub_cols = sub_lines[i].split(',\t')
            # print(pub_cols[0], sub_cols[0])
            # First column is the elapsed-seconds timestamp "<sec>.<frac>".
            assert pub_cols[0].startswith('%d.' % (i - 18))
            assert sub_cols[0].startswith('%d.' % (i - 18))
            sent = pub_cols[3].strip()
            received = sub_cols[2].strip()
            print(' ', sent, received)
            run_sent.append(int(sent))
            run_received.append(int(received))
        all_runs_sent.append(run_sent)
        all_runs_received.append(run_received)
    # Imported here (cached after the first iteration) so the parsing above
    # can run even without pandas installed until plotting is reached.
    import pandas
    data = {}
    for run in range(1, runs + 1):
        # data[f'Sent by Publisher #{run}'] = all_runs_sent[run - 1]
        # data[f'Received by Subscription #{run}'] = all_runs_received[run - 1]
        data[f'Run #{run}'] = all_runs_received[run - 1]
    tdf = pandas.DataFrame(data)
    tdf.index += 1  # Index from 1, since the index is really time in seconds
    # NOTE(review): recent pandas spells this keyword `color=`; `colors=` may
    # need updating depending on the installed pandas version — verify.
    ax = tdf.plot(kind='line', colors=rmw_colors[rmw])
    ax.set_title(
        f'Array60k @ {frequency} Hz - 1 to 1 Pub/Sub across wifi\n'
        f'{rmw_name}, reliable, volatile, keep_last@10')
    ax.set_xlabel('Time in Seconds')
    ax.set_ylabel('Number of Messages')
    ax.get_figure().savefig(f'{rmw}-{frequency}.png', bbox_inches='tight')
    print()
| StarcoderdataPython |
6425430 | from flask import Flask
from app import app
from flask import render_template
from flask import request
from flask import url_for
@app.route('/')
def home():
    # Build an anchor pointing at the question-creation page; url_for resolves
    # the URL registered for the `create` view function.
    create_link = "<a href= '"+url_for('create')+"'> <h2>Create a Question </h2></a>"
    return """
    <html>
        <head>
            <title> HomePage </title>
                <body> """+ create_link+ """
                </body>
        </head>
    </html>
    """
@app.route('/create', methods=['GET', 'POST'])
def create():
    """Render the question-creation form (GET) and accept submissions (POST).

    Fixes vs. the original:
      * HTTP methods belong in the route decorator's ``methods`` argument —
        the original passed ``method=['GET','POST']`` as a (mutable) default
        argument of the view, which Flask never uses, so POST was rejected.
      * the ``elif request.method == 'POST'`` line was missing its colon
        (SyntaxError: the module could not even be imported).
      * the POST branch collected the form fields but returned nothing,
        which Flask treats as an error; it now returns a confirmation page.
    """
    if request.method == 'GET':
        return render_template('CreateQuestion.html')
    elif request.method == 'POST':
        title = request.form['title']
        question = request.form['question']
        answer = request.form['answer']
        # TODO: persist title/question/answer; for now just acknowledge.
        return "<h3> Created question: {} </h3>".format(title)
    else:
        return """
        <h3> Invalid URL </h3>
        """
| StarcoderdataPython |
9767377 | #!/usr/bin/env python
import numpy as np
import unittest
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.utils.test_utils import framework_iterator
def get_mean_action(alg, obs):
    """Average the action an algorithm computes for a fixed observation.

    The action is sampled 2000 times so that stochastic policies converge
    to a stable mean that can be compared across restored checkpoints.
    """
    samples = [float(alg.compute_action(obs)) for _ in range(2000)]
    return np.mean(samples)
# Per-algorithm config overrides used by ckpt_restore_test().  Exploration
# is disabled everywhere so action comparisons between the original and the
# restored agent are deterministic apart from policy stochasticity.
CONFIGS = {
    "A3C": {
        "explore": False,
        "num_workers": 1,
    },
    "APEX_DDPG": {
        "explore": False,
        "observation_filter": "MeanStdFilter",
        "num_workers": 2,
        "min_iter_time_s": 1,
        "optimizer": {
            "num_replay_buffer_shards": 1,
        },
    },
    "ARS": {
        "explore": False,
        "num_rollouts": 10,
        "num_workers": 2,
        "noise_size": 2500000,
        "observation_filter": "MeanStdFilter",
    },
    "DDPG": {
        "explore": False,
        "timesteps_per_iteration": 100,
    },
    "DQN": {
        "explore": False,
    },
    "ES": {
        "explore": False,
        "episodes_per_batch": 10,
        "train_batch_size": 100,
        "num_workers": 2,
        "noise_size": 2500000,
        "observation_filter": "MeanStdFilter",
    },
    "PPO": {
        "explore": False,
        "num_sgd_iter": 5,
        "train_batch_size": 1000,
        "num_workers": 2,
    },
    "SAC": {
        "explore": False,
    },
}
def ckpt_restore_test(use_object_store, alg_name, failures, framework="tf"):
    """Train one agent, restore its checkpoint into a second agent, and
    check that both produce (nearly) the same mean action.

    Args:
        use_object_store: restore via the ray object store instead of disk.
        alg_name: key into CONFIGS selecting the algorithm under test.
        failures: list that (alg_name, [a1, a2]) is appended to on mismatch.
        framework: deep-learning framework to run with ("tf" or "torch").
    """
    agent_cls = get_agent_class(alg_name)
    config = CONFIGS[alg_name]
    config["framework"] = framework
    # Continuous-control algorithms need a continuous-action environment.
    continuous = "DDPG" in alg_name or "SAC" in alg_name
    env_id = "Pendulum-v0" if continuous else "CartPole-v0"
    alg1 = agent_cls(config=config, env=env_id)
    alg2 = agent_cls(config=config, env=env_id)
    policy1 = alg1.get_policy()

    for _ in range(1):
        res = alg1.train()
        print("current status: " + str(res))

    # Sync the models
    if use_object_store:
        alg2.restore_from_object(alg1.save_to_object())
    else:
        alg2.restore(alg1.save())

    # Pendulum observations have 3 components, CartPole has 4.
    obs_size = 3 if continuous else 4
    for _ in range(1):
        obs = np.clip(
            np.random.uniform(size=obs_size),
            policy1.observation_space.low,
            policy1.observation_space.high)
        a1 = get_mean_action(alg1, obs)
        a2 = get_mean_action(alg2, obs)
        print("Checking computed actions", alg1, obs, a1, a2)
        if abs(a1 - a2) > .1:
            failures.append((alg_name, [a1, a2]))
class TestCheckpointRestore(unittest.TestCase):
    """Checkpoint save/restore regression tests across RLlib algorithms."""

    @classmethod
    def setUpClass(cls):
        # One shared ray cluster for all tests in this class.
        ray.init(num_cpus=10, object_store_memory=1e9)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_checkpoint_restore(self):
        """Run the restore check for every framework x store x algorithm."""
        failures = []
        algo_names = [
            "A3C", "APEX_DDPG", "ARS", "DDPG", "DQN", "ES", "PPO",
            "SAC"
        ]
        for fw in framework_iterator(frameworks=("tf", "torch")):
            for use_object_store in (False, True):
                for name in algo_names:
                    print("Testing algo={} (use_object_store={})".format(
                        name, use_object_store))
                    ckpt_restore_test(
                        use_object_store, name, failures, framework=fw)
        assert not failures, failures
        print("All checkpoint restore tests passed!")
if __name__ == "__main__":
    # Run this module's test cases through pytest when executed as a script.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| StarcoderdataPython |
9678808 | """
PDF highlighter module
"""
import re
import sys
from pdfminer.high_level import extract_pages
from pdfminer.layout import LAParams, LTTextBox, LTTextLine
from pdf_annotate import PdfAnnotator, Location, Appearance
from . import base
class Highlighter(base.Highlighter):
    """
    Finds text and adds annotations to PDF files.
    """
    def highlight(self, infile, outfile, highlights):
        """
        Searches each page of a PDF for the given highlight queries and writes
        an annotated copy of the file.
        Args:
            infile: full path to input PDF
            outfile: full path to annotated output PDF
            highlights: list of (name, query) pairs to search for
        Returns:
            list of annotations written: (name, rgb, page, x1, y1, x2, y2)
        """
        annotations = []
        for page, layout in enumerate(extract_pages(infile, laparams=LAParams(line_margin=1.0, char_margin=4.0))):
            elements = []
            # Extract elements
            self.extract(elements, layout)
            # Get formatted page text
            text = self.text(elements)
            for name, query in highlights:
                result = self.search(query, text)
                if result:
                    # Unpack start/end line numbers
                    start, end = result
                    # Colors index
                    index = len(annotations) % len(base.COLORS)
                    # Detect if annotation needs to cover multiple columns
                    # (the match starts at a lower x position than it ends,
                    # i.e. it wraps from one column into the next)
                    if elements[start][0][1] < elements[end][0][1]:
                        eindex = start
                        # Get last element in first column
                        while eindex < end:
                            if elements[eindex][0][1] < elements[end][0][1]:
                                eindex += 1
                            else:
                                break
                        # Create annotation for each column
                        annotations.append((name, base.COLORS[index], page) + self.layout(elements[start:eindex]))
                        annotations.append((name, base.COLORS[index], page) + self.layout(elements[eindex:end+1]))
                    else:
                        # Single column annotation
                        annotations.append((name, base.COLORS[index], page) + self.layout(elements[start:end+1]))
        self.annotate(annotations, infile, outfile)
        return annotations
    def extract(self, elements, layout):
        """
        Extracts text lines and associated coordinates.
        Args:
            elements: list that stores extracted elements
            layout: input layout elements to process
        """
        # loop over the object list
        for obj in layout:
            if isinstance(obj, LTTextLine):
                # Get text instance
                text = obj.get_text()
                # Clean common ligatures and unicode chars
                pairs = [("ff", "ff"), ("ffi", "ffi"), ("fi", "fi"), ("fl", "fl"), ("\u2010", "-"), ("\u2013", "-")]
                for find, replace in pairs:
                    text = text.replace(find, replace)
                # Apply custom formatting of text
                # NOTE(review): self.formatter appears to be set by
                # base.Highlighter — confirm against the base class.
                if self.formatter:
                    text = self.formatter(text)
                # Add newline back to end of lines in case formatter removed them
                if not text.endswith("\n"):
                    text += "\n"
                if text:
                    elements.append((obj.bbox, text))
            # Recursively process text boxes and figures
            if isinstance(obj, LTTextBox):
                self.extract(elements, obj)
    def text(self, elements):
        """
        Concats all text in elements into a single string for searching.
        Args:
            elements: list of ((coordinates), text)
        Returns:
            text string
        """
        for x, (_, t) in enumerate(elements):
            if " " in t and t.endswith("-\n") and len(elements) > x + 1:
                # When text is hyphenated, join word back and move to next line
                t, last = t.rsplit(" ", 1)
                t = t + "\n"
                last = last.replace("-\n", "")
                elements[x] = (elements[x][0], t)
                elements[x + 1] = (elements[x + 1][0], last + elements[x + 1][1])
        return "".join([t for _, t in elements])
    def search(self, query, text):
        """
        Searches a text string using input query.
        Args:
            query: query expression
            text: text string to search
        Returns:
            (start, end) indices of matching elements
        """
        # Matching indices
        start, end = sys.maxsize, -1
        if self.formatter:
            query = self.formatter(query)
        # NOTE(review): self.chunks is presumably set by base.Highlighter;
        # the query looks like it is expected to be regex-escaped already
        # (see the r"\ " replacement below) — confirm with callers.
        if self.chunks > 0:
            # Chunk into subqueries, require at least 50 chars per chunk
            n = max(int(len(query) / self.chunks), 50)
            subqueries = [query[x:x+n] for x in range(0, len(query), n)]
            # Ensure last chunk is n chars or bigger
            if len(subqueries) > 1 and len(subqueries[-1]) < n:
                subqueries[-2] += subqueries[-1]
                subqueries = subqueries[:-1]
        else:
            subqueries = [query]
        for subquery in subqueries:
            # Allow any whitespace. Handles newlines.
            subquery = subquery.replace(r"\ ", r"\s").replace(r" ", r"\s")
            if self.chunks > 0:
                # With chunks enabled, allow optional whitespace after each char. Handles newlines.
                subquery = "".join([q + r"\s?" for q in subquery])
            # Search text for matching string, count newlines to get matching line indices
            match = re.search(subquery, text)
            if match:
                # Get start index, only store min start across subqueries
                start = min(start, text.count("\n", 0, match.start()))
                # Get end index, adjust if ends with newline
                # Only store max end across subqueries
                mend = text.count("\n", 0, match.end())
                if match.group().endswith("\n"):
                    mend = max(start, mend - 1)
                end = max(end, mend)
        return (start, end) if end != -1 else None
    def layout(self, elements):
        """
        Builds a bounding box for an annotation from a list of elements. This method searches the element list
        and finds the left, bottom, right and top coordinates.
        Args:
            elements: list of ((x1, y1, x2, y2), text)
        Returns:
            (left, bottom, right, top) coordinates
        """
        left = min([element[0][0] for element in elements])
        bottom = min([element[0][1] for element in elements])
        right = max([element[0][2] for element in elements])
        top = max([element[0][3] for element in elements])
        return (left, bottom, right, top)
    def annotate(self, annotations, infile, outfile):
        """
        Annotates a file.
        Args:
            annotations: list of annotations (title, rgb color, page #, x1, y1, x2, y2)
            infile: full path to input file
            outfile: full path to output file
        """
        annotator = PdfAnnotator(infile)
        # List of text ranges already defined
        ranges = []
        for title, rgb, page, x1, y1, x2, y2 in annotations:
            # Highlight text
            annotator.add_annotation("square", Location(x1=x1, y1=y1, x2=x2, y2=y2, page=page),
                                     Appearance(fill=rgb + (0.3,), stroke_color=rgb + (0.3, ), stroke_width=0))
            if title:
                # Determine if title text should be in left or right margin
                if x1 < 250:
                    x1, x2 = max(5, x1 - 35), x1
                else:
                    x1, x2 = x2, x2 + 35
                # Calculate center of highlight annotation and offset
                center = y1 + ((y2 - y1) / 2)
                offset = min(max(5, len(title)), 20)
                # Set position of text annotation. Handle column layout conflicts.
                y1, y2 = self.position(ranges, page, x1 >= 250, center, offset)
                # Add title annotation next to highlight
                annotator.add_annotation("text", Location(x1=x1, y1=y1, x2=x2, y2=y2, page=page),
                                         Appearance(fill=rgb + (1,), font_size=7, stroke_width=1, content=title))
                # Register range
                ranges.append((page, 0 if x1 < 250 else 1, y1, y2))
        annotator.write(outfile)
    def position(self, ranges, page, column, center, offset):
        """
        Searches for the closest open range to use for an annotation element.
        Args:
            ranges: list of existing annotation ranges
            page: page to write annotation
            column: column to write annotation
            center: desired center position of annotation
            offset: +/- value to use from center to build layout range
        Returns:
            y1, y2 open vertical range to use for new annotation
        """
        # Initial y1/y2 position
        y1, y2 = center - offset, center + offset
        # Try initial position
        conflicts = self.conflicts(ranges, page, column, y1, y2)
        while conflicts:
            # Try with negative offset
            conflicts = self.conflicts(ranges, page, column, y1 - offset, y2 - offset)
            if not conflicts:
                y1, y2 = y1 - offset, y2 - offset
            else:
                # Try with positive offset
                conflicts = self.conflicts(ranges, page, column, y1 + offset, y2 + offset)
                if not conflicts:
                    y1, y2 = y1 + offset, y2 + offset
                else:
                    # Increase offset
                    offset *= 1.5
        return y1, y2
    def conflicts(self, ranges, page, column, y1, y2):
        """
        Tests y1-y2 range for significant range conflicts on current page/column.
        Args:
            ranges: list of ranges to test
            page: current page
            column: current column
            y1: y start position
            y2: y end position
        Returns:
            True if significant range conflicts exist, False otherwise
        """
        for p, c, start, end in ranges:
            if page == p and column == c and self.overlaps(start, end, y1, y2) > 5:
                return True
        return False
    def overlaps(self, start1, end1, start2, end2):
        """
        Determines if two coordinate sets overlap in range.
        Args:
            start1: range 1 start
            end1: range 1 end
            start2: range 2 start
            end2: range2 end
        Returns:
            number of overlapping coordinates
        """
        return len(set(range(int(start1), int(end1))) & set(range(int(start2), int(end2))))
| StarcoderdataPython |
11394980 | <gh_stars>1000+
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
from pathlib import Path
from MiniFramework.EnumDef_5_0 import *
from MiniFramework.Optimizer_1_0 import *
class WeightsBias_2_1(object):
    """Weight/bias matrix pair for one fully-connected layer, including
    persistence (save/load to .npz) and per-parameter optimizers."""

    def __init__(self, n_input, n_output, init_method, optimizer_name, eta):
        """
        Args:
            n_input: number of input units (rows of W).
            n_output: number of output units (columns of W).
            init_method: InitialMethod enum value selecting initialization.
            optimizer_name: OptimizerName enum value for OptimizerFactory.
            eta: learning rate passed to the optimizers.
        """
        self.num_input = n_input
        self.num_output = n_output
        self.init_method = init_method
        self.optimizer_name = optimizer_name
        self.eta = eta
        # File stem used to cache the initial weights on disk.
        self.initial_value_filename = str.format("w_{0}_{1}_{2}_init", self.num_input, self.num_output, self.init_method.name)

    def InitializeWeights(self, folder, create_new):
        """Create (or load cached) initial W/B and set up optimizers.

        Args:
            folder: directory used for the initial-value cache files.
            create_new: force fresh initialization instead of loading.
        """
        self.folder = folder
        if create_new:
            self.__CreateNew()
        else:
            self.__LoadExistingParameters()
        # end if
        self.__CreateOptimizers()
        # Gradient accumulators, shaped like their parameters.
        self.dW = np.zeros(self.W.shape)
        self.dB = np.zeros(self.B.shape)

    def __CreateNew(self):
        """Initialize W/B from scratch and cache them to disk."""
        self.W, self.B = WeightsBias_2_1.InitialParameters(self.num_input, self.num_output, self.init_method)
        self.__SaveInitialValue()

    def __LoadExistingParameters(self):
        """Load cached initial values, falling back to fresh creation."""
        # NOTE: the "\\" separator makes these paths Windows-specific.
        file_name = str.format("{0}\\{1}.npz", self.folder, self.initial_value_filename)
        w_file = Path(file_name)
        if w_file.exists():
            self.__LoadInitialValue()
        else:
            self.__CreateNew()
        # end if

    def __CreateOptimizers(self):
        """Create one optimizer instance each for W and B."""
        self.oW = OptimizerFactory.CreateOptimizer(self.eta, self.optimizer_name)
        self.oB = OptimizerFactory.CreateOptimizer(self.eta, self.optimizer_name)

    def pre_Update(self):
        """Nesterov look-ahead step, applied only for the Nag optimizer.

        Fix: the original referenced the nonexistent attributes
        self.optimizer, self.oW1 and self.oB1 (AttributeError at runtime);
        the stored names are optimizer_name, oW and oB.
        """
        if self.optimizer_name == OptimizerName.Nag:
            self.W = self.oW.pre_update(self.W)
            self.B = self.oB.pre_update(self.B)
        # end if

    def Update(self):
        """Apply one optimizer step to W and B using dW/dB."""
        self.W = self.oW.update(self.W, self.dW)
        self.B = self.oB.update(self.B, self.dB)

    def __SaveInitialValue(self):
        """Cache the freshly initialized W/B to the folder."""
        file_name = str.format("{0}\\{1}.npz", self.folder, self.initial_value_filename)
        np.savez(file_name, weights=self.W, bias=self.B)

    def __LoadInitialValue(self):
        """Load cached initial W/B from the folder."""
        file_name = str.format("{0}\\{1}.npz", self.folder, self.initial_value_filename)
        data = np.load(file_name)
        self.W = data["weights"]
        self.B = data["bias"]

    def SaveResultValue(self, folder, name):
        """Save trained W/B to folder/name.npz and print weight norms."""
        file_name = str.format("{0}\\{1}.npz", folder, name)
        np.savez(file_name, weights=self.W, bias=self.B)
        print("--------------")
        print("W=", self.W)
        w_norm_1 = np.sum(abs(self.W))
        print("W_norm_1=", w_norm_1)
        w_norm_2 = np.linalg.norm(self.W,2)
        print("W_norm_2=", w_norm_2)

    def LoadResultValue(self, folder, name):
        """Load trained W/B from folder/name.npz."""
        file_name = str.format("{0}\\{1}.npz", folder, name)
        data = np.load(file_name)
        self.W = data["weights"]
        self.B = data["bias"]

    @staticmethod
    def InitialParameters(num_input, num_output, method):
        """Build the initial (W, B) pair for the requested scheme.

        Args:
            num_input: rows of W.
            num_output: columns of W.
            method: InitialMethod enum member (Zero, Normal, MSRA, Xavier).
        Returns:
            (W, B): W of shape (num_input, num_output), B zeros (1, num_output).
        """
        if method == InitialMethod.Zero:
            # zero
            W = np.zeros((num_input, num_output))
        elif method == InitialMethod.Normal:
            # normalize
            W = np.random.normal(size=(num_input, num_output))
        elif method == InitialMethod.MSRA:
            W = np.random.normal(0, np.sqrt(2/num_output), size=(num_input, num_output))
        elif method == InitialMethod.Xavier:
            # xavier
            W = np.random.uniform(-np.sqrt(6/(num_output+num_input)),
                                  np.sqrt(6/(num_output+num_input)),
                                  size=(num_input, num_output))
        # end if
        B = np.zeros((1, num_output))
        return W, B
| StarcoderdataPython |
8078096 | from output.models.ms_data.attribute.att_j018_xsd.att_j018 import (
Doc,
Test,
)
# Explicit public API of this generated re-export module.
__all__ = [
    "Doc",
    "Test",
]
| StarcoderdataPython |
11363438 | # Copyright (c) 2019, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
"""
Guidelines for writing new hacking checks
- Use only for openstacksdk specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range O3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the O3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
# Matches logical lines that define a setUpClass method.
SETUPCLASS_RE = re.compile(r"def setUpClass\(")


def assert_no_setupclass(logical_line):
    """Flag any definition of setUpClass.

    O300
    """
    if SETUPCLASS_RE.match(logical_line) is not None:
        yield (0, "O300: setUpClass not allowed")
def factory(register):
    """Register every local hacking check with flake8's plugin machinery."""
    register(assert_no_setupclass)
| StarcoderdataPython |
65027 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-20 10:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Car.seats_available to
    # Car.seat_capacity and makes Car querysets order by that field.
    # Operation order is significant — do not reorder.

    dependencies = [
        ('driver', '0003_auto_20180520_1217'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='car',
            options={'ordering': ['seat_capacity']},
        ),
        migrations.RenameField(
            model_name='car',
            old_name='seats_available',
            new_name='seat_capacity',
        ),
    ]
| StarcoderdataPython |
5106525 | from convokit.model import Corpus, Utterance, Speaker
# Test utterances: each English string exemplifies one politeness strategy
# recognized by ConvoKit's politeness machinery.
GRATITUDE = 'I really appreciate that you have done them. Thanks a lot.'
DEFERENCE = 'Nice work so far on your rewrite.'
GREETING = 'Hey, how are you doing these days?'
APOLOGY = 'Sorry to bother you, but I need someone to work on this project.'
PLEASE = 'Could you please elaborate more?'
PLEASE_START = 'Please do not remove warnings.'
BTW = 'By the way, where did you find that picture?'
DIRECT_QN = 'What is your native language?'
DIRECT_START = 'So can you retrieve it or not?'
SUBJUNCTIVE = 'Could you please proofread this article?'
INDICATIVE = 'Can you proofread this article for me?'
HEDGES = 'I suggest we start with the simplest case.'
FACTUALITY = 'In fact, our data does not support this claim.'
# Chinese (zh) counterparts of the strategies above.
GRATITUDE_ZH = "非常感谢您的帮助。"
DEFERENCE_ZH = "干得漂亮!改写得非常好。"
GREETING_ZH = "嗨,你现在有空吗?"
APOLOGY_ZH = "不好意思打扰你了,你有时间帮我修改一下这份草稿吗?"
PLEASE_ZH = "可不可以请您说慢一点?"
PLEASE_START_ZH = "请留意我们的后续通知。"
BTW_ZH = "顺便问一下,你是在哪里看到这些照片的?"
DIRECT_QN_ZH = "哪里出了问题?"
HEDGES_ZH = "应该可以吧。"
FACTUALITY_ZH = "说实话,我也不懂这些。"
def politeness_test_corpus():
    """Build a two-speaker Corpus whose utterances each exhibit one English
    politeness strategy, chained together as a single reply thread."""
    speakers = [Speaker(id='alice'), Speaker(id='bob')]
    texts = [GRATITUDE, DEFERENCE, GREETING, APOLOGY, PLEASE,
             PLEASE_START, BTW, DIRECT_QN, DIRECT_START,
             SUBJUNCTIVE, INDICATIVE, HEDGES, FACTUALITY]
    # First utterance starts the thread; the rest alternate speakers and
    # each reply to the previous one.
    utts = [Utterance(id='0', text=texts[0], speaker=speakers[1], reply_to=None)]
    for idx in range(1, len(texts)):
        utts.append(Utterance(id=str(idx), text=texts[idx],
                              speaker=speakers[(idx - 1) % 2],
                              reply_to=str(idx - 1)))
    return Corpus(utterances=utts)
def politeness_test_zh_corpus():
    """Build a two-speaker Corpus of Chinese politeness-strategy utterances
    chained together as a single reply thread."""
    speakers = [Speaker(id='alice'), Speaker(id='bob')]
    texts = [GRATITUDE_ZH, DEFERENCE_ZH, GREETING_ZH, APOLOGY_ZH, PLEASE_ZH, PLEASE_START_ZH, BTW_ZH, DIRECT_QN_ZH, HEDGES_ZH, FACTUALITY_ZH]
    # First utterance starts the thread; the rest alternate speakers and
    # each reply to the previous one.
    utts = [Utterance(id='0', text=texts[0], speaker=speakers[1], reply_to=None)]
    for idx in range(1, len(texts)):
        utts.append(Utterance(id=str(idx), text=texts[idx],
                              speaker=speakers[(idx - 1) % 2],
                              reply_to=str(idx - 1)))
    return Corpus(utterances=utts)
def parsed_politeness_test_corpus():
    """Return the English politeness test corpus with precomputed
    dependency parses attached as 'parsed' metadata on each utterance.

    The parses are hand-written fixtures keyed by utterance id; each entry
    is a list of sentences with root index 'rt' and token records
    (token, POS tag, dependency label, head 'up', children 'dn').
    """
    corpus = politeness_test_corpus()
    parses = {
        '0': [{'rt': 2,
          'toks': [{'tok': 'I', 'tag': 'PRP', 'dep': 'nsubj', 'up': 2, 'dn': []},
           {'tok': 'really', 'tag': 'RB', 'dep': 'advmod', 'up': 2, 'dn': []},
           {'tok': 'appreciate', 'tag': 'VBP', 'dep': 'ROOT', 'dn': [0, 1, 6, 8]},
           {'tok': 'that', 'tag': 'IN', 'dep': 'mark', 'up': 6, 'dn': []},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'nsubj', 'up': 6, 'dn': []},
           {'tok': 'have', 'tag': 'VBP', 'dep': 'aux', 'up': 6, 'dn': []},
           {'tok': 'done', 'tag': 'VBN', 'dep': 'ccomp', 'up': 2, 'dn': [3, 4, 5, 7]},
           {'tok': 'them', 'tag': 'PRP', 'dep': 'dobj', 'up': 6, 'dn': []},
           {'tok': '.', 'tag': '.', 'dep': 'punct', 'up': 2, 'dn': []}]},
         {'rt': 0,
          'toks': [{'tok': 'Thanks', 'tag': 'NNS', 'dep': 'ROOT', 'dn': [2, 3]},
           {'tok': 'a', 'tag': 'DT', 'dep': 'det', 'up': 2, 'dn': []},
           {'tok': 'lot', 'tag': 'NN', 'dep': 'npadvmod', 'up': 0, 'dn': [1]},
           {'tok': '.', 'tag': '.', 'dep': 'punct', 'up': 0, 'dn': []}]}],
        '1': [{'rt': 1,
          'toks': [{'tok': 'Nice', 'tag': 'JJ', 'dep': 'amod', 'up': 1, 'dn': []},
           {'tok': 'work', 'tag': 'NN', 'dep': 'ROOT', 'dn': [0, 3, 4, 7]},
           {'tok': 'so', 'tag': 'RB', 'dep': 'advmod', 'up': 3, 'dn': []},
           {'tok': 'far', 'tag': 'RB', 'dep': 'advmod', 'up': 1, 'dn': [2]},
           {'tok': 'on', 'tag': 'IN', 'dep': 'prep', 'up': 1, 'dn': [6]},
           {'tok': 'your', 'tag': 'PRP$', 'dep': 'poss', 'up': 6, 'dn': []},
           {'tok': 'rewrite', 'tag': 'NN', 'dep': 'pobj', 'up': 4, 'dn': [5]},
           {'tok': '.', 'tag': '.', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '2': [{'rt': 5,
          'toks': [{'tok': 'Hey', 'tag': 'UH', 'dep': 'intj', 'up': 5, 'dn': []},
           {'tok': ',', 'tag': ',', 'dep': 'punct', 'up': 5, 'dn': []},
           {'tok': 'how', 'tag': 'WRB', 'dep': 'advmod', 'up': 5, 'dn': []},
           {'tok': 'are', 'tag': 'VBP', 'dep': 'aux', 'up': 5, 'dn': []},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'nsubj', 'up': 5, 'dn': []},
           {'tok': 'doing', 'tag': 'VBG', 'dep': 'ROOT', 'dn': [0, 1, 2, 3, 4, 7, 8]},
           {'tok': 'these', 'tag': 'DT', 'dep': 'det', 'up': 7, 'dn': []},
           {'tok': 'days', 'tag': 'NNS', 'dep': 'dobj', 'up': 5, 'dn': [6]},
           {'tok': '?', 'tag': '.', 'dep': 'punct', 'up': 5, 'dn': []}]}],
        '3': [{'rt': 0,
          'toks': [{'tok': 'Sorry', 'tag': 'JJ', 'dep': 'ROOT', 'dn': [2, 4, 5, 7]},
           {'tok': 'to', 'tag': 'TO', 'dep': 'aux', 'up': 2, 'dn': []},
           {'tok': 'bother', 'tag': 'VB', 'dep': 'xcomp', 'up': 0, 'dn': [1, 3]},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'dobj', 'up': 2, 'dn': []},
           {'tok': ',', 'tag': ',', 'dep': 'punct', 'up': 0, 'dn': []},
           {'tok': 'but', 'tag': 'CC', 'dep': 'cc', 'up': 0, 'dn': []},
           {'tok': 'I', 'tag': 'PRP', 'dep': 'nsubj', 'up': 7, 'dn': []},
           {'tok': 'need', 'tag': 'VBP', 'dep': 'conj', 'up': 0, 'dn': [6, 8, 14]},
           {'tok': 'someone', 'tag': 'NN', 'dep': 'dobj', 'up': 7, 'dn': [10]},
           {'tok': 'to', 'tag': 'TO', 'dep': 'aux', 'up': 10, 'dn': []},
           {'tok': 'work', 'tag': 'VB', 'dep': 'relcl', 'up': 8, 'dn': [9, 11]},
           {'tok': 'on', 'tag': 'IN', 'dep': 'prep', 'up': 10, 'dn': [13]},
           {'tok': 'this', 'tag': 'DT', 'dep': 'det', 'up': 13, 'dn': []},
           {'tok': 'project', 'tag': 'NN', 'dep': 'pobj', 'up': 11, 'dn': [12]},
           {'tok': '.', 'tag': '.', 'dep': 'punct', 'up': 7, 'dn': []}]}],
        '4': [{'rt': 3,
          'toks': [{'tok': 'Could', 'tag': 'MD', 'dep': 'aux', 'up': 3, 'dn': []},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'nsubj', 'up': 3, 'dn': []},
           {'tok': 'please', 'tag': 'UH', 'dep': 'intj', 'up': 3, 'dn': []},
           {'tok': 'elaborate', 'tag': 'VB', 'dep': 'ROOT', 'dn': [0, 1, 2, 4, 5]},
           {'tok': 'more', 'tag': 'RBR', 'dep': 'advmod', 'up': 3, 'dn': []},
           {'tok': '?', 'tag': '.', 'dep': 'punct', 'up': 3, 'dn': []}]}],
        '5': [{'rt': 3,
          'toks': [{'tok': 'Please', 'tag': 'UH', 'dep': 'intj', 'up': 3, 'dn': []},
           {'tok': 'do', 'tag': 'VB', 'dep': 'aux', 'up': 3, 'dn': []},
           {'tok': 'not', 'tag': 'RB', 'dep': 'neg', 'up': 3, 'dn': []},
           {'tok': 'remove', 'tag': 'VB', 'dep': 'ROOT', 'dn': [0, 1, 2, 4, 5]},
           {'tok': 'warnings', 'tag': 'NNS', 'dep': 'dobj', 'up': 3, 'dn': []},
           {'tok': '.', 'tag': '.', 'dep': 'punct', 'up': 3, 'dn': []}]}],
        '6': [{'rt': 7,
          'toks': [{'tok': 'By', 'tag': 'IN', 'dep': 'prep', 'up': 7, 'dn': [2]},
           {'tok': 'the', 'tag': 'DT', 'dep': 'det', 'up': 2, 'dn': []},
           {'tok': 'way', 'tag': 'NN', 'dep': 'pobj', 'up': 0, 'dn': [1]},
           {'tok': ',', 'tag': ',', 'dep': 'punct', 'up': 7, 'dn': []},
           {'tok': 'where', 'tag': 'WRB', 'dep': 'advmod', 'up': 7, 'dn': []},
           {'tok': 'did', 'tag': 'VBD', 'dep': 'aux', 'up': 7, 'dn': []},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'nsubj', 'up': 7, 'dn': []},
           {'tok': 'find', 'tag': 'VB', 'dep': 'ROOT', 'dn': [0, 3, 4, 5, 6, 9, 10]},
           {'tok': 'that', 'tag': 'DT', 'dep': 'det', 'up': 9, 'dn': []},
           {'tok': 'picture', 'tag': 'NN', 'dep': 'dobj', 'up': 7, 'dn': [8]},
           {'tok': '?', 'tag': '.', 'dep': 'punct', 'up': 7, 'dn': []}]}],
        '7': [{'rt': 1,
          'toks': [{'tok': 'What', 'tag': 'WP', 'dep': 'attr', 'up': 1, 'dn': []},
           {'tok': 'is', 'tag': 'VBZ', 'dep': 'ROOT', 'dn': [0, 4, 5]},
           {'tok': 'your', 'tag': 'PRP$', 'dep': 'poss', 'up': 4, 'dn': []},
           {'tok': 'native', 'tag': 'JJ', 'dep': 'amod', 'up': 4, 'dn': []},
           {'tok': 'language', 'tag': 'NN', 'dep': 'nsubj', 'up': 1, 'dn': [2, 3]},
           {'tok': '?', 'tag': '.', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '8': [{'rt': 3,
          'toks': [{'tok': 'So', 'tag': 'RB', 'dep': 'advmod', 'up': 3, 'dn': []},
           {'tok': 'can', 'tag': 'MD', 'dep': 'aux', 'up': 3, 'dn': []},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'nsubj', 'up': 3, 'dn': []},
           {'tok': 'retrieve',
            'tag': 'VB',
            'dep': 'ROOT',
            'dn': [0, 1, 2, 4, 5, 6, 7]},
           {'tok': 'it', 'tag': 'PRP', 'dep': 'dobj', 'up': 3, 'dn': []},
           {'tok': 'or', 'tag': 'CC', 'dep': 'cc', 'up': 3, 'dn': []},
           {'tok': 'not', 'tag': 'RB', 'dep': 'conj', 'up': 3, 'dn': []},
           {'tok': '?', 'tag': '.', 'dep': 'punct', 'up': 3, 'dn': []}]}],
        '9': [{'rt': 3,
          'toks': [{'tok': 'Could', 'tag': 'MD', 'dep': 'aux', 'up': 3, 'dn': []},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'nsubj', 'up': 3, 'dn': []},
           {'tok': 'please', 'tag': 'UH', 'dep': 'intj', 'up': 3, 'dn': []},
           {'tok': 'proofread', 'tag': 'VB', 'dep': 'ROOT', 'dn': [0, 1, 2, 5, 6]},
           {'tok': 'this', 'tag': 'DT', 'dep': 'det', 'up': 5, 'dn': []},
           {'tok': 'article', 'tag': 'NN', 'dep': 'dobj', 'up': 3, 'dn': [4]},
           {'tok': '?', 'tag': '.', 'dep': 'punct', 'up': 3, 'dn': []}]}],
        '10': [{'rt': 2,
          'toks': [{'tok': 'Can', 'tag': 'MD', 'dep': 'aux', 'up': 2, 'dn': []},
           {'tok': 'you', 'tag': 'PRP', 'dep': 'nsubj', 'up': 2, 'dn': []},
           {'tok': 'proofread', 'tag': 'VB', 'dep': 'ROOT', 'dn': [0, 1, 4, 5, 7]},
           {'tok': 'this', 'tag': 'DT', 'dep': 'det', 'up': 4, 'dn': []},
           {'tok': 'article', 'tag': 'NN', 'dep': 'dobj', 'up': 2, 'dn': [3]},
           {'tok': 'for', 'tag': 'IN', 'dep': 'dative', 'up': 2, 'dn': [6]},
           {'tok': 'me', 'tag': 'PRP', 'dep': 'pobj', 'up': 5, 'dn': []},
           {'tok': '?', 'tag': '.', 'dep': 'punct', 'up': 2, 'dn': []}]}],
        '11': [{'rt': 1,
          'toks': [{'tok': 'I', 'tag': 'PRP', 'dep': 'nsubj', 'up': 1, 'dn': []},
           {'tok': 'suggest', 'tag': 'VBP', 'dep': 'ROOT', 'dn': [0, 3, 8]},
           {'tok': 'we', 'tag': 'PRP', 'dep': 'nsubj', 'up': 3, 'dn': []},
           {'tok': 'start', 'tag': 'VBP', 'dep': 'ccomp', 'up': 1, 'dn': [2, 4]},
           {'tok': 'with', 'tag': 'IN', 'dep': 'prep', 'up': 3, 'dn': [7]},
           {'tok': 'the', 'tag': 'DT', 'dep': 'det', 'up': 7, 'dn': []},
           {'tok': 'simplest', 'tag': 'JJS', 'dep': 'amod', 'up': 7, 'dn': []},
           {'tok': 'case', 'tag': 'NN', 'dep': 'pobj', 'up': 4, 'dn': [5, 6]},
           {'tok': '.', 'tag': '.', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '12': [{'rt': 7,
          'toks': [{'tok': 'In', 'tag': 'IN', 'dep': 'prep', 'up': 7, 'dn': [1]},
           {'tok': 'fact', 'tag': 'NN', 'dep': 'pobj', 'up': 0, 'dn': []},
           {'tok': ',', 'tag': ',', 'dep': 'punct', 'up': 7, 'dn': []},
           {'tok': 'our', 'tag': 'PRP$', 'dep': 'poss', 'up': 4, 'dn': []},
           {'tok': 'data', 'tag': 'NNS', 'dep': 'nsubj', 'up': 7, 'dn': [3]},
           {'tok': 'does', 'tag': 'VBZ', 'dep': 'aux', 'up': 7, 'dn': []},
           {'tok': 'not', 'tag': 'RB', 'dep': 'neg', 'up': 7, 'dn': []},
           {'tok': 'support',
            'tag': 'VB',
            'dep': 'ROOT',
            'dn': [0, 2, 4, 5, 6, 9, 10]},
           {'tok': 'this', 'tag': 'DT', 'dep': 'det', 'up': 9, 'dn': []},
           {'tok': 'claim', 'tag': 'NN', 'dep': 'dobj', 'up': 7, 'dn': [8]},
           {'tok': '.', 'tag': '.', 'dep': 'punct', 'up': 7, 'dn': []}]}]
    }
    # Attach each fixture parse to its utterance.
    for idx, parse in parses.items():
        corpus.get_utterance(idx).add_meta('parsed', parse)
    return corpus
def parsed_politeness_test_zh_corpus():
    """Return the Chinese politeness test corpus with precomputed
    dependency parses attached as 'parsed' metadata on each utterance.

    The parses are hand-written fixtures keyed by utterance id; each entry
    is a list of sentences with root index 'rt' and token records
    (token, POS tag, dependency label, head 'up', children 'dn').

    Fix: the original file's final ``return corpus`` line was corrupted by
    a fused dataset artifact (``| StarcoderdataPython |``), which made the
    module unparseable; the statement is restored here.
    """
    corpus = politeness_test_zh_corpus()
    parses = {
        '0': [{'rt': 1,
          'toks': [{'tok': '非常', 'tag': 'AD', 'dep': 'advmod', 'up': 1, 'dn': []},
           {'tok': '感谢', 'tag': 'VV', 'dep': 'ROOT', 'dn': [0, 4, 5]},
           {'tok': '您', 'tag': 'PN', 'dep': 'nmod:assmod', 'up': 4, 'dn': [3]},
           {'tok': '的', 'tag': 'DEG', 'dep': 'case', 'up': 2, 'dn': []},
           {'tok': '帮助', 'tag': 'NN', 'dep': 'dobj', 'up': 1, 'dn': [2]},
           {'tok': '。', 'tag': 'PU', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '1': [{'rt': 7,
          'toks': [{'tok': '干', 'tag': 'VV', 'dep': 'dep', 'up': 2, 'dn': []},
           {'tok': '得', 'tag': 'DER', 'dep': 'dep', 'up': 2, 'dn': []},
           {'tok': '漂亮', 'tag': 'VA', 'dep': 'dep', 'up': 7, 'dn': [0, 1]},
           {'tok': '!改', 'tag': 'AD', 'dep': 'advmod', 'up': 7, 'dn': []},
           {'tok': '写', 'tag': 'VV', 'dep': 'dep', 'up': 7, 'dn': []},
           {'tok': '得', 'tag': 'DER', 'dep': 'dep', 'up': 7, 'dn': []},
           {'tok': '非常', 'tag': 'AD', 'dep': 'advmod', 'up': 7, 'dn': []},
           {'tok': '好', 'tag': 'VA', 'dep': 'ROOT', 'dn': [2, 3, 4, 5, 6, 8]},
           {'tok': '。', 'tag': 'PU', 'dep': 'punct', 'up': 7, 'dn': []}]}],
        '2': [{'rt': 6,
          'toks': [{'tok': '嗨', 'tag': 'IJ', 'dep': 'dep', 'up': 4, 'dn': []},
           {'tok': ',', 'tag': 'PU', 'dep': 'punct', 'up': 4, 'dn': []},
           {'tok': '你', 'tag': 'PN', 'dep': 'nsubj', 'up': 4, 'dn': []},
           {'tok': '现在', 'tag': 'NT', 'dep': 'nmod:tmod', 'up': 4, 'dn': []},
           {'tok': '有空', 'tag': 'VV', 'dep': 'dep', 'up': 6, 'dn': [0, 1, 2, 3]},
           {'tok': '吗', 'tag': 'SP', 'dep': 'discourse', 'up': 6, 'dn': []},
           {'tok': '?', 'tag': 'PU', 'dep': 'ROOT', 'dn': [4, 5]}]}],
        '3': [{'rt': 1,
          'toks': [{'tok': '不好意思', 'tag': 'AD', 'dep': 'advmod', 'up': 1, 'dn': []},
           {'tok': '打扰', 'tag': 'VV', 'dep': 'ROOT', 'dn': [0, 2, 3, 4]},
           {'tok': '你', 'tag': 'PN', 'dep': 'dobj', 'up': 1, 'dn': []},
           {'tok': '了', 'tag': 'SP', 'dep': 'discourse', 'up': 1, 'dn': []},
           {'tok': ',', 'tag': 'PU', 'dep': 'punct', 'up': 1, 'dn': []}]},
         {'rt': 11,
          'toks': [{'tok': '你', 'tag': 'PN', 'dep': 'dep', 'up': 1, 'dn': []},
           {'tok': '有', 'tag': 'VE', 'dep': 'dep', 'up': 11, 'dn': [0, 2, 5]},
           {'tok': '时间', 'tag': 'NN', 'dep': 'dobj', 'up': 1, 'dn': []},
           {'tok': '帮', 'tag': 'P', 'dep': 'case', 'up': 4, 'dn': []},
           {'tok': '我', 'tag': 'PN', 'dep': 'nmod:prep', 'up': 5, 'dn': [3]},
           {'tok': '修改', 'tag': 'VV', 'dep': 'conj', 'up': 1, 'dn': [4, 6, 9]},
           {'tok': '一下', 'tag': 'AD', 'dep': 'advmod', 'up': 5, 'dn': []},
           {'tok': '这', 'tag': 'DT', 'dep': 'det', 'up': 9, 'dn': [8]},
           {'tok': '份', 'tag': 'M', 'dep': 'mark:clf', 'up': 7, 'dn': []},
           {'tok': '草稿', 'tag': 'NN', 'dep': 'dobj', 'up': 5, 'dn': [7]},
           {'tok': '吗', 'tag': 'SP', 'dep': 'discourse', 'up': 11, 'dn': []},
           {'tok': '?', 'tag': 'PU', 'dep': 'ROOT', 'dn': [1, 10]}]}],
        '4': [{'rt': 1,
          'toks': [{'tok': '可不可以', 'tag': 'AD', 'dep': 'advmod', 'up': 1, 'dn': []},
           {'tok': '请', 'tag': 'VV', 'dep': 'ROOT', 'dn': [0, 2, 3, 6]},
           {'tok': '您', 'tag': 'PN', 'dep': 'dobj', 'up': 1, 'dn': []},
           {'tok': '说', 'tag': 'VV', 'dep': 'ccomp', 'up': 1, 'dn': [4]},
           {'tok': '慢', 'tag': 'VA', 'dep': 'ccomp', 'up': 3, 'dn': [5]},
           {'tok': '一点', 'tag': 'AD', 'dep': 'advmod', 'up': 4, 'dn': []},
           {'tok': '?', 'tag': 'PU', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '5': [{'rt': 1,
          'toks': [{'tok': '请', 'tag': 'VV', 'dep': 'xcomp', 'up': 1, 'dn': []},
           {'tok': '留意', 'tag': 'VV', 'dep': 'ROOT', 'dn': [0, 5, 6]},
           {'tok': '我们', 'tag': 'PN', 'dep': 'nmod:assmod', 'up': 5, 'dn': [3]},
           {'tok': '的', 'tag': 'DEG', 'dep': 'case', 'up': 2, 'dn': []},
           {'tok': '后续', 'tag': 'JJ', 'dep': 'amod', 'up': 5, 'dn': []},
           {'tok': '通知', 'tag': 'NN', 'dep': 'dobj', 'up': 1, 'dn': [2, 4]},
           {'tok': '。', 'tag': 'PU', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '6': [{'rt': 1,
          'toks': [{'tok': '顺便', 'tag': 'AD', 'dep': 'advmod', 'up': 1, 'dn': []},
           {'tok': '问', 'tag': 'VV', 'dep': 'ROOT', 'dn': [0, 2, 3, 8, 12]},
           {'tok': '一下', 'tag': 'AD', 'dep': 'advmod', 'up': 1, 'dn': []},
           {'tok': ',', 'tag': 'PU', 'dep': 'punct', 'up': 1, 'dn': []},
           {'tok': '你', 'tag': 'PN', 'dep': 'nsubj', 'up': 8, 'dn': []},
           {'tok': '是', 'tag': 'VC', 'dep': 'cop', 'up': 8, 'dn': []},
           {'tok': '在', 'tag': 'P', 'dep': 'case', 'up': 7, 'dn': []},
           {'tok': '哪里', 'tag': 'PN', 'dep': 'nmod:prep', 'up': 8, 'dn': [6]},
           {'tok': '看到', 'tag': 'VV', 'dep': 'dep', 'up': 1, 'dn': [4, 5, 7, 10, 11]},
           {'tok': '这些', 'tag': 'DT', 'dep': 'det', 'up': 10, 'dn': []},
           {'tok': '照片', 'tag': 'NN', 'dep': 'dobj', 'up': 8, 'dn': [9]},
           {'tok': '的', 'tag': 'SP', 'dep': 'discourse', 'up': 8, 'dn': []},
           {'tok': '?', 'tag': 'PU', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '7': [{'rt': 1,
          'toks': [{'tok': '哪里', 'tag': 'PN', 'dep': 'nsubj', 'up': 1, 'dn': []},
           {'tok': '出', 'tag': 'VV', 'dep': 'ROOT', 'dn': [0, 2, 3, 4]},
           {'tok': '了', 'tag': 'AS', 'dep': 'aux:asp', 'up': 1, 'dn': []},
           {'tok': '问题', 'tag': 'NN', 'dep': 'dobj', 'up': 1, 'dn': []},
           {'tok': '?', 'tag': 'PU', 'dep': 'punct', 'up': 1, 'dn': []}]}],
        '8': [{'rt': 3,
          'toks': [{'tok': '应该', 'tag': 'VV', 'dep': 'aux:modal', 'up': 1, 'dn': []},
           {'tok': '可以', 'tag': 'VV', 'dep': 'dep', 'up': 3, 'dn': [0]},
           {'tok': '吧', 'tag': 'SP', 'dep': 'discourse', 'up': 3, 'dn': []},
           {'tok': '。', 'tag': 'PU', 'dep': 'ROOT', 'dn': [1, 2]}]}],
        '9': [{'rt': 6,
          'toks': [{'tok': '说', 'tag': 'VV', 'dep': 'dep', 'up': 6, 'dn': [1]},
           {'tok': '实话', 'tag': 'NN', 'dep': 'dobj', 'up': 0, 'dn': []},
           {'tok': ',', 'tag': 'PU', 'dep': 'punct', 'up': 6, 'dn': []},
           {'tok': '我', 'tag': 'PN', 'dep': 'nsubj', 'up': 6, 'dn': []},
           {'tok': '也', 'tag': 'AD', 'dep': 'advmod', 'up': 6, 'dn': []},
           {'tok': '不', 'tag': 'AD', 'dep': 'neg', 'up': 6, 'dn': []},
           {'tok': '懂', 'tag': 'VV', 'dep': 'ROOT', 'dn': [0, 2, 3, 4, 5, 7, 8]},
           {'tok': '这些', 'tag': 'PN', 'dep': 'dobj', 'up': 6, 'dn': []},
           {'tok': '。', 'tag': 'PU', 'dep': 'punct', 'up': 6, 'dn': []}]}]
    }
    # Attach each fixture parse to its utterance.
    for idx, parse in parses.items():
        corpus.get_utterance(idx).add_meta('parsed', parse)
    return corpus
5043582 | <reponame>TheOnlyWayUp/hangmanSolver<gh_stars>1-10
from .main import HangmanSolver | StarcoderdataPython |
11215403 | <filename>extractSpeech.py<gh_stars>0
# from library
import speech_recognition
import moviepy.editor as moviepy
import librosa
import os
class SpeechExtraction:
    """Extract spoken text from a video file.

    Pipeline: video -> WAV audio (moviepy) -> Google speech recognition
    (speech_recognition) -> optional export of the transcript to a text file.
    """

    # Class-level defaults; instance attributes shadow them once assigned.
    VideoClipFile = ""
    ConvertedClipFile = "converted.wav"
    audio_path = "converted.wav"  # By default
    clip = ""
    audio = ""
    result = ""
    recognized_text_file = ""

    def __init__(self, VideoClipFile):
        """Store the path of the video clip to transcribe."""
        self.VideoClipFile = VideoClipFile

    def convertVideoCliptoMp3(self):
        """Extract the audio track of the video and write it as WAV to
        ``self.audio_path``, replacing any previous conversion.

        Returns:
            The loaded moviepy clip.
        """
        self.clip = moviepy.VideoFileClip(self.VideoClipFile)
        # Remove any stale conversion first.  The original if/else wrote the
        # audio file in both branches, so a single write after cleanup is
        # behaviorally identical and clearer.
        if os.path.exists(self.audio_path):
            os.remove(self.audio_path)
        self.clip.audio.write_audiofile(self.audio_path)
        return self.clip

    def recognizeAudio(self):
        """Run Google speech recognition over the converted WAV file.

        Returns:
            The recognized text (also stored in ``self.result``).
        """
        recognizer = speech_recognition.Recognizer()
        self.audio = speech_recognition.AudioFile(self.audio_path)
        with self.audio as source:
            # Record the whole file; librosa reports its duration in seconds.
            duration = librosa.get_duration(filename=self.audio_path)
            audio_file = recognizer.record(source, duration=duration)
            self.result = recognizer.recognize_google(audio_file)
        return self.result

    def exportAudioTextToTextFile(self):
        """Recognize the audio and write the transcript to recognized.txt.

        Bug fix: the original called ``os.mkdir("recognized.txt")`` when the
        file did not exist, creating a *directory* with that name and making
        the subsequent ``open(..., 'w')`` fail.  ``open`` in 'w' mode
        creates/truncates the file itself, so no pre-creation is needed.
        """
        self.result = self.recognizeAudio()
        self.recognized_text_file = "recognized.txt"
        with open(self.recognized_text_file, mode='w') as file:
            file.write("Recognized Speech:")
            file.write("\n")
            file.write(self.result)
        print("ready!")
| StarcoderdataPython |
1866617 | #!/usr/bin/python
# Copyright (C) 2013 <NAME>, FoldedSoft e.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model definition for the Mirror API."""
import monkey_patch
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from protorpc import messages
from endpoints_proto_datastore.ndb import EndpointsDateTimeProperty
from endpoints_proto_datastore.ndb import EndpointsModel
from endpoints_proto_datastore.ndb import EndpointsUserProperty
from endpoints_proto_datastore.ndb import EndpointsAliasProperty
class MenuAction(messages.Enum):
    """Built-in actions a timeline-card menu item can trigger.

    CUSTOM marks an app-defined action (see MenuItem.id / MenuItem.values).
    """
    REPLY = 1
    REPLY_ALL = 2
    DELETE = 3
    SHARE = 4
    READ_ALOUD = 5
    VOICE_CALL = 6
    NAVIGATE = 7
    TOGGLE_PINNED = 8
    CUSTOM = 9
class MenuValue(EndpointsModel):
    """Display name/icon for one state of a (custom) menu item."""

    class MenuValueState(messages.Enum):
        # Lifecycle state this value applies to.
        DEFAULT = 1
        PENDING = 2
        CONFIRMED = 3

    displayName = ndb.StringProperty(required=True)
    iconUrl = ndb.StringProperty(required=True)
    state = msgprop.EnumProperty(MenuValueState, required=True)
class MenuItem(EndpointsModel):
    """One entry of a timeline card's menu."""
    action = msgprop.EnumProperty(MenuAction, required=True)
    # Opaque identifier; presumably used to identify CUSTOM actions -- verify.
    id = ndb.StringProperty()
    removeWhenSelected = ndb.BooleanProperty(default=False)
    # Per-state display values (see MenuValue).
    values = ndb.LocalStructuredProperty(MenuValue, repeated=True)
class Location(EndpointsModel):
    """A timestamped geographic location reported for a user.

    The alias property ``id`` accepts either a numeric datastore id or the
    literal string "latest", which resolves to the user's most recent
    location.
    """

    # Set to True by IdSet when the entity was resolved via id == "latest".
    _latest = False

    # Fields exposed in the ProtoRPC message, in wire order.
    _message_fields_schema = (
        "id",
        "timestamp",
        "latitude",
        "longitude",
        "accuracy",
        "displayName",
        "address"
    )

    user = EndpointsUserProperty(required=True, raise_unauthorized=True)
    timestamp = EndpointsDateTimeProperty(auto_now_add=True)
    latitude = ndb.FloatProperty()
    longitude = ndb.FloatProperty()
    accuracy = ndb.FloatProperty()
    displayName = ndb.StringProperty()
    address = ndb.StringProperty()

    def IdSet(self, value):
        """Setter for the ``id`` alias: load the entity the id refers to."""
        if not isinstance(value, basestring):
            raise TypeError("ID must be a string.")

        if value == "latest":
            # Resolve to the newest location stored for this user.
            self._latest = True
            loc_query = Location.query().order(-Location.timestamp)
            loc_query = loc_query.filter(Location.user == self.user)
            loc = loc_query.get()
            if loc is not None:
                self.UpdateFromKey(loc.key)
            return

        # Non-numeric ids (other than "latest") are silently ignored.
        if value.isdigit():
            self.UpdateFromKey(ndb.Key(Location, int(value)))

    @EndpointsAliasProperty(setter=IdSet, required=False)
    def id(self):
        if self._latest:
            return "latest"
        if self.key is not None:
            return str(self.key.integer_id())
class TimelineItem(EndpointsModel):
    """Model for timeline cards.

    Since the created property is auto_now_add=True, Cards will document when
    they were inserted immediately after being stored.
    """

    class Attachment(EndpointsModel):
        """Attachment to a timeline card

        Due to limitations in Cloud Endpoints this works a bit differently than
        the attachments in the official API. Normally you would add attachments
        by uploading the media data (as image/audio/video). Attachments in this
        implementation can only be of type image and are represented either as
        URL or Data-URI and can be added/retrieved/updated directly by filling
        the attachments field in the timeline.insert method.
        """
        id = ndb.StringProperty()
        contentType = ndb.StringProperty()
        contentUrl = ndb.StringProperty()
        isProcessingContent = ndb.BooleanProperty(default=False)

    class TimelineContact(EndpointsModel):
        """Contact embedded in a card (creator or recipient)."""

        class ContactType(messages.Enum):
            INDIVIDUAL = 1
            GROUP = 2

        acceptTypes = ndb.StringProperty(repeated=True)
        displayName = ndb.StringProperty()
        id = ndb.StringProperty(required=True)
        imageUrls = ndb.StringProperty(repeated=True)
        phoneNumber = ndb.StringProperty()
        source = ndb.StringProperty()
        type = msgprop.EnumProperty(ContactType)

    class Notification(EndpointsModel):
        """Notification behaviour for the card."""
        level = ndb.StringProperty(default="DEFAULT")
        deliveryTime = EndpointsDateTimeProperty()

    # Fields exposed in the ProtoRPC message, in wire order.
    _message_fields_schema = (
        "id",
        "attachments",
        "bundleId",
        "canonicalUrl",
        "created",
        "creator",
        "displayTime",
        "html",
        "htmlPages",
        "inReplyTo",
        "isBundleCover",
        "isDeleted",
        "isPinned",
        "location",
        "menuItems",
        "notification",
        "pinScore",
        "recipients",
        "sourceItemId",
        "speakableText",
        "text",
        "title",
        "updated"
    )

    user = EndpointsUserProperty(required=True, raise_unauthorized=True)
    attachments = ndb.LocalStructuredProperty(Attachment, repeated=True)
    bundleId = ndb.StringProperty()
    canonicalUrl = ndb.StringProperty()
    created = EndpointsDateTimeProperty(auto_now_add=True)
    creator = ndb.LocalStructuredProperty(TimelineContact)
    displayTime = EndpointsDateTimeProperty()
    html = ndb.TextProperty()
    htmlPages = ndb.TextProperty(repeated=True)
    inReplyTo = ndb.IntegerProperty()
    isBundleCover = ndb.BooleanProperty()
    isDeleted = ndb.BooleanProperty()
    isPinned = ndb.BooleanProperty()
    location = ndb.LocalStructuredProperty(Location)
    menuItems = ndb.LocalStructuredProperty(MenuItem, repeated=True)
    notification = ndb.LocalStructuredProperty(Notification)
    pinScore = ndb.IntegerProperty()
    recipients = ndb.LocalStructuredProperty(TimelineContact, repeated=True)
    sourceItemId = ndb.StringProperty()
    speakableText = ndb.TextProperty()
    text = ndb.StringProperty()
    title = ndb.StringProperty()
    updated = EndpointsDateTimeProperty(auto_now=True)

    def IncludeDeletedSet(self, value):
        """
        If value is true all timelineItems will be returned.
        Otherwise a filter for non-deleted items is necessary for the query.
        """
        if value is None or value is False:
            self._endpoints_query_info._AddFilter(TimelineItem.isDeleted == False)

    @EndpointsAliasProperty(setter=IncludeDeletedSet, property_type=messages.BooleanField, default=False)
    def includeDeleted(self):
        """
        includedDeleted is only used as parameter in query_methods
        so there should never be a reason to actually retrieve the value
        """
        return None

    def PinnedOnlySet(self, value):
        """
        If value is true only pinned timelineItems will be returned.
        Otherwise all timelineItems are returned.
        """
        if value is True:
            self._endpoints_query_info._AddFilter(TimelineItem.isPinned == True)

    @EndpointsAliasProperty(setter=PinnedOnlySet, property_type=messages.BooleanField, default=False)
    def pinnedOnly(self):
        """
        pinnedOnly is only used as parameter in query_methods
        so there should never be a reason to actually retrieve the value
        """
        return None

    def MaxResultsSet(self, value):
        """Setter to be used for default limit EndpointsAliasProperty.

        Simply sets the limit on the entity's query info object, and the query
        info object handles validation.

        Args:
            value: The limit value to be set.
        """
        self._endpoints_query_info.limit = value

    @EndpointsAliasProperty(setter=MaxResultsSet, property_type=messages.IntegerField, default=20)
    def maxResults(self):
        """Getter to be used for default limit EndpointsAliasProperty.

        Uses the ProtoRPC property_type IntegerField since a limit.

        Returns:
            The integer (or null) limit from the query info on the entity.
        """
        return self._endpoints_query_info.limit
class Contact(EndpointsModel):
    """A person or group that can be used as a creator or a contact.

    Entities are keyed under their owning user ("User" ancestor key), so the
    ``id`` alias is unique per user rather than globally.
    """

    class ContactType(messages.Enum):
        INDIVIDUAL = 1
        GROUP = 2

    # Fields exposed in the ProtoRPC message, in wire order.
    _message_fields_schema = (
        "id",
        "acceptTypes",
        "displayName",
        "imageUrls",
        "phoneNumber",
        "priority",
        "source",
        "type"
    )

    user = EndpointsUserProperty(required=True, raise_unauthorized=True)
    acceptTypes = ndb.StringProperty(repeated=True)
    displayName = ndb.StringProperty(required=True)
    imageUrls = ndb.StringProperty(repeated=True)
    phoneNumber = ndb.StringProperty()
    priority = ndb.IntegerProperty()
    source = ndb.StringProperty()
    type = msgprop.EnumProperty(ContactType)

    def IdSet(self, value):
        """Setter for ``id``: resolve the entity under the user's ancestor key."""
        if not isinstance(value, basestring):
            raise TypeError("ID must be a string.")

        self.UpdateFromKey(ndb.Key("User", self.user.email(), Contact, value))

    @EndpointsAliasProperty(setter=IdSet, required=True)
    def id(self):
        if self.key is not None:
            # key.pairs() is (("User", email), ("Contact", id)); return the id.
            return self.key.pairs()[1][1]
class Operation(messages.Enum):
    """Mutation kinds a subscription can register for (see Subscription)."""
    UPDATE = 1
    INSERT = 2
    DELETE = 3
class Subscription(EndpointsModel):
    """Model for subscriptions"""

    # Fields exposed in the ProtoRPC message, in wire order.
    _message_fields_schema = ("id", "collection", "userToken", "verifyToken", "operation", "callbackUrl")

    user = EndpointsUserProperty(required=True, raise_unauthorized=True)
    # Collection being watched (presumably "timeline" -- see Action.collection).
    collection = ndb.StringProperty(required=True)
    # Opaque tokens echoed back to the subscriber for identification/verification.
    userToken = ndb.StringProperty(required=True)
    verifyToken = ndb.StringProperty(required=True)
    # Which operations trigger a notification.
    operation = msgprop.EnumProperty(Operation, repeated=True)
    callbackUrl = ndb.StringProperty(required=True)
class Action(messages.Message):
    """ProtoRPC Message Class for actions performed on timeline cards

    Since those actions are directly forwarded to subscriptions they
    don't need to be saved to the data store, hence no EndpointsModel class
    """
    collection = messages.StringField(1, default="timeline")
    itemId = messages.IntegerField(2, required=True)
    action = messages.EnumField(MenuAction, 3, required=True)
    # Extra payload for the action (e.g. for CUSTOM actions) -- TODO confirm.
    value = messages.StringField(4)
class ActionResponse(messages.Message):
    """Simple response to actions sent to the Mirror API"""
    success = messages.BooleanField(1, default=True)


class AttachmentListRequest(messages.Message):
    """Request for all attachments of one timeline item."""
    itemId = messages.IntegerField(1, required=True)


class AttachmentRequest(messages.Message):
    """Request for a single attachment of a timeline item."""
    itemId = messages.IntegerField(1, required=True)
    attachmentId = messages.StringField(2, required=True)


class AttachmentResponse(messages.Message):
    """Wire representation of one attachment (mirrors TimelineItem.Attachment)."""
    id = messages.StringField(1)
    contentType = messages.StringField(2)
    contentUrl = messages.StringField(3)
    isProcessingContent = messages.BooleanField(4, default=False)


class AttachmentList(messages.Message):
    """List wrapper holding AttachmentResponse items."""
    items = messages.MessageField(AttachmentResponse, 1, repeated=True)
| StarcoderdataPython |
1704919 | '''
Script to generate embeddings from resnet trained using pcl
Command to run:
python eval_kmeans.py --pretrained experiment_pcl_resume/checkpoint.pth.tar /home/mprabhud/dataset/shapenet_renders/npys/
'''
from __future__ import print_function
import os
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import argparse
import random
import numpy as np
from tqdm import tqdm
import faiss
from torchvision import transforms, datasets
import torchvision.models as models
import pcl.loader
import ipdb
st = ipdb.set_trace
def parse_option():
    """Define and parse command-line options for the embedding/k-means run.

    Returns:
        The argparse namespace, augmented with ``num_class`` (fixed at 20)
        and ``n_run`` (5 random runs for low-shot experiments, otherwise 1).
    """
    # Names of torchvision model constructors (lowercase, callable).
    model_names = sorted(name for name in models.__dict__
                         if name.islower() and not name.startswith("__")
                         and callable(models.__dict__[name]))

    parser = argparse.ArgumentParser('argument for training')

    parser.add_argument('data', metavar='DIR',
                        help='path to dataset')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--num-workers', type=int, default=8, help='num of workers to use')
    # NOTE(review): --cost appears unused in this script -- confirm before removing.
    parser.add_argument('--cost', type=str, default='0.5')
    parser.add_argument('--seed', default=0, type=int)

    # model definition
    parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                        choices=model_names,
                        help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet50)')
    parser.add_argument('--pretrained', default='', type=str,
                        help='path to pretrained checkpoint')

    # dataset
    parser.add_argument('--low-shot', default=False, action='store_true', help='whether to perform low-shot training.')

    parser.add_argument('--low-dim', default=16, type=int,
                        help='feature dimension (default: 128)')
    parser.add_argument('--pcl-r', default=1024, type=int,
                        help='queue size; number of negative pairs; needs to be smaller than num_cluster (default: 16384)')
    parser.add_argument('--moco-m', default=0.999, type=float,
                        help='moco momentum of updating key encoder (default: 0.999)')
    parser.add_argument('--temperature', default=0.2, type=float,
                        help='softmax temperature')
    parser.add_argument('--mlp', action='store_true',
                        help='use mlp head')
    parser.add_argument('--aug-plus', action='store_true',
                        help='use moco-v2/SimCLR data augmentation')
    parser.add_argument('--cos', action='store_true',
                        help='use cosine lr schedule')
    parser.add_argument('--num-cluster', default='2500,5000,10000', type=str,
                        help='number of clusters')

    opt = parser.parse_args()

    opt.num_class = 20

    # if low shot experiment, do 5 random runs
    if opt.low_shot:
        opt.n_run = 5
    else:
        opt.n_run = 1

    return opt
def main():
    """Entry point: build the data loader, restore the pre-trained key
    encoder, embed every dataset sample, and cluster with k-means."""
    args = parse_option()
    args.num_cluster = args.num_cluster.split(',')  # "2500,5000,..." -> list

    random.seed(args.seed)
    np.random.seed(args.seed)

    ########################################################################
    # STEP 1: SETuP DATALOADER (MAKE SURE TO CONVERT IT TO PIL IMAGE !!!!!)#
    ########################################################################
    traindir = os.path.join(args.data)
    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = pcl.loader.ShapeNet(
        traindir,
        'split_allpt.txt',
        transform=transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
            normalize
        ]))
    # shuffle=False so that embedding row i corresponds to dataset sample i.
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size*2, shuffle=False,
        sampler=None, num_workers=args.num_workers, pin_memory=True)

    ############################
    # STEP 2: INITIALIZE MODEL #
    ############################
    # create model
    print("=> creating model '{}'".format(args.arch))
    # 16-dim head to match the embedding dimension used below.
    kmeans_model = models.__dict__[args.arch](num_classes=16)
    kmeans_model.fc = nn.Sequential(nn.Linear(2048, 2048), nn.ReLU(), kmeans_model.fc)

    # load from pre-trained
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location="cpu")
            state_dict = checkpoint['state_dict']
            # rename pre-trained keys: keep only the key encoder's weights,
            # stripped of their DataParallel/encoder prefix.
            for k in list(state_dict.keys()):
                if k.startswith('module.encoder_k'):
                    # remove prefix
                    state_dict[k[len("module.encoder_k."):]] = state_dict[k]
                # delete renamed or unused k
                del state_dict[k]
            kmeans_model.load_state_dict(state_dict, strict=False)
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))

    kmeans_model.cuda()

    ###############################
    # STEP 3: GET Kmeans Clusters #
    ##############################
    cluster_result = None
    features = compute_embeddings(train_loader, kmeans_model, args)  # generate embeddings based on keys encoder (different from eval_embeddings.py)

    # placeholder for clustering result
    cluster_result = {'im2cluster':[],'centroids':[],'density':[]}
    for num_cluster in args.num_cluster:
        cluster_result['im2cluster'].append(torch.zeros(len(train_dataset),dtype=torch.long).cuda())
        cluster_result['centroids'].append(torch.zeros(int(num_cluster),16).cuda())
        cluster_result['density'].append(torch.zeros(int(num_cluster)).cuda())

    features[torch.norm(features,dim=1)>1.5] /= 2  # account for the few samples that are computed twice
    features = features.numpy()
    cluster_result = run_kmeans(features,args)  # run kmeans clustering
def compute_embeddings(eval_loader, model, args):
    """Run the (frozen) encoder over the whole dataset.

    Returns an N x 16 CPU tensor of embeddings, where row i corresponds to
    dataset sample i (the loader yields each batch's sample indices).
    """
    print('Computing embeddings...')
    model.eval()
    embeddings = torch.zeros(len(eval_loader.dataset), 16).cuda()
    with torch.no_grad():
        for batch, sample_idx in tqdm(eval_loader):
            batch = batch.cuda(non_blocking=True)
            # Scatter this batch's embeddings into their dataset positions.
            embeddings[sample_idx] = model(batch)
    return embeddings.cpu()
def run_kmeans(x, args):
    """
    Cluster embeddings with faiss GPU k-means, once per requested cluster count.

    Args:
        x: data to be clustered

    Returns:
        dict with one entry per cluster count:
        'im2cluster' (cluster id per sample), 'centroids' (L2-normalized,
        CUDA) and 'density' (per-cluster concentration, scaled so its mean
        equals args.temperature).
    """
    results = {'im2cluster':[],'centroids':[],'density':[]}

    for seed, num_cluster in enumerate(args.num_cluster):
        print('performing kmeans clustering on ...',num_cluster)

        # intialize faiss clustering parameters
        d = x.shape[1]
        k = int(num_cluster)
        clus = faiss.Clustering(d, k)
        clus.verbose = True
        clus.niter = 20
        clus.nredo = 5
        clus.seed = seed
        clus.max_points_per_centroid = 1000
        clus.min_points_per_centroid = 10

        res = faiss.StandardGpuResources()
        cfg = faiss.GpuIndexFlatConfig()
        cfg.useFloat16 = False
        cfg.device = 0
        index = faiss.GpuIndexFlatL2(res, d, cfg)

        clus.train(x, index)

        D, I = index.search(x, 1)  # for each sample, find cluster distance and assignments
        im2cluster = [int(n[0]) for n in I]

        # get cluster centroids
        centroids = faiss.vector_to_array(clus.centroids).reshape(k,d)

        # sample-to-centroid distances for each cluster
        Dcluster = [[] for c in range(k)]
        for im,i in enumerate(im2cluster):
            Dcluster[i].append(D[im][0])

        # concentration estimation (phi)
        density = np.zeros(k)
        for i,dist in enumerate(Dcluster):
            if len(dist)>1:
                d = (np.asarray(dist)**0.5).mean()/np.log(len(dist)+10)
                density[i] = d

        # if cluster only has one point, use the max to estimate its concentration
        dmax = density.max()
        for i,dist in enumerate(Dcluster):
            if len(dist)<=1:
                density[i] = dmax

        density = density.clip(np.percentile(density,10),np.percentile(density,90))  # clamp extreme values for stability
        density = args.temperature*density/density.mean()  # scale the mean to temperature

        # convert to cuda Tensors for broadcast
        centroids = torch.Tensor(centroids).cuda()
        centroids = nn.functional.normalize(centroids, p=2, dim=1)

        im2cluster = torch.LongTensor(im2cluster).cuda()
        density = torch.Tensor(density).cuda()

        results['centroids'].append(centroids)
        results['density'].append(density)
        results['im2cluster'].append(im2cluster)

    return results
# Script entry point: parse args, embed the dataset, run k-means clustering.
if __name__ == '__main__':
    main()
3397570 | <gh_stars>1-10
# source: https://github.com/kuangliu/pytorch-cifar/blob/master/models/densenet.py
'''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from cnns.nnlib.pytorch_layers.conv_picker import Conv
def conv3x3(in_planes, out_planes, args, stride=1):
    """3x3 convolution with padding, built via the project's Conv picker so
    the convolution implementation (e.g. FFT-based) is selected from `args`."""
    # Equivalent plain-PyTorch layer, kept for reference:
    # nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
    #           padding=1, bias=False)
    return Conv(kernel_sizes=[3], in_channels=in_planes,
                out_channels=[out_planes], strides=[stride],
                padding=[1], args=args, is_bias=False).get_conv()
def conv1x1(in_planes, out_planes, args, stride=1):
    """1x1 convolution via the project's Conv picker."""
    # Equivalent plain-PyTorch layer, kept for reference:
    # nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
    #           bias=False)
    # It is rather unnecessary to use fft convolution for kernels of size 1x1.
    return Conv(kernel_sizes=[1], in_channels=in_planes,
                out_channels=[out_planes], strides=[stride],
                padding=[0], args=args, is_bias=False).get_conv()
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1conv then BN-ReLU-3x3conv,
    concatenating the input onto the output (dense connectivity)."""

    def __init__(self, in_planes, growth_rate, args):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        # 1x1 "bottleneck" conv expands to 4 * growth_rate channels.
        self.conv1 = conv1x1(in_planes=in_planes, out_planes=4 * growth_rate,
                             args=args)
        self.bn2 = nn.BatchNorm2d(4 * growth_rate)
        # 3x3 conv reduces back down to growth_rate new feature maps.
        self.conv2 = conv3x3(in_planes=4 * growth_rate, out_planes=growth_rate,
                             args=args)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        # Dense connection: concatenate new features with the input features.
        out = torch.cat([out, x], 1)
        return out
class Transition(nn.Module):
    """Transition between dense blocks: BN-ReLU-1x1conv to shrink the
    channel count, then 2x2 average pooling to halve spatial resolution."""

    def __init__(self, in_planes, out_planes, args=None):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = conv1x1(in_planes, out_planes, args)

    def forward(self, x):
        out = self.conv(F.relu(self.bn(x)))
        out = F.avg_pool2d(out, 2)
        return out
class DenseNet(nn.Module):
    """DenseNet backbone: 3x3 stem conv, four dense blocks separated by
    channel-compressing transitions, then BN + average pool + linear head.

    Args:
        block: layer class for the dense blocks (e.g. Bottleneck).
        nblocks: number of layers in each of the four dense blocks.
        args: project config; used by the Conv picker and for num_classes.
        growth_rate: channels added per dense layer.
        reduction: channel compression factor at each transition.
    """

    def __init__(self, block, nblocks, args, growth_rate=12, reduction=0.5):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate
        self.conv1 = conv3x3(in_planes=3, out_planes=num_planes, args=args)

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0],
                                              args=args)
        num_planes += nblocks[0] * growth_rate
        # Transition compresses channels by `reduction` and halves resolution.
        out_planes = int(math.floor(num_planes * reduction))
        self.trans1 = Transition(num_planes, out_planes, args=args)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1],
                                              args=args)
        num_planes += nblocks[1] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans2 = Transition(num_planes, out_planes, args=args)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2],
                                              args=args)
        num_planes += nblocks[2] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans3 = Transition(num_planes, out_planes, args=args)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3],
                                              args=args)
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, args.num_classes)

    def _make_dense_layers(self, block, in_planes, nblock, args):
        # Each layer consumes all previous features and adds growth_rate more.
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate, args=args))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # 4x4 average pool assumes 32x32 inputs (CIFAR-sized) -- TODO confirm.
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121(args):
    """DenseNet-121: blocks [6, 12, 24, 16], growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32, args=args)


def DenseNet169(args):
    """DenseNet-169: blocks [6, 12, 32, 32], growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32, args=args)


def DenseNet201(args):
    """DenseNet-201: blocks [6, 12, 48, 32], growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32, args=args)


def DenseNet161(args):
    """DenseNet-161: blocks [6, 12, 36, 24], growth rate 48."""
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48, args=args)


def densenet_cifar(args):
    """Small DenseNet for CIFAR: blocks [6, 12, 24, 16], growth rate 12."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12, args=args)
def test():
    """Smoke test: forward one random CIFAR-sized image through the network."""
    # NOTE(review): densenet_cifar requires an `args` argument; this zero-arg
    # call raises TypeError as written -- confirm intended usage.
    net = densenet_cifar()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y)
8172993 | import os
import subprocess
import traceback
from celery import shared_task
from imagekit.utils import open_image, save_image
from django.conf import settings
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from shapes.models import SubmittedShape, MaterialShape, ShapeSubstance
from shapes.utils import complex_polygon_area, \
mask_complex_polygon, update_shape_image_pbox
from common.utils import save_obj_attr_image, get_content_tuple
@shared_task
def fill_in_bbox_task(shape):
    """ Helper to fill in the potentially empty image_bbox field """
    # Re-fetch the photo to get a fresh instance of its concrete class.
    photo = shape.photo.__class__.objects.get(id=shape.photo.id)
    # Crop the original photo to the shape's bounding box only.
    image_bbox = mask_complex_polygon(
        image=open_image(photo.image_orig),
        vertices=shape.vertices,
        triangles=shape.triangles,
        bbox_only=True)
    save_obj_attr_image(shape, attr='image_bbox',
                        img=image_bbox,
                        format='jpg', save=True)
@shared_task
def fill_in_pbox_task(shape):
    """ Helper to fill in the potentially empty image_pbox field """
    # Delegates to the shared shapes.utils implementation and persists.
    update_shape_image_pbox(shape, save=True)
@shared_task
def triangulate_submitted_shapes_task(photo, user, mturk_assignment,
                                      shape_model, submitted_shapes):
    """
    Given a list of input SubmittedShape instances, intersect them into
    non-overlapping complex polygons. This is performed externally using the
    C++ "triangulate" program in the same repository.
    """
    try:
        triangulate_submitted_shapes_impl(
            photo, user, mturk_assignment, shape_model, submitted_shapes)
    except Exception as exc:
        # Re-add to the queue so that we don't lose tasks
        print 'Exception (%s) -- will retry in 5 minutes' % exc
        traceback.print_exc()
        raise triangulate_submitted_shapes_task.retry(
            exc=exc, countdown=60 * 5)
def triangulate_submitted_shapes_impl(
photo, user, mturk_assignment, shape_model, submitted_shapes):
if not submitted_shapes:
return
if not os.path.isfile(settings.TRIANGULATE_BIN):
raise RuntimeError("ERROR: '%s' (settings.TRIANGULATE_BIN) does not exist -- "
"check that it is compiled" % settings.TRIANGULATE_BIN)
input_lines = [('%s ' % s.id) + ' '.join(
filter(None, s.vertices.split(','))) for s in submitted_shapes]
input_txt = '\n'.join(input_lines) + '\nEND'
process = None
try:
process = subprocess.Popen(
args=settings.TRIANGULATE_BIN,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
output_txt, errors_txt = process.communicate(input_txt)
except:
if process:
process.kill()
process.wait()
raise
if not output_txt:
raise ValueError(
"Error with triangulate. Bin:%s\nInput:\n%s\n\nOutput:\n%s\n\nErrors:\n%s" % (
settings.TRIANGULATE_BIN, input_txt, output_txt, errors_txt)
)
if errors_txt:
print errors_txt
#print("Bin:%s\nInput:\n%s\n\nOutput:\n%s\n\nErrors:\n%s" % (
#settings.TRIANGULATE_BIN, input_txt, output_txt, errors_txt))
new_content_tuples = []
output_lines = output_txt.split('\n')
with transaction.atomic():
for line in output_lines:
line = line.strip()
if not line:
continue
fields = line.split('|')
if len(fields) != 4:
raise ValueError("Invalid output: %s" % repr(output_txt))
ids = [int(f) for f in filter(None, fields[0].split(' '))]
if not ids:
print 'Discarding shape not contained in input'
continue
verts, tris, segs = [','.join(filter(None, f.split(' ')))
for f in fields[1:4]]
# compute polygon area and discard small polygons
area = complex_polygon_area(verts, tris)
# 0.0002 is roughly a 32x32 patch for a 2400x2400 image
if area < 0.0001:
print 'Discarding: verts: "%s", tris: "%s", segs: "%s", area: %s' % (
verts, tris, segs, area)
continue
# convert area to pixels
pixel_area = area * photo.image_orig.width * \
photo.image_orig.height
# extract segmentation times
time_ms_list = []
ss_list = []
for ss in submitted_shapes:
if int(ss.id) in ids:
ss_list.append(ss)
time_ms_list.append(ss.time_ms)
if not ss_list or not time_ms_list:
print 'Discarding shape not mapping to input shapes'
# use the average time of the submitted shapes
time_ms = sum(time_ms_list) / float(len(time_ms_list))
# auto-grant high quality for users with qualifications
quality_method = None
correct = None
if pixel_area >= 12000:
from mturk.models import MtQualificationAssignment
try:
correct = bool(MtQualificationAssignment.objects.get(
worker=user, qualification__slug="mat_seg").value)
if correct:
quality_method = 'Q'
except MtQualificationAssignment.DoesNotExist:
correct = False
new_obj, created = shape_model.objects.get_or_create(
photo=photo,
user=user,
mturk_assignment=mturk_assignment,
vertices=verts,
triangles=tris,
segments=segs,
area=area,
pixel_area=pixel_area,
time_ms=time_ms,
defaults={
'added': ss_list[0].added,
'correct': correct,
'quality_method': quality_method,
}
)
if created:
for ss in ss_list:
new_obj.submitted_shapes.add(ss)
new_content_tuples.append(get_content_tuple(new_obj))
# these are created outside of the mturk view response, so we need to
# manually add them to the pending objects queue
# (imported here to avoid circular imports)
for (ct_id, obj_id) in new_content_tuples:
mturk_assignment.submitted_contents.get_or_create(
content_type=ContentType.objects.get_for_id(ct_id),
object_id=obj_id,
)
# update photo shape count synchronously
from photos.tasks import update_photos_num_shapes
update_photos_num_shapes([photo.id])
new_content_tuples.append(get_content_tuple(photo))
# new pending objects
from mturk.tasks import add_pending_objects_task
add_pending_objects_task.delay(new_content_tuples)
@shared_task
def retriangulate_material_shapes_task(photo):
    """
    DANGER ZONE: this DELETES all material shapes for this object and
    recomputes the triangulation for a material shape. If multiple users
    submitted, it triangulates only the one with the most vertices and deletes
    the rest of the material shapes.
    """
    all_submitted_shapes = list(photo.submitted_shapes.all())
    if len(all_submitted_shapes) < 1:
        return

    # find common assignment and user
    mturk_assignments = set([s.mturk_assignment for s in all_submitted_shapes])
    if len(mturk_assignments) != 1:
        print 'more than one user submitted -- picking assignment with most vertices'
        max_verts = 0
        max_asst = None
        for asst in mturk_assignments:
            asst_ss = SubmittedShape.objects.filter(mturk_assignment=asst)
            verts = sum([ss.num_vertices for ss in asst_ss])
            if verts > max_verts:
                max_verts = verts
                max_asst = asst
        mturk_assignment = max_asst
    else:
        # Python 2 iterator protocol: .next() (not next(...)).
        mturk_assignment = iter(mturk_assignments).next()

    # delete existing images (all imagekit spec files for each shape)
    for shape in photo.material_shapes.all():
        for f in shape._ik.spec_files:
            f.delete()

    # delete existing triangulated shapes
    photo.material_shapes.all().delete()

    # only triangulate this assignment's shapes
    submitted_shapes = SubmittedShape.objects.filter(
        mturk_assignment=mturk_assignment)
    user = mturk_assignment.worker

    # triangulate new ones
    # Note: have to call asynchronously since it could raise an
    # exception and retry
    triangulate_submitted_shapes_task.delay(
        photo, user, mturk_assignment, MaterialShape, submitted_shapes)
@shared_task
def update_shape_image_crop_task(shape):
    """ recompute the cropped image for a shape """
    # import here to avoid cycle
    from shapes.utils import update_shape_image_crop
    update_shape_image_crop(shape, save=True)
@shared_task
def save_substance_grid(substance_id, outdir, show_progress=False):
    """Render a PNG grid of shapes for one substance into `outdir`.

    The output filename is the substance name with spaces/slashes/quotes
    normalized away.  No-op (with a message) if the substance has no shapes.
    """
    try:
        os.makedirs(outdir)
    except OSError as e:
        # Directory probably exists already; log and continue.
        print e

    substance = ShapeSubstance.objects.get(id=substance_id)
    print 'substance: %s' % substance
    qset = MaterialShape.objects.filter(
        substance_id=substance_id, photo__inappropriate=False)
    if not qset.exists():
        print 'no shapes for %s' % substance
        return

    from common.utils import create_image_grid_qset
    out = create_image_grid_qset(qset, 'image_square_300',
                                 ncols=20, size=300,
                                 max_qset_size=10 * 20 * 16 / 9,
                                 downsample_ratio=2,
                                 show_progress=show_progress)

    # Sanitize the substance name for use as a filename.
    outname = os.path.join(outdir, substance.name
                           .replace(' - ', '-').replace(' ', '-')
                           .replace('/', '-').replace("'", '') + '.png')
    with open(outname, 'wb') as outfile:
        save_image(out, outfile, format="PNG")
@shared_task
def create_shape_image_sample_task(shape, sample_width=256, sample_height=256):
    """Generate an image sample for a shape, retrying on failure."""
    # import here to avoid cycle
    from shapes.utils import create_shape_image_sample
    try:
        create_shape_image_sample(shape, sample_width, sample_height)
    except Exception as exc:
        # Re-add to the queue so that we don't lose tasks
        print 'Exception (%s) -- will retry in 30 seconds' % exc
        traceback.print_exc()
        raise create_shape_image_sample_task.retry(
            exc=exc, countdown=30)
| StarcoderdataPython |
from collections import defaultdict
from typing import List
class Solution:
    def largestWordCount(self, messages: List[str], senders: List[str]) -> str:
        """Return the sender who sent the largest total number of words.

        messages[i] was sent by senders[i]; a message's word count is the
        number of whitespace-separated tokens.  Ties on the word count are
        broken by the lexicographically largest sender name.

        Fixes: ``List`` was used without being imported (NameError at class
        definition time); the running candidate-list bookkeeping is replaced
        by a single max over (count, name).
        """
        # Total words sent per sender.
        word_counts = defaultdict(int)
        for message, sender in zip(messages, senders):
            word_counts[sender] += len(message.split())
        # Largest count wins; on equal counts the larger name wins.
        return max(word_counts, key=lambda s: (word_counts[s], s))
12811168 | <reponame>BytewaveMLP/Advent-of-Code-2020<filename>Day 18/part2.py
import sys
import re
# Read the puzzle input file named on the command line, one expression per
# line, skipping empty lines.
input_lines = [line for line in open(sys.argv[1]).read().split('\n') if line != '']
class N:
    """Integer wrapper with swapped operators for AoC 2020 day 18 part 2:
    ``/`` performs addition and ``+`` performs multiplication, so that after
    rewriting the input ('+' -> '/', '*' -> '+') Python's precedence rules
    evaluate the puzzle's '+' before its '*'."""
    def __init__(self, n):
        self.n = n
    def __truediv__(self, other):
        # '/' stands in for the puzzle's '+' (binds tighter than '+').
        return N(self.n + other.n)
    def __add__(self, other):
        # '+' stands in for the puzzle's '*'.
        return N(self.n * other.n)
results = []
for line in input_lines:
    # Rewrite: real '+' -> '/' (addition), real '*' -> '+' (multiplication),
    # and wrap every number in N(...), so Python precedence applies the
    # part-2 rule ("addition before multiplication").
    # NOTE: eval of transformed puzzle input -- fine for AoC, never for
    # untrusted data.
    line = re.sub(r'(\d+)', r'N(\1)', line.replace('+', '/').replace('*', '+'))
    print(line)
    result = eval(line).n
    print(result)
    results.append(result)
print(sum(results))
| StarcoderdataPython |
6501211 | <reponame>mshafer1/preview-ahk-gen<gh_stars>10-100
import re
from collections import defaultdict
from enum import Enum
import pytest
from selenium.webdriver.common.by import By
class TriggerTypes(Enum):
    """Hotkey trigger kinds; values match the page's radio input values."""
    KEY = "KEY"
    STRING = "STRING"
class AssertionObject:
    """Bundles expected UI state and checks it against a live browser page.

    Empty expectations mean "do not check that aspect".
    """

    def __init__(self, expected_trigger_types=(), expected_hotkey_ids=()):
        # Immutable () defaults instead of the mutable-default-argument
        # anti-pattern; callers may still pass lists as before.
        self._trigger_types = expected_trigger_types
        self._hotkey_ids = expected_hotkey_ids

    def check(self, browser, parser, subtests):
        """Assert checked radio values and hotkey row ids match expectations.

        Uses ``subtests`` so each aspect fails independently.
        """
        page = browser.page_source
        if self._trigger_types:
            checked_selectors = browser.find_elements(
                By.CSS_SELECTOR, "input[type='radio']:checked"
            )
            values = [selector.get_attribute("value") for selector in checked_selectors]
            with subtests.test(
                expected_trigger_types=self._trigger_types, actual_trigger_types=values
            ):
                assert values == [trigger_type.value for trigger_type in self._trigger_types]
        if self._hotkey_ids:
            parsed = parser(page)
            row_id_inputs = parsed.find_all("input", {"class": "js-index"})
            values = [id_input["value"] for id_input in row_id_inputs]
            with subtests.test(expected_hotkey_ids=self._hotkey_ids, hotkey_ids=values):
                assert values == self._hotkey_ids
def _get_elements_through_browser(
path_type, path, filter, filter_attr, browser,
):
elements = browser.find_elements(path_type, path)
desired_elements = [i for i in elements if filter(i.get_attribute(filter_attr))]
return desired_elements
def _get_elements_and_desired_value_through_browser(
    path_type, path, filter, filter_attr, desired_attr, browser, sort_attribute="name"
):
    """Map each matching element's ``sort_attribute`` to its ``desired_attr``
    value; keys that occur more than once accumulate their values in a list."""
    matching = _get_elements_through_browser(path_type, path, filter, filter_attr, browser)
    collected = {}
    for element in matching:
        key = element.get_attribute(sort_attribute)
        value = element.get_attribute(desired_attr)
        if key not in collected:
            collected[key] = value
        elif isinstance(collected[key], list):
            collected[key].append(value)
        else:
            # Second occurrence: promote the single stored value to a list.
            collected[key] = [collected[key], value]
    return collected
def __sanitize_html_inputs(function_signature):
    r"""
    >>> __sanitize_html_inputs('ActivateOrOpen( "<input type="text" name="Window0" id="window0" placeholder="Window" class="keyWidth" oninput="markDirty()" required="">", <span class="w3-hide-large"><br></span> "<input id="program0" type="text" name="Program0" placeholder="Program" class="keyWidth" oninput="markDirty()" required="">") <input type="hidden" value="ActivateOrOpen" name="option0" id="option0">')
    'ActivateOrOpen("\\{Window0\\}", "\\{Program0\\}")'
    >>> __sanitize_html_inputs('Send( "<input name="input0" id="input0" type="text" placeholder="input" oninput="markDirty()" required="">") <input type="hidden" value="Send" name="option0" id="option0">')
    'Send("\\{input0\\}")'
    >>> __sanitize_html_inputs('Replace( "<input type="text" name="input0" id="input0" placeholder="input" oninput="markDirty()" required="">") <input type="hidden" value="Replace" name="option0" id="option0">')
    'Replace("\\{input0\\}")'
    >>> __sanitize_html_inputs('SendUnicodeChar(<input name="input0" id="input0" type="text" placeholder="0x000" class="keyWidth" oninput="markDirty()" required="">)')
    'SendUnicodeChar(\\{input0\\})'
    >>> __sanitize_html_inputs('Custom: <textarea name=\"Code0\" id=\"code0\" placeholder=\"code\" class=\"codeArea\" oninput=\"markDirty()\" required=\"\"></textarea>)')
    'Custom: \\{Code0\\})'
    >>> __sanitize_html_inputs('<span title="Removes what was just typed (for hotstring, but treated as Send for hotkey) and sends the valued\ni.e. Replace("by the way") can be used with a hotstring of btw to cause it to be expanded when typed">Replace(\n    "\\{text0\\}"\n)</span>')
    'Replace("\\{text0\\}")'
    >>> __sanitize_html_inputs('ActivateOrOpenChrome(<span class="w3-hide-large w3-hide-medium"><br/></span>"<input type="text" name="Window0" id="window0" placeholder="tab name" class="keyWidth" oninput="markDirty()" required/>", <span class="w3-hide-large"><br/></span>"<input id="program0" type="text" name="Program0" placeholder="URL" class="keyWidth" oninput="markDirty()" required/>")')
    'ActivateOrOpenChrome("\\{Window0\\}", "\\{Program0\\}")'
    >>> __sanitize_html_inputs('<span title="A sandbox for creating your own usage of the hotkey/hotstring">Custom:<span class="may-break-space"> </span><span class="w3-hide-large "><br></span><textarea name="Code0" id="code0" placeholder="Code" class="keyWidth" oninput="markDirty()" title="" required=""></textarea></span>')
    'Custom: \\{Code0\\}'
    """
    # Matches a (possibly quote-wrapped) <input .../> or <textarea ...></textarea>
    # and captures its name attribute in group 3.
    _arg_regex = r"(\"?)\<(input|textarea) .*?name=\"(.+?)\".+?\>(?:\<\/\2\>)?\1"
    # remove hidden option input
    function_signature = re.sub(r"\<input type=\"hidden\".+?\/?\>", "", function_signature).strip()
    # remove title text span (keep only its inner content)
    function_signature = re.sub(
        r"\<span title=\"[\d\D]+?\"[\d\D]*?\>([\d\D]+)\<\/span\>", r"\1", function_signature,
    ) # TODO: after integration, add title to testing
    # replace arg inputs with their field names
    function_signature = re.sub(_arg_regex, r"\1\{\3\}\1", function_signature).replace("\t", "")
    # clean up newlines (both literal and escaped)
    function_signature = re.sub(r"(?:\\n|\n)", r"", function_signature)
    # remove white space before quote marks, except after commas
    function_signature = re.sub(r"([^,])[\s \n]+\"", r'\1"', function_signature)
    # remove page break insertions
    function_signature = re.sub(r"\<span .+?\<br\/?\>\<\/span\>", "", function_signature)
    # add spaces after commas (like in-between parameters)
    function_signature = re.sub(r",([\"\'])", r", \1", function_signature)
    # add space between : and arg
    function_signature = re.sub(r"\:\\\{", r": \\{", function_signature)
    return function_signature
def _get_input(selector, matcher, desired_value, id_filter, browser, parser, dest_name, data_store):
    """Read matching page inputs and file each one into
    ``data_store[row_id][dest_name]``, deriving the row id from the input's
    ``name`` attribute via ``id_filter``.

    NOTE(review): ``parser`` is accepted but unused here; kept for a uniform
    call signature with the other scraping helpers.
    """
    found = _get_elements_and_desired_value_through_browser(
        By.CSS_SELECTOR, selector, matcher, "name", desired_value, browser,
    )
    for input_name, input_value in found.items():
        data_store[id_filter(input_name)][dest_name] = input_value
def loaded_data(browser, parser):
    """Scrape the current page state into ``{row_id: {...}}``.

    Each row dict may hold ``trigger_type``, ``comment``, ``trigger_keys``,
    ``modifier_keys`` and an ``action`` entry containing the sanitized
    function signature plus its argument values.
    """
    data = defaultdict(dict)
    page = browser.page_source
    parsed = parser(page)
    # NOTE(review): ``uses_ids`` is computed but never used afterwards.
    uses_ids = True
    row_id_inputs = parsed.find_all("input", {"class": "js-index"})
    hotkey_ids = [id_input["value"] for id_input in row_id_inputs]
    if not hotkey_ids:
        uses_ids = False
    # else:
    #     data["hotkey_ids"] = hotkey_ids
    # Selected trigger type (checked radio "funcN" -> row N).
    _get_input(
        selector="input[type='radio']:checked",
        matcher=lambda v: v.startswith("func") and v[-1].isnumeric(),
        desired_value="value",
        id_filter=lambda name: name[len("func") :],
        dest_name="trigger_type",
        browser=browser,
        parser=parser,
        data_store=data,
    )
    # Free-text comment fields ("commentN").
    _get_input(
        selector="input[type='text']",
        matcher=lambda v: v.startswith("comment"),
        desired_value="value",
        id_filter=lambda name: name[len("comment") :],
        dest_name="comment",
        browser=browser,
        parser=parser,
        data_store=data,
    )
    # Trigger key text fields ("skeyValueN").
    _get_input(
        selector="input[type='text']",
        matcher=lambda v: v.startswith("skeyValue"),
        desired_value="value",
        id_filter=lambda name: name[len("skeyValue") :],
        dest_name="trigger_keys",
        browser=browser,
        parser=parser,
        data_store=data,
    )
    # Checked modifier-key checkboxes (named "skeyN[]").
    _get_input(
        selector="input[type='checkbox']:checked",
        matcher=lambda name: name.startswith("skey") and name.endswith("[]"),
        desired_value="value",
        id_filter=lambda name: name[len("skey") : -2],
        dest_name="modifier_keys",
        browser=browser,
        parser=parser,
        data_store=data,
    )
    # Spans with id "functionN" hold the rendered action signature.
    selected_functions = _get_elements_through_browser(
        By.CSS_SELECTOR,
        path="span",
        filter=lambda id: id.startswith("function"),
        filter_attr="id",
        browser=browser,
    )
    for function in selected_functions:
        html_id = function.get_attribute("id")
        id_value = html_id[len("function") :]
        function_signature = function.get_attribute("innerHTML")
        function_signature = __sanitize_html_inputs(function_signature)
        # Argument values live in text inputs/textareas nested in the span;
        # note the search is scoped to ``function``, not the whole page.
        args = _get_elements_and_desired_value_through_browser(
            By.CSS_SELECTOR,
            r'input[type="text"], textarea',
            filter=lambda _: True,
            filter_attr="name",
            desired_attr="value",
            browser=function,
        )
        data[id_value]["action"] = {"function": function_signature, "args": args}
    return dict(data)
| StarcoderdataPython |
# Description: Delete all H-bonds in the selection, which is all by default.
# Source: placeHolder
"""
cmd.do('cmd.delete("hbonds")')
"""
# Execute inside PyMOL: removes the "hbonds" object/selection.
cmd.do('cmd.delete("hbonds")')
| StarcoderdataPython |
1917966 | <reponame>Borda/kaggle_cell-inst-segm
import os.path
# Absolute path of the directory containing this tests package.
_ROOT_TESTS = os.path.dirname(__file__)
# Directory holding static data fixtures used by the tests.
_ROOT_DATA = os.path.join(_ROOT_TESTS, "_data")
| StarcoderdataPython |
3406516 | # Main entry point for server
import logging
import threading
from config import SERVER_CONFIG
from app import App, Handler
from sys import exit, stdin
from utils import random_string
if __name__ == "__main__":
    # Configure logging
    logging.basicConfig(
        level=SERVER_CONFIG['log_level'],
        format='[%(levelname)s %(threadName)s] %(message)s',
    )
    # Start the server on a background thread so the main thread stays
    # free to read console commands.
    server_address = (SERVER_CONFIG['listen_host'], SERVER_CONFIG['listen_port'])
    app = App(server_address, Handler)
    thread = threading.Thread(target=app.serve_forever)
    thread.start()
    logging.info("Started serving at %s:%s" % server_address)
    try:
        # Receive console commands until "shutdown" or Ctrl-C.
        while True:
            cmd = stdin.readline()[:-1]  # Cut out the newline character
            if cmd == "":
                continue
            logging.info("Received Console Command: " + cmd)
            if cmd == "shutdown":
                break
            elif cmd == "geninvitecode":
                # Generate a random invite code and record who created it.
                code = random_string(10)
                with app.pool.acquire() as conn:
                    sql = "INSERT INTO InviteCode (code, created_by) VALUES (%s, %s)"
                    conn.execute(sql, (code, "console"))
                    success = conn.rowcount == 1
                if success:
                    logging.info("Console created invite code: %s" % code)
                else:
                    # logging.warn() is a deprecated alias; use warning().
                    logging.warning("Failed to create invite code.")
            elif "make_admin" in cmd:
                # TODO
                pass
    except KeyboardInterrupt:
        pass  # Exit on keyboard interrupt
    logging.info("Shutting down...")
    app.shutdown()
    thread.join()
8101242 | <filename>1.1/1.1.py
import os
# Must be set before importing TensorFlow to take effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Suppress warning that it can't use CUDA
import tensorflow as tf
v = tf.Variable(0)
@tf.function
def increment(v):
    """Return ``v + 1`` as a new tensor (traced as a TF graph function)."""
    v = tf.add(v, 1)
    return v
# NOTE: after the first call, ``v`` is rebound from a Variable to a Tensor.
for _ in range(3):
    v = increment(v)
print(v)
a = tf.constant([5])
b = tf.constant([2])
c = tf.add(a,b)
d = tf.subtract(a,b)
print ('c =: %s' % c)
print ('d =: %s' % d)
9656244 | from Block import Block
from ListaBlockchain import *
# Module-level chain instance and default (empty) record table.
l = BlockChain()
tabla = []
# TODO(review): verify the methods below behave correctly when called
# (translated from Spanish: "VERIFICAR QUE FUNCIONE BIEN EL METODO AL
# HACER LAS LLAMADAS").
def activar_SaveModo(registro):
    """Activate "save mode": append ``registro`` as a block to the chain,
    then regenerate the JSON dump and the chain graph image.

    NOTE(review): ``genesis_block`` is re-created on every call, and
    ``tabla = registro`` only rebinds a local name (the module-level
    ``tabla`` is untouched) -- confirm both are intended.
    """
    tabla = registro
    genesis_block = Block("0", tabla)
    # Empty chain: store the genesis block itself.
    if l.listaVacia() is True:
        #genesis_block = Block("0", tabla)
        l.agregarLista(str(genesis_block.transaction), str(genesis_block.block_hash))
        #bloque_anterior = bloque_genesis.block_hash
        l.archivo_json()
        l.GraficarConArchivo()
        #print(l.imprimir())
    else:
        tabla1 = registro
        # Chain the new block to the freshly created genesis block's hash.
        second_block = Block(genesis_block.block_hash, tabla1)
        l.agregarLista(str(second_block.transaction), str(second_block.block_hash))
        #bloque_anterior = bloque.block_hash
        #l.imprimir()
        l.GraficarConArchivo()
        l.archivo_json()
def modificar_cadena(indice,registro):
    """Replace the chain node at position ``indice`` with ``registro``."""
    l.modificarNodo(indice,registro)
    #l.GraficarConArchivo()
    #l.archivo_json()
def desactivar_SaveMo<br/>
def abrir():
    """Open/display the generated blockchain graph image."""
    l.abrirImagen()
393676 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import sentry.db.models.fields.bounded
import sentry.db.models.fields.foreignkey
class Migration(migrations.Migration):
    """Creates the AlertRuleTriggerAction table and updates two fields
    (audit-log event choices, webhook-error response code)."""

    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Adding indexes to large tables. These indexes should be created concurrently,
    #   unfortunately we can't run migrations outside of a transaction until Django
    #   1.10. So until then these should be run manually.
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False

    dependencies = [("sentry", "0002_912_to_recent")]

    operations = [
        migrations.CreateModel(
            name="AlertRuleTriggerAction",
            fields=[
                (
                    "id",
                    sentry.db.models.fields.bounded.BoundedBigAutoField(
                        serialize=False, primary_key=True
                    ),
                ),
                ("type", models.SmallIntegerField()),
                ("target_type", models.SmallIntegerField()),
                ("target_identifier", models.TextField(null=True)),
                ("target_display", models.TextField(null=True)),
                ("date_added", models.DateTimeField(default=django.utils.timezone.now)),
                (
                    "alert_rule_trigger",
                    sentry.db.models.fields.foreignkey.FlexibleForeignKey(
                        to="sentry.AlertRuleTrigger"
                    ),
                ),
                (
                    "integration",
                    sentry.db.models.fields.foreignkey.FlexibleForeignKey(
                        to="sentry.Integration", null=True
                    ),
                ),
            ],
            options={"db_table": "sentry_alertruletriggeraction"},
        ),
        migrations.AlterField(
            model_name="auditlogentry",
            name="event",
            field=sentry.db.models.fields.bounded.BoundedPositiveIntegerField(
                # NOTE(review): value 53 appears twice (enable/disable); this
                # matches the original definition -- confirm it is intended.
                choices=[
                    (1, b"member.invite"),
                    (2, b"member.add"),
                    (3, b"member.accept-invite"),
                    (5, b"member.remove"),
                    (4, b"member.edit"),
                    (6, b"member.join-team"),
                    (7, b"member.leave-team"),
                    (8, b"member.pending"),
                    (20, b"team.create"),
                    (21, b"team.edit"),
                    (22, b"team.remove"),
                    (30, b"project.create"),
                    (31, b"project.edit"),
                    (32, b"project.remove"),
                    (33, b"project.set-public"),
                    (34, b"project.set-private"),
                    (35, b"project.request-transfer"),
                    (36, b"project.accept-transfer"),
                    (37, b"project.enable"),
                    (38, b"project.disable"),
                    (10, b"org.create"),
                    (11, b"org.edit"),
                    (12, b"org.remove"),
                    (13, b"org.restore"),
                    (40, b"tagkey.remove"),
                    (50, b"projectkey.create"),
                    (51, b"projectkey.edit"),
                    (52, b"projectkey.remove"),
                    (53, b"projectkey.enable"),
                    (53, b"projectkey.disable"),
                    (60, b"sso.enable"),
                    (61, b"sso.disable"),
                    (62, b"sso.edit"),
                    (63, b"sso-identity.link"),
                    (70, b"api-key.create"),
                    (71, b"api-key.edit"),
                    (72, b"api-key.remove"),
                    (80, b"rule.create"),
                    (81, b"rule.edit"),
                    (82, b"rule.remove"),
                    (100, b"servicehook.create"),
                    (101, b"servicehook.edit"),
                    (102, b"servicehook.remove"),
                    (103, b"servicehook.enable"),
                    (104, b"servicehook.disable"),
                    (110, b"integration.add"),
                    (111, b"integration.edit"),
                    (112, b"integration.remove"),
                    (113, b"sentry-app.add"),
                    (115, b"sentry-app.remove"),
                    (116, b"sentry-app.install"),
                    (117, b"sentry-app.uninstall"),
                    (130, b"internal-integration.create"),
                    (135, b"internal-integration.add-token"),
                    (136, b"internal-integration.remove-token"),
                    (90, b"ondemand.edit"),
                    (91, b"trial.started"),
                    (92, b"plan.changed"),
                    (93, b"plan.cancelled"),
                    (140, b"invite-request.create"),
                    (141, b"invite-request.remove"),
                ]
            ),
        ),
        migrations.AlterField(
            model_name="sentryappwebhookerror",
            name="response_code",
            field=models.PositiveSmallIntegerField(null=True),
        ),
    ]
| StarcoderdataPython |
12855950 | from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from fdadb.models import MedicationName, MedicationNDC, MedicationStrength
class APITests(APITestCase):
    """End-to-end tests for the medication names/strengths/NDCs endpoints."""

    def setUp(self):
        # Fixtures: 4 medications x 3 strengths x 2 manufacturers.
        for name in ("DrugName", "OtherDrugName", "DruuuugName", "NamedDrug"):
            medication_name = MedicationName.objects.create(
                name=name, active_substances=[name + " Substance 1", name + " Substance 2"]
            )
            for strength in (1, 2, 3):
                medication_strength = MedicationStrength.objects.create(
                    medication_name=medication_name,
                    strength={
                        name + " Substance 1": {"strength": strength, "unit": "mg/l"},
                        name + " Substance 2": {"strength": strength + 5, "unit": "mg/l"},
                    },
                )
                for manufacturer in ("M1", "M2"):
                    MedicationNDC.objects.create(
                        medication_strength=medication_strength,
                        ndc=name[:5] + str(strength) + manufacturer,
                        manufacturer=manufacturer,
                    )

    def test_names_api(self):
        """Names endpoint lists all medications; ``q`` narrows the results."""
        url = reverse("fdadb-medications-names")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 4)
        self.assertEqual(response.data["results"][0]["name"], "DrugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["DrugName Substance 1", "DrugName Substance 2"]
        )
        response = self.client.get(url + "?q=Druuu")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["name"], "DruuuugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["DruuuugName Substance 1", "DruuuugName Substance 2"]
        )

    def test_strengths_api(self):
        """Strengths endpoint lists a medication's strengths; ``q`` narrows
        them (here matching the strength value)."""
        url = reverse("fdadb-medications-strengths", kwargs={"medication_name": "NamedDrug"})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 3)
        self.assertEqual(response.data["results"][0]["name"], "NamedDrug")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["NamedDrug Substance 1", "NamedDrug Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "NamedDrug Substance 1": {"strength": 1, "unit": "mg/l"},
                "NamedDrug Substance 2": {"strength": 6, "unit": "mg/l"},
            },
        )
        response = self.client.get(url + "?q=3")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["name"], "NamedDrug")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["NamedDrug Substance 1", "NamedDrug Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "NamedDrug Substance 1": {"strength": 3, "unit": "mg/l"},
                "NamedDrug Substance 2": {"strength": 8, "unit": "mg/l"},
            },
        )

    def test_ndcs_api(self):
        """NDCs endpoint lists a strength's NDCs; ``q`` narrows them (here
        matching the manufacturer, case-insensitively)."""
        strength = MedicationStrength.objects.filter(medication_name__name="OtherDrugName").first()
        url = reverse("fdadb-medications-ndcs", kwargs={"medication_name": "OtherDrugName", "strength_id": strength.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 2)
        self.assertEqual(response.data["results"][0]["name"], "OtherDrugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["OtherDrugName Substance 1", "OtherDrugName Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "OtherDrugName Substance 1": {"strength": 1, "unit": "mg/l"},
                "OtherDrugName Substance 2": {"strength": 6, "unit": "mg/l"},
            },
        )
        self.assertEqual(response.data["results"][0]["manufacturer"], "M1")
        self.assertEqual(response.data["results"][0]["ndc"], "Other1M1")
        strength = MedicationStrength.objects.filter(medication_name__name="OtherDrugName").first()
        url = reverse("fdadb-medications-ndcs", kwargs={"medication_name": "OtherDrugName", "strength_id": strength.pk})
        response = self.client.get(url + "?q=m2")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["name"], "OtherDrugName")
        self.assertEqual(
            response.data["results"][0]["active_substances"], ["OtherDrugName Substance 1", "OtherDrugName Substance 2"]
        )
        self.assertEqual(
            response.data["results"][0]["strength"],
            {
                "OtherDrugName Substance 1": {"strength": 1, "unit": "mg/l"},
                "OtherDrugName Substance 2": {"strength": 6, "unit": "mg/l"},
            },
        )
        self.assertEqual(response.data["results"][0]["manufacturer"], "M2")
        self.assertEqual(response.data["results"][0]["ndc"], "Other1M2")
| StarcoderdataPython |
8178998 | <filename>beginner_source/former_torchies/nnft_tutorial.py<gh_stars>1000+
# -*- coding: utf-8 -*-
"""
nn package
==========
We’ve redesigned the nn package, so that it’s fully integrated with
autograd. Let's review the changes.
**Replace containers with autograd:**
You no longer have to use Containers like ``ConcatTable``, or modules like
``CAddTable``, or use and debug with nngraph. We will seamlessly use
autograd to define our neural networks. For example,
* ``output = nn.CAddTable():forward({input1, input2})`` simply becomes
``output = input1 + input2``
* ``output = nn.MulConstant(0.5):forward(input)`` simply becomes
``output = input * 0.5``
**State is no longer held in the module, but in the network graph:**
Using recurrent networks should be simpler because of this reason. If
you want to create a recurrent network, simply use the same Linear layer
multiple times, without having to think about sharing weights.
.. figure:: /_static/img/torch-nn-vs-pytorch-nn.png
:alt: torch-nn-vs-pytorch-nn
torch-nn-vs-pytorch-nn
**Simplified debugging:**
Debugging is intuitive using Python’s pdb debugger, and **the debugger
and stack traces stop at exactly where an error occurred.** What you see
is what you get.
Example 1: ConvNet
------------------
Let’s see how to create a small ConvNet.
All of your networks are derived from the base class ``nn.Module``:
- In the constructor, you declare all the layers you want to use.
- In the forward function, you define how your model is going to be
run, from input to output
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class MNISTConvNet(nn.Module):
    """Small two-conv, two-fc network for 1x28x28 MNIST digits.

    All submodules are declared in ``__init__`` (so they are registered and
    accessible by name) and wired together in ``forward``.
    """

    def __init__(self):
        # Instantiate every module here; the attribute names are how you
        # access them later.
        super(MNISTConvNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(10, 20, 5)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, input):
        # Two conv -> relu -> pool stages: 1x28x28 -> 10x12x12 -> 20x4x4.
        features = self.pool1(F.relu(self.conv1(input)))
        features = self.pool2(F.relu(self.conv2(features)))
        # Arbitrary Python control flow is legal here; autograd records
        # whatever actually executes, and modules hold no ephemeral state,
        # so they may be reused several times in one forward pass.
        # Flatten everything but the batch dimension (20 * 4 * 4 = 320)
        # and run the fully connected head.
        flat = features.view(features.size(0), -1)
        hidden = F.relu(self.fc1(flat))
        # NOTE: the final relu zeroes negative logits; kept to match the
        # original tutorial.
        return F.relu(self.fc2(hidden))
###############################################################
# Let's use the defined ConvNet now.
# You create an instance of the class first.

net = MNISTConvNet()
# Printing a module lists its registered submodules.
print(net)
########################################################################
# .. note::
#
#     ``torch.nn`` only supports mini-batches. The entire ``torch.nn``
#     package only supports inputs that are a mini-batch of samples, and not
#     a single sample.
#
#     For example, ``nn.Conv2d`` will take in a 4D Tensor of
#     ``nSamples x nChannels x Height x Width``.
#
#     If you have a single sample, just use ``input.unsqueeze(0)`` to add
#     a fake batch dimension.
#
# Create a mini-batch containing a single sample of random data and send the
# sample through the ConvNet.

input = torch.randn(1, 1, 28, 28)  # NOTE: shadows the builtin ``input``
out = net(input)
print(out.size())
########################################################################
# Define a dummy target label and compute error using a loss function.

target = torch.tensor([3], dtype=torch.long)
loss_fn = nn.CrossEntropyLoss() # LogSoftmax + ClassNLL Loss
err = loss_fn(out, target)
err.backward()
print(err)
########################################################################
# The output of the ConvNet ``out`` is a ``Tensor``. We compute the loss
# using that, and that results in ``err`` which is also a ``Tensor``.
# Calling ``.backward`` on ``err`` hence will propagate gradients all the
# way through the ConvNet to its weights
#
# Let's access individual layer weights and gradients:

print(net.conv1.weight.grad.size())
########################################################################
print(net.conv1.weight.data.norm()) # norm of the weight
print(net.conv1.weight.grad.data.norm()) # norm of the gradients
########################################################################
# Forward and Backward Function Hooks
# -----------------------------------
#
# We’ve inspected the weights and the gradients. But how about inspecting
# / modifying the output and grad\_output of a layer?
#
# We introduce **hooks** for this purpose.
#
# You can register a function on a ``Module`` or a ``Tensor``.
# The hook can be a forward hook or a backward hook.
# The forward hook will be executed when a forward call is executed.
# The backward hook will be executed in the backward phase.
# Let’s look at an example.
#
# We register a forward hook on conv2 and print some information
def printnorm(self, input, output):
    """Forward hook: print the types and sizes of a module's input/output."""
    # input is a tuple of packed inputs
    # output is a Tensor. output.data is the Tensor we are interested in
    print('Inside ' + self.__class__.__name__ + ' forward')
    print('')
    print('input: ', type(input))
    print('input[0]: ', type(input[0]))
    print('output: ', type(output))
    print('')
    print('input size:', input[0].size())
    print('output size:', output.data.size())
    print('output norm:', output.data.norm())
net.conv2.register_forward_hook(printnorm)
# This forward pass triggers the hook registered on conv2.
out = net(input)
########################################################################
#
# We register a backward hook on conv2 and print some information
def printgradnorm(self, grad_input, grad_output):
    """Backward hook: print types, sizes and norms of the grad tensors."""
    print('Inside ' + self.__class__.__name__ + ' backward')
    print('Inside class:' + self.__class__.__name__)
    print('')
    print('grad_input: ', type(grad_input))
    print('grad_input[0]: ', type(grad_input[0]))
    print('grad_output: ', type(grad_output))
    print('grad_output[0]: ', type(grad_output[0]))
    print('')
    print('grad_input size:', grad_input[0].size())
    print('grad_output size:', grad_output[0].size())
    print('grad_input norm:', grad_input[0].norm())
net.conv2.register_backward_hook(printgradnorm)
out = net(input)
err = loss_fn(out, target)
# The backward pass triggers the hook registered on conv2.
err.backward()
########################################################################
# A full and working MNIST example is located here
# https://github.com/pytorch/examples/tree/master/mnist
#
# Example 2: Recurrent Net
# ------------------------
#
# Next, let’s look at building recurrent nets with PyTorch.
#
# Since the state of the network is held in the graph and not in the
# layers, you can simply create an nn.Linear and reuse it over and over
# again for the recurrence.
class RNN(nn.Module):
    """Minimal recurrent cell: one Linear maps [data, hidden] to the next
    hidden state, another maps hidden to the output. State lives in the
    graph, so the same layers are simply reused each timestep."""

    # you can also accept arguments in your model constructor
    def __init__(self, data_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        combined_size = data_size + hidden_size
        self.i2h = nn.Linear(combined_size, hidden_size)
        self.h2o = nn.Linear(hidden_size, output_size)

    def forward(self, data, last_hidden):
        # Concatenate the new input with the previous hidden state along
        # the feature dimension, then project twice.
        combined = torch.cat((data, last_hidden), 1)
        next_hidden = self.i2h(combined)
        prediction = self.h2o(next_hidden)
        return next_hidden, prediction
rnn = RNN(50, 20, 10)
########################################################################
#
# A more complete Language Modeling example using LSTMs and Penn Tree-bank
# is located
# `here <https://github.com/pytorch/examples/tree/master/word\_language\_model>`_
#
# PyTorch by default has seamless CuDNN integration for ConvNets and
# Recurrent Nets

loss_fn = nn.MSELoss()
batch_size = 10
TIMESTEPS = 5
# Create some fake data
batch = torch.randn(batch_size, 50)
hidden = torch.zeros(batch_size, 20)
target = torch.zeros(batch_size, 10)
loss = 0
for t in range(TIMESTEPS):
    # yes! you can reuse the same network several times,
    # sum up the losses, and call backward!
    hidden, output = rnn(batch, hidden)
    loss += loss_fn(output, target)
# A single backward pass propagates through all five timesteps.
loss.backward()
| StarcoderdataPython |
11239807 | <gh_stars>1-10
import pytest
from wemake_python_styleguide.violations.consistency import (
InconsistentReturnVariableViolation,
)
from wemake_python_styleguide.visitors.ast.keywords import (
ConsistentReturningVariableVisitor,
)
# Correct:
correct_example1 = """
def some_function():
return 1
"""
correct_example2 = """
def some_function():
some_value = 1
other_value = 2
return some_value + other_value
"""
correct_example3 = """
def some_function():
some_value = 1
name = last_name + some_value
return name, some_value
"""
correct_example4 = """
def some_function():
some_value = 1
some_value += 1
return some_value
"""
correct_example5 = """
def some_function():
some_value = []
some_value.append(1)
return some_value
"""
correct_example6 = """
def foo():
x, _ = some_tuple
return x
"""
correct_example7 = """
def foo():
x.id += some_tuple
return x.id
"""
correct_example8 = """
def foo():
x[0]: int = s[0]
return x[0]
"""
correct_example9 = """
def foo():
x.attr = 1
return x.attr
"""
# Regression to 1116
# https://github.com/wemake-services/wemake-python-styleguide/issues/1116
correct_example10 = """
def foo():
x.attr = 1
print()
return x.attr
"""
# Regression to 1116
# https://github.com/wemake-services/wemake-python-styleguide/issues/1116
correct_example11 = """
def foo():
attr = 1
print()
return attr
"""
correct_example12 = """
def some():
if something:
return something
"""
correct_example13 = """
def some():
if something:
other = 1
return something
"""
correct_example14 = """
def some():
other = 2
if something:
other = 1
else:
return other
"""
correct_example15 = """
def some():
return some
"""
correct_example16 = """
def some():
x = 1
return
"""
correct_example17 = """
def some():
x, y = 1
return y, x
"""
correct_example18 = """
def some():
x, y, z = 1, 2, 3
return x, y
"""
correct_example19 = """
def some():
x, y, z = 1, 2, 3
return y, z
"""
correct_example20 = """
def some():
x, y, z = 1, 2, 3
return 0, y, z
"""
correct_example21 = """
def some():
x, y, z = 1, 2, 3
return x, y, z, 0
"""
correct_example22 = """
def some():
x, y, z = some
return x(), y, z
"""
correct_example23 = """
def some():
x, y, z = some
return x[0], y, z
"""
# Wrong:
wrong_example1 = """
def function():
some_value = 1
return some_value
"""
wrong_example2 = """
def some_function():
some_value = 1
return some_value
"""
wrong_example3 = """
def some_function():
some_value: int = 1
return some_value
"""
# Regression to 598
# https://github.com/wemake-services/wemake-python-styleguide/issues/598
wrong_example4 = """
def foo():
function_result = function(*args, **kwargs)
return function_result
"""
# Regression to 674
# https://github.com/wemake-services/wemake-python-styleguide/issues/674
wrong_example5 = """
def report_progress(function):
def decorator(*args, **kwargs):
function_result = function(*args, **kwargs)
return function_result
return decorator
"""
# ifs
wrong_example6 = """
def wrong_if():
if something:
other = 1
return other
"""
wrong_example7 = """
def wrong_if():
if something:
...
else:
other = 1
return other
"""
# fors
wrong_example8 = """
def wrong_for():
for i in something:
other = i
return other
"""
wrong_example9 = """
def wrong_for():
for i in something:
...
else:
other = 0
return other
"""
# whiles
wrong_example10 = """
def wrong_while():
while something:
other = 1
return other
"""
wrong_example11 = """
def wrong_while():
while something:
...
else:
other = 2
return other
"""
# tries
wrong_example12 = """
def wrong_try():
try:
other = 1
return other
except:
...
"""
wrong_example13 = """
def wrong_try():
try:
...
except:
other = 1
return other
"""
wrong_example14 = """
def wrong_try():
try:
...
except:
...
else:
other = 1
return other
"""
wrong_example15 = """
def wrong_try():
try:
...
finally:
other = 1
return other
"""
# tuples
# Fixture: a tuple return that mirrors the assignment exactly.
# NOTE: this constant was previously assigned twice with identical content;
# the duplicate assignment has been removed.
wrong_example16 = """
def wrong_try():
    x, y, z = 1, 2, 3
    return x, y, z
"""
# double
double_wrong_example1 = """
def some():
if something() == 1:
some = 1
return some
else:
other = 2
return other
"""
@pytest.mark.parametrize('code', [
    wrong_example1,
    wrong_example2,
    wrong_example3,
    wrong_example4,
    wrong_example5,
    wrong_example6,
    wrong_example7,
    wrong_example8,
    wrong_example9,
    wrong_example10,
    wrong_example11,
    wrong_example12,
    wrong_example13,
    wrong_example14,
    wrong_example15,
    # NOTE(review): ``wrong_example16`` (tuple return) is not in this list;
    # confirm whether its omission is intentional or an oversight.
])
def test_wrong_return_variable(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
    mode,
):
    """Testing incorrect `return` statements."""
    # Each wrong fixture must raise exactly one violation.
    tree = parse_ast_tree(mode(code))
    visitor = ConsistentReturningVariableVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [InconsistentReturnVariableViolation])
@pytest.mark.parametrize('code', [
    correct_example1,
    correct_example2,
    correct_example3,
    correct_example4,
    correct_example5,
    correct_example6,
    correct_example7,
    correct_example8,
    correct_example9,
    correct_example10,
    correct_example11,
    correct_example12,
    correct_example13,
    correct_example14,
    correct_example15,
    correct_example16,
    correct_example17,
    correct_example18,
    correct_example19,
    correct_example20,
    correct_example21,
    correct_example22,
    correct_example23,
])
def test_correct_return_statements(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
    mode,
):
    """Testing correct `return` statements."""
    # Correct fixtures must produce no violations at all.
    tree = parse_ast_tree(mode(code))
    visitor = ConsistentReturningVariableVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [])
def test_double_wrong_return_variable(
    assert_errors,
    parse_ast_tree,
    default_options,
    mode,
):
    """Testing double incorrect `return` statements."""
    # The fixture assigns-and-returns in both branches, so the visitor
    # must report the same violation twice.
    tree = parse_ast_tree(mode(double_wrong_example1))
    visitor = ConsistentReturningVariableVisitor(default_options, tree=tree)
    visitor.run()
    assert_errors(visitor, [
        InconsistentReturnVariableViolation,
        InconsistentReturnVariableViolation,
    ])
| StarcoderdataPython |
9781813 | <gh_stars>0
# stdlib
import re
import sys
from typing import Any
from typing import List
from typing import Tuple
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
# syft relative
from ... import serialize
from ...proto.core.node.common.action.action_pb2 import Action as Action_PB
from ...proto.core.plan.plan_pb2 import Plan as Plan_PB
from ..common.object import Serializable
from ..common.serde.serializable import bind_protobuf
from ..node.abstract.node import AbstractNode
from ..node.common.action.common import Action
from ..node.common.util import listify
from ..pointer.pointer import Pointer
CAMEL_TO_SNAKE_PAT = re.compile(r"(?<!^)(?=[A-Z])")
@bind_protobuf
class Plan(Serializable):
    """
    A plan is a collection of actions, plus some variable inputs, that together form a computation graph.
    Attributes:
        actions: list of actions
        inputs: Pointers to the inputs. Defaults to None.
    """

    def __init__(
        self, actions: List[Action], inputs: Union[Pointer, List[Pointer], None] = None
    ):
        self.actions = actions
        # listify normalizes a single Pointer / None into a list of Pointers.
        self.inputs: List[Pointer] = listify(inputs)

    def __call__(
        self, node: AbstractNode, verify_key: VerifyKey, *args: Tuple[Any]
    ) -> None:
        """
        1) For all pointers that were passed into the init as `inputs`, this method
        replaces those pointers in self.actions by the pointers passed in as *args.
        2) Executes the actions in self.actions one by one
        *While this function requires `node` and `verify_key` as inputs, during remote
        execution, passing these is handled in `RunClassMethodAction`*
        *Note that this method will receive *args as pointers during execution. Normally,
        pointers are resolved during `RunClassMethodAction.execute()`, but not for plans,
        as they need to operate on the pointer to enable remapping of the inputs.*
        Args:
            *args: the new inputs for the plan, passed as pointers
        """
        inputs = listify(args)
        # this is pretty cumbersome, we are searching through all actions to check
        # if we need to redefine some of their attributes that are inputs in the
        # graph of actions
        for i, (current_input, new_input) in enumerate(zip(self.inputs, inputs)):
            for a in self.actions:
                if hasattr(a, "remap_input"):
                    a.remap_input(current_input, new_input)  # type: ignore
            # redefine the inputs of the plan
            self.inputs[i] = new_input
        # Execute every action in order against the local node.
        for a in self.actions:
            a.execute_action(node, verify_key)

    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        """Return the type of protobuf object which stores a class of this type
        As a part of serialization and deserialization, we need the ability to
        lookup the protobuf object type directly from the object type. This
        static method allows us to do this.
        Importantly, this method is also used to create the reverse lookup ability within
        the metaclass of Serializable. In the metaclass, it calls this method and then
        it takes whatever type is returned from this method and adds an attribute to it
        with the type of this class attached to it. See the MetaSerializable class for details.
        :return: the type of protobuf object which corresponds to this class.
        :rtype: GeneratedProtocolMessageType
        """
        return Plan_PB

    def _object2proto(self) -> Plan_PB:
        """Returns a protobuf serialization of self.
        As a requirement of all objects which inherit from Serializable,
        this method transforms the current object into the corresponding
        Protobuf object so that it can be further serialized.
        :return: returns a protobuf object
        :rtype: ObjectWithID_PB
        .. note::
            This method is purely an internal method. Please use object.serialize() or one of
            the other public serialization methods if you wish to serialize an
            object.
        """

        def camel_to_snake(s: str) -> str:
            # e.g. "RunClassMethodAction" -> "run_class_method_action"
            return CAMEL_TO_SNAKE_PAT.sub("_", s).lower()

        # Each concrete action is stored in the Action_PB oneof field named
        # after the snake_case form of its class name.
        actions_pb = [
            Action_PB(
                obj_type=".".join([action.__module__, action.__class__.__name__]),
                **{camel_to_snake(action.__class__.__name__): serialize(action)}
            )
            for action in self.actions
        ]
        inputs_pb = [inp._object2proto() for inp in self.inputs]
        return Plan_PB(actions=actions_pb, inputs=inputs_pb)

    @staticmethod
    def _proto2object(proto: Plan_PB) -> "Plan":
        """Creates a ObjectWithID from a protobuf
        As a requirement of all objects which inherit from Serializable,
        this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of Plan
        :rtype: Plan
        .. note::
            This method is purely an internal method. Please use syft.deserialize()
            if you wish to deserialize an object.
        """
        actions = []
        for action_proto in proto.actions:
            # Resolve the concrete Action subclass from its dotted module path.
            module, cls_name = action_proto.obj_type.rsplit(".", 1)
            action_cls = getattr(sys.modules[module], cls_name)
            # protobuf does no inheritance, so we wrap action subclasses
            # in the main action class.
            inner_action = getattr(action_proto, action_proto.WhichOneof("action"))
            actions.append(action_cls._proto2object(inner_action))
        inputs = [
            Pointer._proto2object(pointer_proto) for pointer_proto in proto.inputs
        ]
        return Plan(actions=actions, inputs=inputs)
| StarcoderdataPython |
6560677 | <reponame>nevooronni/PasswordLocker<filename>accounts.py
import pyperclip
import random
import string
class User:
    """A user account for the password locker.

    All saved accounts are kept in the class-level ``user_list`` registry.
    """

    # Shared registry of every saved User instance.
    user_list = []

    def __init__(self, first_name, last_name, email, password):
        """Create a user account.

        Args:
            first_name: new user's first name.
            last_name: new user's last name.
            email: new user's email address.
            password: new user's login password.
        """
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.password = password

    def save_user(self):
        """Add this account to the shared ``user_list``."""
        User.user_list.append(self)

    def delete_user(self):
        """Remove this account from the shared ``user_list``."""
        User.user_list.remove(self)

    @classmethod
    def find_by(cls, email, password):
        """Return the saved user matching *email* and *password*.

        Args:
            email: user email address.
            password: user password.
        Returns:
            The matching User, or ``None`` when no account matches.
        """
        for user in cls.user_list:
            if user.email == email and user.password == password:
                return user
        # Explicit not-found result (previously implicit).
        return None

    @classmethod
    def user_exist(cls, email, password):
        """Check whether an account with the given credentials exists.

        Args:
            email: user email address.
            password: user password.
        Returns:
            bool: True if a matching account is saved, otherwise False.
        """
        # Bug fix: previously fell off the end and returned None when no
        # account matched; now returns an explicit False (consistent with
        # Credentials.credential_check).
        for user in cls.user_list:
            if user.email == email and user.password == password:
                return True
        return False

    @classmethod
    def list_users(cls):
        """Return the list of all saved user accounts."""
        return cls.user_list
class Credentials:
    """A stored credential: account name, description and password.

    All saved credentials are kept in the class-level ``credential_list``.
    """

    # Shared registry of every saved Credentials instance.
    credential_list = []

    def __init__(self, account_name, description, password):
        """Create a credential entry.

        Args:
            account_name: name of the account the password belongs to.
            description: short description / important details to remember.
            password: custom or generated password for the account.
        """
        self.account_name = account_name
        self.description = description
        self.password = password

    def save_credential(self):
        """Add this credential to the shared ``credential_list``."""
        Credentials.credential_list.append(self)

    def delete_credential(self):
        """Remove this credential from the shared ``credential_list``."""
        Credentials.credential_list.remove(self)

    def generate_password(self, length):
        """Return a random password of *length* uppercase letters and digits.

        Bug fix: the original implementation discarded the generated string,
        compared against an undefined placeholder name (a syntax error) and
        returned a stray loop variable.  It now builds and returns the
        password.

        Args:
            length: how many characters the password should contain.
        Returns:
            str: the generated password.
        """
        # NOTE(review): ``random`` is not cryptographically secure; consider
        # the stdlib ``secrets`` module for real password generation.
        characters = string.ascii_uppercase + string.digits
        return ''.join(random.choice(characters) for _ in range(length))

    @classmethod
    def find_by_name(cls, name):
        """Return the saved credential whose account name matches *name*.

        Args:
            name: account name to look up.
        Returns:
            The matching Credentials instance, or ``None`` when not found.
        """
        for credential in cls.credential_list:
            if credential.account_name == name:
                return credential
        return None

    @classmethod
    def credential_check(cls, name):
        """Return True if a credential with account name *name* is saved."""
        for credential in cls.credential_list:
            if credential.account_name == name:
                return True
        return False

    @classmethod
    def show_credentials(cls):
        """Return the list of all saved credentials."""
        return cls.credential_list

    @classmethod
    def generate_random_password(cls):
        """Return an 8-character password drawn from letters, digits and
        the punctuation set ``!$<>#%*``.
        """
        length = 8
        characters = (
            string.ascii_uppercase
            + string.ascii_lowercase
            + string.digits
            + '!$<>#%*'
        )
        return ''.join(random.choice(characters) for _ in range(length))
| StarcoderdataPython |
6493958 | # Задача 12. Вариант 7.
# Разработайте игру "Крестики-нолики". (см. М.Доусон Программируем на Python гл. 6)
# <NAME>.
# 26.05.2016
import random
# tic-tac-toe game constants
X = "X"          # cross marker
O = "O"          # nought marker
EMPTY = " "      # unoccupied square
TIE = "ничья"    # Russian for "draw/tie" (user-facing result string)
NUM_SQUARES = 9  # 3x3 board
def main():
    """Game entry point placeholder; returns exit status 0."""
    return 0
def display_instruct():
    """Print the board layout showing how squares are numbered (0-8)."""
    print ("""
0 | 1 | 2
—-----—
3 | 4 | 5
—-----—
6 | 7 | 8
""")
def ask_yes_no(question):
    """Prompt with *question* until the answer is 'y' or 'n'; return it."""
    while True:
        reply = input(question).lower()
        if reply in ("y", "n"):
            return reply
def ask_number(question, low, high):
    """Prompt until the user enters an integer in ``range(low, high)``.

    Bug fix: non-numeric input previously raised an uncaught ValueError and
    crashed the game; such input is now rejected and the user re-prompted.

    Args:
        question: prompt text shown to the user.
        low: inclusive lower bound.
        high: exclusive upper bound.
    Returns:
        int: the accepted number.
    """
    response = None
    while response not in range(low, high):
        try:
            response = int(input(question))
        except ValueError:
            # Not a number at all -- keep prompting.
            response = None
    return response
def pieces():
    """Ask who moves first; return the (computer, human) piece assignment."""
    go_first = ask_yes_no("Кто ходит первым?\n")  # "Who goes first?"
    if go_first == "y":
        print ("Твой ход!")  # "Your move!"
        human = X
        computer = O
    else:
        print ("Начинаю")  # "I'll start"
        human = O
        computer = X
    return computer, human
def new_board():
    """Return a fresh board: NUM_SQUARES empty squares."""
    return [EMPTY] * NUM_SQUARES
def display_board(board): | StarcoderdataPython |
1932240 | # -*- coding: utf-8 -*-
#pylint: skip-file
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import ply.lex as lex
#CORE_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'libs')
#CORE_LIBS = os.listdir(CORE_LIBS_PATH)
class OqasmLexer(object):
    """OPENQASM Lexer.
    This is a wrapper around the PLY lexer to support the "include" statement
    by creating a stack of lexers.
    """
    # pylint: disable=invalid-name,missing-docstring,unused-argument
    # pylint: disable=attribute-defined-outside-init

    # NOTE: with PLY, each ``t_*`` method's docstring IS the token's regular
    # expression -- those docstrings are behavioural and must not be edited.

    def __init__(self):
        self.tokens = OqasmLexer.tokens
        self.reserved = OqasmLexer.reserved

    # ---- Beginning of the PLY lexer ----
    # Single-character literal tokens.
    literals = r'=()[]{};<>,.+-/*^"'
    # OPENQASM reserved keywords mapped to their token names.
    reserved = {
        'barrier': 'BARRIER',
        'creg': 'CREG',
        'gate': 'GATE',
        'if': 'IF',
        'measure': 'MEASURE',
        'opaque': 'OPAQUE',
        'qreg': 'QREG',
        'pi': 'PI',
        'reset': 'RESET',
    }
    tokens = [
        'NNINTEGER',
        'REAL',
        'CX',
        'U',
        'FORMAT',
        'ASSIGN',
        'MATCHES',
        'ID',
        'IGNORE',
    ] + list(reserved.values())

    def t_REAL(self, t):
        r'(([0-9]+|([0-9]+)?\.[0-9]+|[0-9]+\.)[eE][+-]?[0-9]+)|(([0-9]+)?\.[0-9]+|[0-9]+\.)'
        t.value = float(t.value)
        return t

    def t_NNINTEGER(self, t):
        r'[1-9]+[0-9]*|0'
        t.value = int(t.value)
        return t

    def t_ASSIGN(self, t):
        '->'
        return t

    def t_MATCHES(self, t):
        '=='
        return t

    def t_IGNORE(self, t):
        r'\"([^\\\"]|\\.)*\"'
        return t

    # The include might be dropped, or ignored, as we probably won't need it
    def t_INCLUDE(self, t):
        'include'
        #
        # Now eat up the next two tokens which must be
        # 1 - the name of the include file, and
        # 2 - a terminating semicolon
        #
        # Then push the current lexer onto the stack, create a new one from
        # the include file, and push it onto the stack.
        #
        # When we hit eof (the t_eof) rule, we pop.
        next_token = self.lexer.token()
        lineno = next_token.lineno
        # print('NEXT', next, "next.value", next.value, type(next))
        #if isinstance(next_token.value, str):
        #    incfile = next_token.value.strip('"')
        #else:
        #    raise SyntaxError("Invalid include: must be a quoted string.")
        #if incfile in CORE_LIBS:
        #    incfile = os.path.join(CORE_LIBS_PATH, incfile)
        next_token = self.lexer.token()
        # The include filename must be followed by a terminating semicolon.
        if next_token is None or next_token.value != ';':
            raise SyntaxError('Invalid syntax, missing ";" at line', str(lineno))
        #if not os.path.exists(incfile):
        #    raise IOError(
        #        'Include file %s cannot be found, line %s, file %s' %
        #        (incfile, str(next_token.lineno), self.filename))
        #self.push(incfile)
        return self.lexer.token()

    def t_FORMAT(self, t):
        r'OPENQASM\s+(\d+)\.(\d+)'
        return t

    def t_COMMENT(self, t):
        r'//.*'
        pass

    def t_CX(self, t):
        'CX'
        return t

    def t_U(self, t):
        'U'
        return t

    def t_ID(self, t):
        r'[a-z][a-zA-Z0-9_]*'
        # Promote reserved words to their keyword token types.
        if t.value in self.reserved:
            t.type = self.reserved[t.value]
            return t
        return t

    def t_newline(self, t):
        r'\n+'
        # Track line numbers across newline runs.
        self.lexer.lineno += len(t.value)

    t_ignore = ' \t'

    def t_error(self, t):
        print("Unable to match any token rule, got -->%s<--" % t.value[0])
        print("Check your OPENQASM source and any include statements.")
        t.lexer.skip(1)

    def build(self, **kwargs):
        """ Builds the lexer """
        self.lexer = lex.lex(module=self, **kwargs)
| StarcoderdataPython |
175478 | #
# CircleCI
#
from utils import abort_with_message
import constants
import json
import os
import requests
import sys
# Python 2 script (uses print statements).
def get_circleci_api_token():
    """Return the CircleCI API token from the environment, or abort."""
    circleci_api_token = os.environ.get(constants.CIRCLECI_API_TOKEN_ENV_VAR)
    if not circleci_api_token:
        # abort_with_message exits the process with an error message.
        abort_with_message('You need to set the CIRCLECI_API_TOKEN environment variable.')
    print 'Found CircleCI API token.'
    return circleci_api_token
def do_circleci_request(branch):
    """Trigger a CircleCI build for *branch* after interactive confirmation."""
    url = constants.URL_CIRCLECI + branch
    params = {'circle-token': get_circleci_api_token()}
    print 'CircleCI request to %s (params: %s)' % (url, json.dumps(params))

    # Ask before kicking off a build; anything not starting with 'y' aborts.
    continue_release = raw_input("\nDo you want to start a build? ").lower()
    if not continue_release.startswith('y'):
        print 'Aborting release'
        sys.exit()

    r = requests.post(url, params)
    print '- CircleCI response code: %s' % r.status_code
313928 | from __future__ import division
import unittest
import tttrlib
import numpy as np
class Tests(unittest.TestCase):
    """Unit tests for ``tttrlib.Pda`` parameter handling."""

    def test_getter_setter_pda(self):
        # test getter and setter
        pda = tttrlib.Pda()
        pda.hist2d_valid = True
        self.assertEqual(pda.hist2d_valid, True)
        pda.hist2d_valid = False
        self.assertEqual(pda.hist2d_valid, False)

        # Mutating any model parameter below is expected to invalidate the
        # cached 2D histogram (hist2d_valid flips back to False).
        pda.hist2d_valid = True
        pda.background_ch1 = 1.7
        self.assertEqual(pda.background_ch1, 1.7)
        self.assertEqual(pda.hist2d_valid, False)

        pda.hist2d_valid = True
        pda.background_ch2 = 0.7
        self.assertEqual(pda.background_ch2, 0.7)
        self.assertEqual(pda.hist2d_valid, False)

        pda.hist2d_valid = True
        pda.hist2d_nmin = 5
        self.assertEqual(pda.hist2d_nmin, 5)
        self.assertEqual(pda.hist2d_valid, False)

        pda.hist2d_valid = True
        pda.hist2d_nmax = 12
        self.assertEqual(pda.hist2d_nmax, 12)
        self.assertEqual(pda.hist2d_valid, False)

    def test_pda_python_extension(self):
        # Assigning the pf array round-trips and invalidates the histogram.
        pda = tttrlib.Pda()
        pf = np.arange(0, 10)
        pda.pf = pf
        self.assertEqual(np.all(pf == pda.pf), True)
        self.assertEqual(pda.hist2d_valid, False)
        # (The checks below are disabled in the original; kept for reference.)
        # pda.hist2d_valid = True
        # pg = np.array([0.1, 0.7, 0.2, 0.7, 0.7, 0.8])
        # pda.spectrum_ch1 = pg
        # self.assertEqual(np.all(pda.spectrum_ch1 == pg), True)
        # self.assertEqual(pda.hist2d_valid, False)
        #
        # pda.hist2d_valid = True
        # amplitudes = np.array([0.3, 0.7])
        # pda.species_amplitudes = amplitudes
        # self.assertEqual(np.all(pda.species_amplitudes == amplitudes), True)
        # self.assertEqual(pda.hist2d_valid, False)
    #
    # def test_pda_constructor(self):
    #     kw = {
    #         "hist2d_nmax": 222,
    #         "hist2d_nmin": 36,
    #     }
    #     pda = tttrlib.Pda(**kw)
    #     self.assertEqual(pda.hist2d_nmax, kw["hist2d_nmax"])
    #     self.assertEqual(pda.hist2d_nmin, kw["hist2d_nmin"])
    #
    # def test_pda_1(self):
    #     green_background = 1.5
    #     red_background = 0.6
    #     max_number_of_photons = 5
    #     pda = tttrlib.Pda(
    #         hist2d_nmax=5
    #     )
    #     pda.background_ch1 = green_background
    #     pda.background_ch2 = red_background
    #     self.assertEqual(
    #         pda.background_ch1,
    #         green_background,
    #     )
    #     self.assertEqual(
    #         pda.background_ch2,
    #         red_background
    #     )
    #
    #     amplitude = 0.5
    #     probability_green = 0.7
    #     pda.append(
    #         amplitude=amplitude,
    #         probability_ch1=probability_green
    #     )
    #     pF = np.ones(max_number_of_photons, dtype=np.float)
    #     pda.setPF(pF)
    #     ref = np.array(
    #         [[0.06122821, 0.05510539, 0.0275527, 0.01047002, 0.00347164,
    #           0.00089271],
    #          [0.13470207, 0.13408979, 0.07604544, 0.03344897, 0.01122497,
    #           0.],
    #          [0.16317319, 0.18414385, 0.12087368, 0.05405563, 0.,
    #           0.],
    #          [0.1486621, 0.19416385, 0.13015894, 0., 0.,
    #           0.],
    #          [0.1169788, 0.15670619, 0., 0., 0.,
    #           0.],
    #          [0.07159453, 0., 0., 0., 0.,
    #           0.]
    #         ]
    #     )
    #     self.assertEqual(np.allclose(pda.s1s2, ref), True)
3579821 | ##
# Copyright (c) 2014-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from getopt import getopt, GetoptError
import os
import re
import subprocess
import sys
from twext.enterprise.dal.model import Schema, Table, Column, Sequence
from twext.enterprise.dal.parseschema import addSQLToSchema, schemaFromPath
from twisted.python.filepath import FilePath
USERNAME = "caldav"
DATABASENAME = "caldav"
PGSOCKETDIR = "127.0.0.1"
SCHEMADIR = "./txdav/common/datastore/sql_schema/"
# Executables:
PSQL = "../postgresql/_root/bin/psql"
def usage(e=None):
    """Print command usage.

    When *e* is given, also print it to stderr and exit 64 (EX_USAGE);
    otherwise exit 0.
    """
    name = os.path.basename(sys.argv[0])
    print("usage: %s [options] username" % (name,))
    print("")
    print("  Check calendar server postgres database and schema")
    print("")
    print("options:")
    print("  -d: path to server's sql_schema directory [./txdav/common/datastore/sql_schema/]")
    print("  -k: postgres socket path (value for psql -h argument [127.0.0.1])")
    print("  -p: location of psql tool if not on PATH already [psql]")
    print("  -x: use default values for OS X server")
    print("  -h --help: print this help and exit")
    print("  -v --verbose: print additional information")
    print("")
    if e:
        sys.stderr.write("%s\n" % (e,))
        sys.exit(64)
    else:
        sys.exit(0)
def execSQL(title, stmt, verbose=False):
    """
    Execute the provided SQL statement, return results as a list of rows.

    @param title: description printed (in verbose mode) before executing
    @type title: L{str}
    @param stmt: the SQL to execute
    @type stmt: L{str}
    @param verbose: when True, echo the psql command line and its output
    @type verbose: L{bool}
    """
    # -t suppresses psql headers/footers so output is just data rows.
    cmdArgs = [
        PSQL,
        "-h", PGSOCKETDIR,
        "-d", DATABASENAME,
        "-U", USERNAME,
        "-t",
        "-c", stmt,
    ]
    try:
        if verbose:
            print("\n{}".format(title))
            print("Executing: {}".format(" ".join(cmdArgs)))
        out = subprocess.check_output(cmdArgs, stderr=subprocess.STDOUT)
        if verbose:
            print(out)
    except subprocess.CalledProcessError, e:
        if verbose:
            print(e.output)
        raise CheckSchemaError(
            "%s failed:\n%s (exit code = %d)" %
            (PSQL, e.output, e.returncode)
        )
    # Strip each line; psql's trailing blank line is dropped with [:-1].
    return [s.strip() for s in out.splitlines()[:-1]]
def getSchemaVersion(verbose=False):
    """
    Return the version number for the schema installed in the database.
    Raise CheckSchemaError if there is an issue.
    """
    out = execSQL(
        "Reading schema version...",
        "select value from calendarserver where name='VERSION';",
        verbose
    )
    try:
        # The first (only) result row holds the version value.
        version = int(out[0])
    except ValueError, e:
        raise CheckSchemaError(
            "Failed to parse schema version: %s" % (e,)
        )
    return version
def dumpCurrentSchema(verbose=False):
    """Introspect the live PostgreSQL database and rebuild a Schema model
    (tables, columns, indexes and sequences) from it."""
    schema = Schema("Dumped schema")
    tables = {}

    # Tables
    rows = execSQL(
        "Schema tables...",
        "select table_name from information_schema.tables where table_schema = 'public';",
        verbose
    )
    for row in rows:
        name = row
        table = Table(schema, name)
        tables[name] = table

        # Columns (note: ``rows``/``name`` are deliberately reused inside
        # this loop; the outer iterator is unaffected by the rebinding).
        rows = execSQL(
            "Reading table '{}' columns...".format(name),
            "select column_name from information_schema.columns where table_schema = 'public' and table_name = '{}';".format(name),
            verbose
        )
        for row in rows:
            name = row
            # TODO: figure out the type
            column = Column(table, name, None)
            table.columns.append(column)

    # Indexes
    # TODO: handle implicit indexes created via primary key() and unique() statements within CREATE TABLE
    rows = execSQL(
        "Schema indexes...",
        "select indexdef from pg_indexes where schemaname = 'public';",
        verbose
    )
    for indexdef in rows:
        # Feed each CREATE INDEX statement back through the SQL parser.
        addSQLToSchema(schema, indexdef.replace("public.", ""))

    # Sequences
    rows = execSQL(
        "Schema sequences...",
        "select sequence_name from information_schema.sequences where sequence_schema = 'public';",
        verbose
    )
    for row in rows:
        name = row
        Sequence(schema, name)

    return schema
def checkSchema(dbversion, verbose=False):
    """
    Compare schema in the database with the expected schema file.
    """
    dbschema = dumpCurrentSchema(verbose)

    # Find current schema: prefer the archived schema matching the DB
    # version, falling back to current.sql when no archived copy exists.
    fp = FilePath(SCHEMADIR)
    fpschema = fp.child("old").child("postgres-dialect").child("v{}.sql".format(dbversion))
    if not fpschema.exists():
        fpschema = fp.child("current.sql")
    expectedSchema = schemaFromPath(fpschema)

    mismatched = dbschema.compare(expectedSchema)
    if mismatched:
        print("\nCurrent schema in database is mismatched:\n\n" + "\n".join(mismatched))
    else:
        print("\nCurrent schema in database is a match to the expected server version")
class CheckSchemaError(Exception):
    """Raised when the database schema cannot be read or parsed."""
    pass
def error(message):
    """Print *message* to stderr and terminate with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)
def main():
try:
(optargs, _ignore_args) = getopt(
sys.argv[1:], "d:hk:vx", [
"help",
"verbose",
],
)
except GetoptError, e:
usage(e)
verbose = False
global SCHEMADIR, PGSOCKETDIR, PSQL
for opt, arg in optargs:
if opt in ("-h", "--help"):
usage()
elif opt in ("-d",):
SCHEMADIR = arg
elif opt in ("-k",):
PGSOCKETDIR = arg
elif opt in ("-p",):
PSQL = arg
elif opt in ("-x",):
sktdir = FilePath("/var/run/caldavd")
for skt in sktdir.children():
if skt.basename().startswith("ccs_postgres_"):
PGSOCKETDIR = skt.path
PSQL = "/Applications/Server.app/Contents/ServerRoot/usr/bin/psql"
SCHEMADIR = "/Applications/Server.app/Contents/ServerRoot/usr/share/caldavd/lib/python/txdav/common/datastore/sql_schema/"
elif opt in ("-v", "--verbose"):
verbose = True
else:
raise NotImplementedError(opt)
# Retrieve the db_version number of the installed schema
try:
db_version = getSchemaVersion(verbose=verbose)
except CheckSchemaError, e:
db_version = 0
# Retrieve the version number from the schema file
currentschema = FilePath(SCHEMADIR).child("current.sql")
try:
data = currentschema.getContent()
except IOError:
print("Unable to open the current schema file: %s" % (currentschema.path,))
else:
found = re.search("insert into CALENDARSERVER values \('VERSION', '(\d+)'\);", data)
if found is None:
print("Schema is missing required schema VERSION insert statement: %s" % (currentschema.path,))
else:
current_version = int(found.group(1))
if db_version == current_version:
print("Schema version {} is current".format(db_version))
else: # upgrade needed
print("Schema needs to be upgraded from {} to {}".format(db_version, current_version))
checkSchema(db_version, verbose)
if __name__ == "__main__":
main()
| StarcoderdataPython |
6509166 | {
"name": "Library Book Checkout",
"description": "Members can borrow books from the library.",
"author": "<NAME>",
"depends": ["library_member", "mail"],
"data": [
"security/ir.model.access.csv",
"views/library_menu.xml",
"views/checkout_view.xml",
"wizard/checkout_mass_message_wizard_view.xml",
"data/stage_data.xml",
],
}
| StarcoderdataPython |
1626366 | <filename>comm/communication_server.py
import http.server
class ServerMain(http.server.SimpleHTTPRequestHandler):
    """Tiny board-state HTTP handler: GET /boardstate returns the saved
    state; a GET whose query string is 32+ characters overwrites it."""

    def do_HEAD(self):
        # Respond 200 with wide-open CORS headers; no body.
        self.send_response(200)
        self.send_header('Allow', 'GET, POST, OPTIONS')
        self.send_header("Content-Type", "text/html")
        self.send_header("charset", "utf-8")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Headers", "*")
        self.send_header('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
        self.end_headers()

    def do_GET(self):
        # Same 200 + CORS headers as do_HEAD, then an optional body.
        self.send_response(200)
        self.send_header('Allow', 'GET, POST, OPTIONS')
        self.send_header('charset', 'utf-8')
        self.send_header("Content-Type", "text/html")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Headers", "*")
        self.send_header('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
        self.end_headers()
        if self.path == "/boardstate":
            # Serve the stored board state wrapped in a minimal HTML shell.
            with open("boardstate.txt", "r") as board:
                out = """<DOCTYPE html><html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head>""" + board.readline() + """</html>"""
                self.wfile.write(out.encode('utf-8'))
        if "?" in self.path:
            # Everything after the first '?' is treated as a new board state.
            # NOTE(review): any unauthenticated client can overwrite
            # boardstate.txt with an arbitrary 32+ char query string --
            # confirm this is intended.
            post_board = str(self.path[self.path.find("?") + 1:])
            if len(post_board) >= 32:
                with open("boardstate.txt", "w") as wOut:
                    wOut.write(post_board)
                self.wfile.write(post_board.encode('utf-8'))
if __name__ == '__main__':
    # Serve on localhost:3000 until interrupted with Ctrl-C.
    server_class = http.server.HTTPServer
    httpd = server_class(("localhost", 3000), ServerMain)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
| StarcoderdataPython |
3404966 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import itertools
import optparse
import os
import Queue
import sys
import threading
import time
import traceback
from conary.conaryclient import callbacks as clientCallbacks
from conary.conaryclient import cmdline
from conary import conarycfg, callbacks, trove
from conary.lib import cfg, util, log
from conary.repository import errors, changeset, netclient
from conary.deps.deps import parseFlavor
class OptionError(Exception):
    """Command-line option error carrying an exit code and a message."""

    def __init__(self, errcode, errmsg, *args):
        Exception.__init__(self, *args)
        self.errcode = errcode
        self.errmsg = errmsg
def parseArgs(argv):
    """Parse the mirror script's command line.

    Returns the parsed options object; raises OptionError when no
    configuration file was given or unexpected positional arguments remain.
    """
    parser = optparse.OptionParser(version='%prog 0.1')
    parser.add_option("--config-file", dest="configFile",
                      help="configuration file", metavar="FILE")
    # Every remaining option is a plain store_true flag, registered
    # data-driven to keep the declarations compact.
    boolean_flags = [
        (("--full-sig-sync",), "infoSync",
         "deprecated: alias to --full-info-sync"),
        (("--full-info-sync",), "infoSync",
         "replace all the trove signatures and metadata "
         "in the target repository"),
        (("--fast-sync",), "fastSync",
         "skip checking/mirroring of changed info records "
         "for already mirrored troves"),
        (("--absolute",), "absolute",
         "use only absolute changesets when mirroring content"),
        (("--full-trove-sync",), "sync",
         "ignore the last-mirrored timestamp in the "
         "target repository"),
        (("--check-sync",), "checkSync",
         "only check if the source and target(s) are in sync"),
        (("--test",), "test",
         "skip commiting changes to the target repository"),
        (("-v", "--verbose"), "verbose",
         "display information on what is going on"),
    ]
    for switches, dest, helptext in boolean_flags:
        parser.add_option(*switches, dest=dest, action="store_true",
                          default=False, help=helptext)
    options, leftovers = parser.parse_args(argv)
    if options.configFile is None:
        raise OptionError(1, 'a mirror configuration must be provided')
    if leftovers:
        raise OptionError(1, 'unexpected arguments: %s' % " ".join(leftovers))
    return options
class VerboseChangesetCallback(clientCallbacks.ChangesetCallback):
    """Changeset callback that wipes its status line when finished."""

    def done(self):
        # Clear the progress prefix and return the cursor to column 0.
        self.clearPrefix()
        self._message('\r')
class ChangesetCallback(callbacks.ChangesetCallback):
    """Quiet changeset callback: prefix updates are ignored entirely."""

    def setPrefix(self, *args):
        pass

    def clearPrefix(self):
        pass
class MirrorConfigurationSection(cfg.ConfigSection):
    """One repository endpoint of the mirror configuration ([source],
    [reference] or a [target*] section): repository map plus credentials."""
    repositoryMap = conarycfg.CfgRepoMap
    user = conarycfg.CfgUserInfo
    entitlement = conarycfg.CfgEntitlement
class MirrorFileConfiguration(cfg.SectionedConfigFile):
    """Top-level mirror configuration file.  Global options live here; each
    repository endpoint gets its own dynamically named section (see
    MirrorConfigurationSection)."""
    host = cfg.CfgString
    entitlementDirectory = cfg.CfgPath
    labels = conarycfg.CfgInstallLabelPath
    matchTroves = cfg.CfgSignedRegExpList
    matchTroveSpecs = cfg.CfgSignedRegExpList
    recurseGroups = (cfg.CfgBool, False)
    uploadRateLimit = (conarycfg.CfgInt, 0,
                       "Upload rate limit, in bytes per second")
    downloadRateLimit = (conarycfg.CfgInt, 0,
                         "Download rate limit, in bytes per second")
    lockFile = cfg.CfgString
    useHiddenCommits = (cfg.CfgBool, True)
    absoluteChangesets = (cfg.CfgBool, False)
    includeSources = (cfg.CfgBool, False)
    splitNodes = (cfg.CfgBool, False,
                  "Split jobs that would commit two versions of a trove at once. "
                  "Needed for compatibility with older repositories.")
    noPGP = (cfg.CfgBool, False)

    # Unknown section names are permitted; each becomes a repository section.
    _allowNewSections = True
    _defaultSectionType = MirrorConfigurationSection
# some sanity checks for the mirror configuration
def checkConfig(cfg):
    """Validate a mirror configuration.

    Ensures a source host is configured and that every configured label
    actually lives on that host.  Raises RuntimeError on failure.
    """
    if not cfg.host:
        log.error("ERROR: cfg.host is not defined")
        raise RuntimeError("cfg.host is not defined")
    # make sure that each label belongs to the host we're mirroring
    for label in cfg.labels:
        if label.getHost() != cfg.host:
            log.error("ERROR: label %s is not on host %s", label, cfg.host)
            # interpolate here: unlike the logging calls, RuntimeError does
            # not %-format its arguments, so the old code raised an
            # exception whose message was an uninterpolated tuple
            raise RuntimeError("label %s is not on host %s"
                               % (label, cfg.host))
def _getMirrorClient(mirrorCfg, section):
    """Build a NetworkRepositoryClient for one named section of the
    mirror configuration, combining per-section credentials with the
    global transfer settings."""
    sect = mirrorCfg.getSection(section)
    clientCfg = conarycfg.ConaryConfiguration(False)
    # repository map and credentials come from the section itself
    for key in ('repositoryMap', 'user', 'entitlement'):
        clientCfg[key] = sect[key]
    # rate limits and the entitlement directory are global settings
    for key in ('uploadRateLimit', 'downloadRateLimit',
                'entitlementDirectory'):
        clientCfg[key] = mirrorCfg[key]
    return netclient.NetworkRepositoryClient(cfg=clientCfg)
def mainWorkflow(cfg = None, callback=ChangesetCallback(),
                 test=False, sync=False, infoSync=False,
                 checkSync=False, fastSync=False):
    """Top-level mirror driver: acquire the lock, build source/reference/
    target clients and loop mirrorRepository() until it reports no more
    work.  checkSync short-circuits into a sync report instead.

    NOTE(review): the callback default is a single shared instance created
    at definition time; all callers that omit it share that object.
    """
    import fcntl
    if cfg.lockFile:
        try:
            log.debug('checking for lock file')
            lock = open(cfg.lockFile, 'w')
            # non-blocking exclusive lock: bail out instead of queueing
            # behind another running mirror process
            fcntl.lockf(lock, fcntl.LOCK_EX|fcntl.LOCK_NB)
        except IOError:
            log.warning('lock held by another process, exiting')
            return
    # need to make sure we have a 'source' section
    if not cfg.hasSection('source'):
        log.debug("ERROR: mirror configuration file is missing a [source] section")
        raise RuntimeError("Mirror configuration file is missing a [source] section")
    sourceRepos = _getMirrorClient(cfg, 'source')
    # Optional reference repository
    if cfg.hasSection('reference'):
        refRepos = _getMirrorClient(cfg, 'reference')
    else:
        refRepos = sourceRepos
    # we need to build a target repo client for each of the "target*"
    # sections in the config file
    targets = []
    for name in cfg.iterSectionNames():
        if not name.startswith("target"):
            continue
        target = _getMirrorClient(cfg, name)
        target = TargetRepository(target, cfg, name, test=test)
        targets.append(target)
    # checkSync is a special operation...
    if checkSync:
        return checkSyncRepos(cfg, refRepos, targets)
    # we pass in the sync flag only the first time around, because after
    # that we need the targetRepos mark to advance accordingly after being
    # reset to -1
    callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                 test = test, sync = sync,
                                 syncSigs = infoSync,
                                 callback = callback,
                                 fastSync = fastSync,
                                 referenceRepos=refRepos,
                                 )
    while callAgain:
        callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                     test = test, callback = callback,
                                     fastSync = fastSync,
                                     referenceRepos=refRepos,
                                     )
def Main(argv=None):
    """Command-line entry point: parse arguments, load the mirror
    configuration and run the main workflow.  Returns an error code on
    option errors; exits on keyboard interrupt."""
    if argv is None:
        # NOTE(review): `argv = argv=sys.argv[1:]` is a redundant chained
        # assignment to the same name; harmless but looks like a typo
        argv = argv=sys.argv[1:]
    try:
        options = parseArgs(argv)
    except OptionError, e:
        sys.stderr.write(e.errmsg)
        sys.stderr.write("\n")
        return e.errcode
    cfg = MirrorFileConfiguration()
    cfg.read(options.configFile, exception = True)
    callback = ChangesetCallback()
    if options.absolute:
        cfg.absoluteChangesets = True
    if options.verbose:
        log.setVerbosity(log.DEBUG)
        callback = VerboseChangesetCallback()
    if options.fastSync: # make --fast-sync imply --full-trove-sync
        options.sync = True
    try:
        mainWorkflow(cfg, callback, options.test,
                     sync = options.sync, infoSync = options.infoSync,
                     fastSync = options.fastSync, checkSync = options.checkSync)
    except KeyboardInterrupt:
        print >> sys.stderr
        print >> sys.stderr, 'Terminating due to user interrupt'
        sys.exit(1)
def groupTroves(troveList):
    """Partition (mark, (name, version, flavor), ...) tuples into groups
    sharing the same (version, flavor), then sort the groups by mark with
    group-containing bundles pushed toward the end."""
    # combine the troves into indisolvable groups based on their version and
    # flavor; it's assumed that adjacent troves with the same version/flavor
    # must be in a single commit
    grouping = {}
    for info in troveList:
        (n, v, f) = info[1]
        crtGrp = grouping.setdefault((v,f), [])
        crtGrp.append(info)
    grouping = grouping.values()
    # make sure the groups are sorted in ascending order of their mark
    def _groupsort(a, b):
        # primary key: mark of the first member of each group
        ret = cmp(a[0][0], b[0][0])
        if ret:
            return ret
        # if they have the same mark, sort the groups at the end
        ahasgrp = [x[1][1] for x in a if trove.troveIsGroup(x[1][0])]
        bhasgrp = [x[1][1] for x in b if trove.troveIsGroup(x[1][0])]
        if len(ahasgrp) > len(bhasgrp):
            return 1
        if len(bhasgrp) > len(ahasgrp):
            return -1
        return cmp(ahasgrp, bhasgrp)
    grouping.sort(_groupsort)
    return grouping
def buildJobList(src, target, groupList, absolute=False, splitNodes=True,
                 jobSize=20):
    """Turn grouped trove lists into changeset job bundles.

    For each trove, tries to find the closest version/flavor already on
    the target (and still present on the source) so a relative changeset
    can be requested; otherwise falls back to an absolute changeset.
    Returns a list of bundles, each a list of (mark, job) tuples.
    """
    # Match each trove with something we already have; this is to mirror
    # using relative changesets, which is a lot more efficient than using
    # absolute ones.
    q = {}
    srcAvailable = {}
    for group in groupList:
        for mark, (name, version, flavor) in group:
            # force groups to always be transferred using absolute changesets
            if trove.troveIsGroup(name):
                continue
            srcAvailable[(name,version,flavor)] = True
            d = q.setdefault(name, {})
            l = d.setdefault(version.branch(), [])
            l.append(flavor)
    # check that the latestavailable versions from the target are
    # present on the source to be able to use relative changesets
    latestAvailable = {}
    if len(q):
        latestAvailable = target.getTroveLeavesByBranch(q)
    # normalize flavor lists into sets for O(1) membership/discard below
    latestAvailable = dict(
        (name, dict(
            (version, set(flavors))
            for (version, flavors) in versions.iteritems()
        )) for (name, versions) in latestAvailable.iteritems())
    if len(latestAvailable):
        def _tol(d):
            # flatten {name: {version: flavors}} into (n, v, f) tuples
            for n, vd in d.iteritems():
                for v, fl in vd.iteritems():
                    for f in fl:
                        yield (n,v,f)
        ret = src.hasTroves(list(_tol(latestAvailable)), hidden=True)
        srcAvailable.update(ret)
    def _split():
        # Stop adding troves to this job and allow its troves to be used for
        # the next job's relative changesets.
        for mark, job in jobList[-1]:
            name = job[0]
            if trove.troveIsGroup(name):
                continue
            oldVersion, oldFlavor = job[1]
            newVersion, newFlavor = job[2]
            srcAvailable[(name, newVersion, newFlavor)] = True
            d = latestAvailable.setdefault(name, {})
            if oldVersion in d and oldVersion.branch() == newVersion.branch():
                # If the old version is on the same branch as the new one,
                # replace the old with the new. If it's on a different
                # branch, we'll track both.
                flavorList = d[oldVersion]
                flavorList.discard(oldFlavor)
                if not flavorList:
                    del d[oldVersion]
            flavorList = d.setdefault(newVersion, set())
            flavorList.add(newFlavor)
        if jobList[-1]:
            jobList.append([])
    # we'll keep latestAvailable in sync with what the target will look like
    # as the mirror progresses
    jobList = [[]]
    currentNodes = set()
    currentHost = None
    for group in groupList:
        # for each job find what it's relative to and build up a job list
        thisJob = []
        for mark, (name, version, flavor) in group:
            # name, version, versionDistance, flavorScore
            currentMatch = (None, None, None, None)
            if absolute or name not in latestAvailable:
                job = (name, (None, None), (version, flavor), True)
            else:
                d = latestAvailable[name]
                for repVersion, flavorList in d.iteritems():
                    # the versions have to be on the same host to be
                    # able to generate relative changesets
                    if version.getHost() != repVersion.getHost():
                        continue
                    for repFlavor in flavorList:
                        if not srcAvailable.get((name, repVersion, repFlavor), False):
                            continue
                        score = flavor.score(repFlavor)
                        if score is False:
                            continue
                        if repVersion == version:
                            closeness = 100000
                        else:
                            closeness = version.closeness(repVersion)
                        # prefer a higher flavor score; on ties, prefer
                        # the closer version
                        if score < currentMatch[3]:
                            continue
                        elif score > currentMatch[3]:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)
                        elif closeness < currentMatch[2]:
                            continue
                        else:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)
                job = (name, (currentMatch[0], currentMatch[1]),
                       (version, flavor), currentMatch[0] is None)
            thisJob.append((mark, job))
        newNodes = set((x[1][0], x[1][2][0].branch()) for x in thisJob)
        newHosts = set(x[1][2][0].getHost() for x in thisJob)
        assert len(newHosts) == 1
        newHost = list(newHosts)[0]
        if (len(jobList[-1]) >= jobSize
            # Can't commit two versions of the same trove
            or (splitNodes and newNodes & currentNodes)
            # Can't commit troves on different hosts
            or currentHost not in (None, newHost)
            ):
            _split()
            currentNodes = set()
        jobList[-1].extend(thisJob)
        currentNodes.update(newNodes)
        currentHost = newHost
    if not jobList[-1]:
        jobList.pop()
    return jobList
# process-wide memo of groups already expanded, so a group referenced by
# several marks is only fetched once
recursedGroups = set()
def recurseTrove(sourceRepos, name, version, flavor,
                 callback = ChangesetCallback()):
    """Return the (name, version, flavor) members of a group trove by
    fetching its (non-recursive, file-less) changeset from sourceRepos.
    Results are memoized in the module-level recursedGroups set."""
    global recursedGroups
    assert(trove.troveIsGroup(name))
    # there's nothing much we can recurse from the source
    if name.endswith(":source"):
        return []
    # avoid grabbing the same group multiple times
    if (name, version, flavor) in recursedGroups:
        return []
    log.debug("recursing group trove: %s=%s[%s]" % (name, version, flavor))
    groupCs = sourceRepos.createChangeSet(
        [(name, (None, None), (version, flavor), True)],
        withFiles=False, withFileContents=False, recurse=False,
        callback = callback)
    recursedGroups.add((name, version, flavor))
    ret = []
    for troveCs in groupCs.iterNewTroveList():
        for name, ops in troveCs.iterChangedTroves(True, True):
            for oper, version, flavor, byDefault in ops:
                # skip erasures; keep added/changed members
                if oper != '-':
                    ret.append((name, version, flavor))
    return ret
def _toBraces(items):
if len(items) > 1:
return '{%s}' % (','.join(sorted(items)))
else:
return list(items)[0]
def formatTroveNames(names):
    """Group trove names by package and format them like a shell glob."""
    # bucket component suffixes (':runtime', ...) under their package name;
    # a bare package contributes an empty-string component
    byPackage = {}
    for troveName in names:
        if ':' in troveName:
            pkg, comp = troveName.split(':')
            comp = ':' + comp
        else:
            pkg, comp = troveName, ''
        byPackage.setdefault(pkg, []).append(comp)
    # When every package carries the identical component list, the whole
    # set collapses into <packages><components>
    uniqueSets = set(tuple(comps) for comps in byPackage.values())
    if len(uniqueSets) == 1:
        (componentSet,) = uniqueSets
        if len(componentSet) > 1:
            return _toBraces(byPackage) + _toBraces(componentSet)
    # Otherwise format each package with its own component glob
    formattedList = []
    for pkg, comps in sorted(byPackage.items()):
        if len(comps) == 1:
            formattedList.append(pkg + comps[0])
        else:
            comps.sort()
            formattedList.append(pkg + _toBraces(comps))
    # Combine into one big set
    if len(formattedList) == 1:
        return formattedList[0]
    formattedList.sort()
    return _toBraces(formattedList)
def displayBundle(bundle):
    """Format a job bundle for display"""
    lowestMark = min(entry[0] for entry in bundle)
    # fold troves that share the same (old, new) version-flavor pair
    grouped = {}
    for mark, (name, oldVF, newVF, absolute) in bundle:
        grouped.setdefault((oldVF, newVF), set()).add(name)
    # render each version-flavor group on one line
    formatted = []
    for (oldVF, newVF), names in grouped.items():
        if not oldVF[0]:
            origin = ''
        elif oldVF[1] != newVF[1]:
            origin = '%s[%s]--' % oldVF
        else:
            origin = '%s--' % (oldVF[0],)
        formatted.append(formatTroveNames(names) + '=' + origin
                         + ('%s[%s]' % newVF))
    formatted.sort()
    return "\n ".join([''] + formatted
                      + ['New mark: %.0f' % (lowestMark,)])
# wrapper for displaying a simple jobList
def displayJobList(jobList):
    """Render a plain job list by treating every job as mark 0."""
    return displayBundle([(0, job) for job in jobList])
# mirroring stuff when we are running into PathIdConflict errors
def splitJobList(jobList, src, targetSet, hidden = False, callback = ChangesetCallback()):
    """Fallback path for ChangeSetKeyConflictError: re-send the job split
    into per-package changesets so conflicting troves land in separate
    commits."""
    log.debug("Changeset Key conflict detected; splitting job further...")
    # bucket the jobs by package name (strip the component suffix)
    jobs = {}
    for job in jobList:
        name = job[0]
        if ':' in name:
            name = name.split(':')[0]
        l = jobs.setdefault(name, [])
        l.append(job)
    i = 0
    for smallJobList in jobs.itervalues():
        # write each small changeset to a temp file and commit it to all
        # targets in parallel
        (outFd, tmpName) = util.mkstemp()
        os.close(outFd)
        log.debug("jobsplit %d of %d %s" % (
            i + 1, len(jobs), displayBundle([(0,x) for x in smallJobList])))
        src.createChangeSetFile(smallJobList, tmpName, recurse = False,
                                callback = callback, mirrorMode = True)
        _parallel(targetSet, TargetRepository.commitChangeSetFile,
                  tmpName, hidden=hidden, callback=callback)
        os.unlink(tmpName)
        callback.done()
        i += 1
    return
# filter a trove tuple based on cfg
def _filterTup(troveTup, cfg):
    """Return True when a (name, version, flavor) tuple passes all of the
    configured mirror filters; False when any filter rejects it."""
    name, version, flavor = troveTup
    spec = cmdline.toTroveSpec(name, str(version), flavor)
    # trove-spec regexp filter
    if cfg.matchTroveSpecs and cfg.matchTroveSpecs.match(spec) <= 0:
        return False
    # trove-name regexp filter
    if cfg.matchTroves and cfg.matchTroves.match(name) <= 0:
        return False
    # only troves that live on the mirrored host are eligible
    if version.getHost() != cfg.host:
        return False
    # and only on the configured labels, when any are set
    return not (cfg.labels and version.branch().label() not in cfg.labels)
# get all the trove info to be synced
def _getAllInfo(src, cfg):
    """Full resync helper: fetch sigs and metadata for every trove on the
    source host and return them as a list of ((n, v, f), TroveInfo)."""
    log.debug("resync all trove info from source. This will take a while...")
    # grab the full list of all the trove versions and flavors in the src
    troveDict = src.getTroveVersionList(cfg.host, { None : None })
    troveList = []
    # filter out the stuff we don't need
    for name, versionD in troveDict.iteritems():
        for version, flavorList in versionD.iteritems():
            for flavor in flavorList:
                tup = (name, version, flavor)
                troveList.append(tup)
    del troveDict
    # retrieve the sigs and the metadata records to sync over
    sigList = src.getTroveSigs(troveList)
    metaList = src.getTroveInfo(trove._TROVEINFO_TAG_METADATA, troveList)
    infoList = []
    for t, s, ti in itertools.izip(troveList, sigList, metaList):
        # no metadata record: start from an empty TroveInfo and attach
        # just the signatures
        if ti is None:
            ti = trove.TroveInfo()
        ti.sigs.thaw(s)
        infoList.append((t, ti))
    return infoList
# while talking to older repos - get the new trove sigs
def _getNewSigs(src, cfg, mark):
    """Compatibility path for old source servers that lack
    getNewTroveInfo: fetch only the changed signatures and wrap them in
    TroveInfo instances.  Returns [(mark, (n, v, f), TroveInfo), ...]."""
    # talking to an old source server. We do the best and we get the sigs out
    sigList = src.getNewSigList(cfg.host, str(mark))
    log.debug("obtained %d changed trove sigs", len(sigList))
    sigList = [ x for x in sigList if _filterTup(x[1], cfg) ]
    log.debug("%d changed sigs after label and match filtering", len(sigList))
    # protection against duplicate items returned in the list by some servers
    sigList = list(set(sigList))
    sigList.sort(lambda a,b: cmp(a[0], b[0]))
    log.debug("downloading %d signatures from source repository", len(sigList))
    # XXX: we could also get the metadata in here, but getTroveInfo
    # would use a getChangeSet call against older repos, severely
    # impacting performance
    sigs = src.getTroveSigs([ x[1] for x in sigList ])
    # need to convert the sigs into TroveInfo instances
    def _sig2info(sig):
        ti = trove.TroveInfo()
        ti.sigs.thaw(sig)
        return ti
    sigs = [ _sig2info(s) for s in sigs]
    # we're gonna iterate repeatedely over the returned set, no itertools can do
    return [(m, t, ti) for (m,t),ti in itertools.izip(sigList, sigs) ]
# get the changed trove info entries for the troves comitted
def _getNewInfo(src, cfg, mark):
    """Fetch changed trove-info records (sigs + metadata) since mark,
    falling back to the sigs-only path on old servers."""
    # first, try the new getNewTroveInfo call
    labels = cfg.labels or []
    mark = str(long(mark)) # xmlrpc chokes on longs
    infoTypes = [trove._TROVEINFO_TAG_SIGS, trove._TROVEINFO_TAG_METADATA]
    try:
        infoList = src.getNewTroveInfo(cfg.host, mark, infoTypes, labels)
    except errors.InvalidServerVersion:
        # otherwise we mirror just the sigs...
        infoList = _getNewSigs(src, cfg, mark)
    return infoList
def _parallel_run(index, results, targets, classMethod, args, kwargs):
try:
target = targets[index]
ret = (index, True, classMethod(target, *args, **kwargs))
except Exception as err:
ret = (index, False, (err, traceback.format_exc()))
results.put(ret)
def _parallel(targets, classMethod, *args, **kwargs):
    """
    Map a method call across multiple targets concurrently

    Returns the per-target results in target order.  Failures are logged;
    after all threads finish, the last error seen is re-raised.  With a
    single target the call is made inline, with no threading.
    """
    if len(targets) == 1:
        return [classMethod(targets[0], *args, **kwargs)]
    results = Queue.Queue()
    threads = []
    for index in range(len(targets)):
        thread = threading.Thread(target=_parallel_run,
                args=(index, results, targets, classMethod, args, kwargs,))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    # one queue entry per thread: (index, ok, result-or-error)
    ret = [None] * len(targets)
    last_error = None
    for thread in threads:
        index, ok, result = results.get()
        if ok:
            ret[index] = result
        else:
            last_error, trace = result
            log.error("Error updating target %s:\n%s",
                    targets[index].name, trace)
    if last_error is not None:
        raise last_error
    return ret
# mirror new trove info for troves we have already mirrored.
def mirrorTroveInfo(src, targets, mark, cfg, resync=False):
    """Mirror changed trove-info records (sigs/metadata) into all targets;
    with resync=True the full source inventory is re-synced.  Returns the
    number of records updated."""
    if resync:
        log.debug("performing a full trove info sync")
        infoList = _getAllInfo(src, cfg)
        infoList = [(mark, t, ti) for t, ti in infoList ]
    else:
        log.debug("getting new trove info entries")
        infoList = _getNewInfo(src, cfg, mark)
    log.debug("obtained %d trove info records for mirroring", len(infoList))
    infoList = [(m,t,ti) for (m,t,ti) in infoList if _filterTup(t, cfg)]
    if not len(infoList):
        log.debug("no troveinfo records need to be mirrored")
        return 0
    log.debug("mirroring %d changed trove info records" % len(infoList))
    updateCount = sum(_parallel(targets,
        TargetRepository.setTroveInfo, infoList))
    return updateCount
# this mirrors all the troves marked as removed from the sourceRepos into the targetRepos
def mirrorRemoved(sourceRepos, targetRepos, troveSet, test = False, callback = ChangesetCallback()):
    """Mirror removal markers for troveSet into a single target repo.
    Troves missing from the target or already marked removed there are
    dropped from the set; returns the number of removals committed.
    NOTE: troveSet is mutated in place."""
    if not troveSet:
        return 0
    log.debug("checking on %d removed troves", len(troveSet))
    # these removed troves better exist on the target
    present = targetRepos.hasTroves(list(troveSet))
    missing = [ x for x in troveSet if not present[x] ]
    # we can not have any "missing" troves while we mirror removals
    for t in missing:
        log.warning("Mirroring removed trove: valid trove not found on target: %s", t)
        troveSet.remove(t)
    # for the remaining removed troves, are any of them already mirrored?
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    cs = targetRepos.createChangeSet(jobList, recurse=False, withFiles=False,
                                     withFileContents=False, callback=callback)
    for trvCs in cs.iterNewTroveList():
        if trvCs.getType() == trove.TROVE_TYPE_REMOVED:
            troveSet.remove(trvCs.getNewNameVersionFlavor())
    log.debug("mirroring %d removed troves", len(troveSet))
    if not troveSet:
        return 0
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    log.debug("mirroring removed troves %s" % (displayJobList(jobList),))
    # grab the removed troves changeset
    cs = sourceRepos.createChangeSet(jobList, recurse = False,
        withFiles = False, withFileContents = False,
        callback = callback)
    log.debug("committing")
    targetRepos.commitChangeSet(cs, mirror = True, callback = callback)
    callback.done()
    return len(jobList)
# target repo class that helps dealing with testing mode
class TargetRepository:
    """Wrapper around a target NetworkRepositoryClient that tracks the
    mirror mark, caches mirrored GPG keys, and turns all mutating calls
    into no-ops when test mode is enabled."""
    def __init__(self, repo, cfg, name = 'target', test=False):
        self.repo = repo
        self.test = test
        self.cfg = cfg
        # mirror mark is cached as a string; None until first queried
        self.mark = None
        self.name = name
        # per-host memo of GPG key lists already mirrored
        self.__gpg = {}
    def getMirrorMark(self):
        """Return the target's mirror mark for the configured host, as a
        long (cached after the first server round-trip)."""
        if self.mark is None:
            self.mark = self.repo.getMirrorMark(self.cfg.host)
        self.mark = str(long(self.mark))
        return long(self.mark)
    def setMirrorMark(self, mark):
        """Record the new mirror mark locally and (unless testing) on the
        target server."""
        self.mark = str(long(mark))
        log.debug("%s setting mirror mark to %s", self.name, self.mark)
        if self.test:
            return
        self.repo.setMirrorMark(self.cfg.host, self.mark)
    def mirrorGPG(self, src, host):
        """Copy host's new PGP keys from src into this target, at most
        once per host per process (unless noPGP disables it)."""
        if self.cfg.noPGP:
            return
        if self.__gpg.has_key(host):
            return
        keyList = src.getNewPGPKeys(host, -1)
        self.__gpg[host] = keyList
        if not len(keyList):
            return
        log.debug("%s adding %d gpg keys", self.name, len(keyList))
        if self.test:
            return
        self.repo.addPGPKeyList(self.cfg.host, keyList)
    def setTroveInfo(self, infoList):
        """Upload trove-info records, skipping troves that are not yet on
        the target and records that are empty.  Falls back to sigs-only
        upload for old servers.  Returns the number of records sent."""
        log.debug("%s checking what troveinfo needs to be mirrored", self.name)
        # Items whose mark is the same as currentMark might not have their trove
        # available on the server (it might be coming as part of this mirror
        # run).
        # NOTE(review): this compares marks as *strings* (self.mark is kept
        # as str); lexicographic >= differs from numeric >= for marks of
        # different digit counts — confirm whether that is intended
        inQuestion = [ x[1] for x in infoList if str(long(x[0])) >= self.mark ]
        present = self.repo.hasTroves(inQuestion, hidden=True)
        # filter out the not present troves which will get mirrored in
        # the current mirror run
        infoList = [ (t, ti) for (m, t, ti) in infoList if present.get(t, True) ]
        # avoid busy work for troveinfos which are empty
        infoList = [ (t, ti) for (t, ti) in infoList if len(ti.freeze()) > 0 ]
        if self.test:
            return 0
        try:
            self.repo.setTroveInfo(infoList)
        except errors.InvalidServerVersion: # to older servers we can only transport sigs
            infoList = [ (t, ti.sigs.freeze()) for t, ti in infoList ]
            # only send up the troves that actually have a signature change
            infoList = [ x for x in infoList if len(x[1]) > 0 ]
            log.debug("%s pushing %d trove sigs...", self.name, len(infoList))
            self.repo.setTroveSigs(infoList)
        else:
            log.debug("%s uploaded %d info records", self.name, len(infoList))
        return len(infoList)
    def addTroveList(self, tl):
        # Filter out troves which are already in the local repository. Since
        # the marks aren't distinct (they increase, but not monotonically), it's
        # possible that something new got committed with the same mark we
        # last updated to, so we have to look again at all of the troves in the
        # source repository with the last mark which made it into our target.
        present = self.repo.hasTroves([ x[1] for x in tl ], hidden = True)
        ret = [ x for x in tl if not present[x[1]] ]
        log.debug("%s found %d troves not present", self.name, len(ret))
        return ret
    def commitChangeSetFile(self, filename, hidden, callback):
        """Commit a changeset file to this target (no-op in test mode),
        logging how long the commit took."""
        if self.test:
            return 0
        # copy the callback so a per-target prefix doesn't leak to others
        callback = copy.copy(callback)
        callback.setPrefix(self.name + ": ")
        t1 = time.time()
        ret = self.repo.commitChangeSetFile(filename, mirror=True, hidden=hidden,
                                            callback=callback)
        t2 = time.time()
        callback.done()
        hstr = ""
        if hidden: hstr = "hidden "
        log.debug("%s %scommit (%.2f sec)", self.name, hstr, t2-t1)
        return ret
    def presentHiddenTroves(self, newMark):
        """Reveal previously hidden commits on the target, then advance
        the mirror mark."""
        log.debug("%s unhiding comitted troves", self.name)
        self.repo.presentHiddenTroves(self.cfg.host)
        self.setMirrorMark(newMark)
# split a troveList in changeset jobs
def buildBundles(sourceRepos, target, troveList, absolute=False,
                 splitNodes=True):
    """Split troveList into changeset job bundles for one target.

    Groups the troves by version/flavor, then builds job bundles relative
    to what the target already has.  Returns the bundle list.
    """
    log.debug("grouping %d troves based on version and flavor", len(troveList))
    groupList = groupTroves(troveList)
    log.debug("building grouped job list")
    # (dropped a dead `bundles = []` initialization that was immediately
    # overwritten by this assignment)
    bundles = buildJobList(sourceRepos, target.repo, groupList, absolute,
                           splitNodes)
    return bundles
# return the new list of troves to process after filtering and sanity checks
def getTroveList(src, cfg, mark):
    """Fetch, de-duplicate, filter and sort the new troves committed to
    the source since mark.

    Returns (mark, troveList); when everything was filtered out the first
    element is instead the highest mark seen, so the caller can keep
    paging forward.
    """
    # FIXME: getNewTroveList should accept and only return troves on
    # the labels we're interested in
    log.debug("looking for new troves")
    # make sure we always treat the mark as an integer
    troveList = [(long(m), (n,v,f), t) for m, (n,v,f), t in
                 src.getNewTroveList(cfg.host, str(mark))]
    if not len(troveList):
        # this should be the end - no more troves to look at
        log.debug("no new troves found")
        return (mark, [])
    # we need to protect ourselves from duplicate items in the troveList
    l = len(troveList)
    troveList = list(set(troveList))
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after duplicate elimination %d troves are left", len(troveList))
    # if we filter out the entire list of troves we have been
    # returned, we need to tell the caller what was the highest mark
    # we had so it can continue asking for more
    maxMark = max([x[0] for x in troveList])
    # filter out troves on labels and parse through matchTroves
    troveList = [ x for x in troveList if _filterTup(x[1],cfg) ]
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after label filtering and matchTroves %d troves are left", l)
    if not troveList:
        return (maxMark, [])
    # sort deterministically by mark, version, flavor, reverse name
    troveList.sort(lambda a,b: cmp(a[0], b[0]) or
                   cmp(a[1][1], b[1][1]) or
                   cmp(a[1][2], b[1][2]) or
                   cmp(b[1][0], a[1][0]) )
    log.debug("%d new troves returned", len(troveList))
    # We cut off the last troves that have the same flavor, version to
    # avoid committing an incomplete trove. This could happen if the
    # server side only listed some of a trove's components due to
    # server side limits on how many results it can return on each query
    lastIdx = len(troveList)-1
    # compare with the last one
    ml, (nl,vl,fl), tl = troveList[-1]
    while lastIdx >= 0:
        lastIdx -= 1
        m, (n,v,f), t = troveList[lastIdx]
        if v == vl and f == fl:
            continue
        lastIdx += 1
        break
    # the min mark of the troves we skip has to be higher than max
    # mark of troves we'll commit or otherwise we'll skip them for good...
    if lastIdx >= 0:
        firstMark = max([x[0] for x in troveList[:lastIdx]])
        lastMark = min([x[0] for x in troveList[lastIdx:]])
        if lastMark > firstMark:
            troveList = troveList[:lastIdx]
            log.debug("reduced new trove list to %d to avoid partial commits", len(troveList))
    # since we're returning at least one trove, the caller will make the next mark decision
    return (mark, troveList)
def _makeTargets(cfg, targetRepos, test = False):
    """Normalize targetRepos (a single repo/wrapper or an iterable of
    them) into a list of TargetRepository wrappers."""
    if not hasattr(targetRepos, '__iter__'):
        targetRepos = [targetRepos]
    wrapped = []
    for repo in targetRepos:
        # already wrapped: pass straight through
        if isinstance(repo, TargetRepository):
            wrapped.append(repo)
        # bare repository client: wrap it
        elif isinstance(repo, netclient.NetworkRepositoryClient):
            wrapped.append(TargetRepository(repo, cfg, test=test))
        else:
            raise RuntimeError("Can not handle unknown target repository type", repo)
    return wrapped
# syncSigs really means "resync all info", but we keep the parameter
# name for compatibility reasons
def mirrorRepository(sourceRepos, targetRepos, cfg,
                     test = False, sync = False, syncSigs = False,
                     callback = ChangesetCallback(),
                     fastSync = False,
                     referenceRepos=None,
                     ):
    """Mirror one batch of changes from sourceRepos into every target.

    Returns -1 when the caller should invoke this function again (more
    work is pending), 0 when source and targets are in sync, or the
    number of items updated during this pass.
    """
    if referenceRepos is None:
        referenceRepos = sourceRepos
    checkConfig(cfg)
    targets = _makeTargets(cfg, targetRepos, test)
    log.debug("-" * 20 + " start loop " + "-" * 20)
    hidden = len(targets) > 1 or cfg.useHiddenCommits
    if hidden:
        log.debug("will use hidden commits to synchronize target mirrors")
    marks = _parallel(targets, TargetRepository.getMirrorMark)
    if sync:
        currentMark = -1
    else:
        # we use the oldest mark as a starting point (since we have to
        # get stuff from source for that oldest one anyway)
        currentMark = min(marks)
    log.debug("using common mirror mark %s", currentMark)
    # reset mirror mark to the lowest common denominator
    for t, mark in zip(targets, marks):
        if mark != currentMark:
            t.setMirrorMark(currentMark)
    # mirror gpg signatures from the src into the targets
    _parallel(targets, TargetRepository.mirrorGPG, referenceRepos, cfg.host)
    # mirror changed trove information for troves already mirrored
    if fastSync:
        updateCount = 0
        log.debug("skip trove info records sync because of fast-sync")
    else:
        updateCount = mirrorTroveInfo(referenceRepos, targets, currentMark,
                                      cfg, syncSigs)
    newMark, troveList = getTroveList(referenceRepos, cfg, currentMark)
    if not troveList:
        if newMark > currentMark: # something was returned, but filtered out
            _parallel(targets, TargetRepository.setMirrorMark, newMark)
            return -1 # call again
        return 0
    # prepare a new max mark to be used when we need to break out of a loop
    crtMaxMark = max(long(x[0]) for x in troveList)
    if currentMark > 0 and crtMaxMark == currentMark:
        # if we're hung on the current max then we need to
        # forcibly advance the mark in case we're stuck
        crtMaxMark += 1 # only used if we filter out all troves below
    initTLlen = len(troveList)
    # removed troves are a special blend - we keep them separate
    removedSet = set([ x[1] for x in troveList if x[2] == trove.TROVE_TYPE_REMOVED ])
    troveList = [ (x[0], x[1]) for x in troveList if x[2] != trove.TROVE_TYPE_REMOVED ]
    # figure out if we need to recurse the group-troves
    if cfg.recurseGroups:
        # avoid adding duplicates
        troveSetList = set([x[1] for x in troveList])
        for mark, (name, version, flavor) in troveList:
            if trove.troveIsGroup(name):
                recTroves = recurseTrove(referenceRepos, name,
                                         version, flavor, callback=callback)
                # add sources here:
                if cfg.includeSources:
                    troveInfo = referenceRepos.getTroveInfo(
                        trove._TROVEINFO_TAG_SOURCENAME, recTroves)
                    sourceComps = set()
                    for nvf, source in itertools.izip(recTroves, troveInfo):
                        sourceComps.add((source(), nvf[1].getSourceVersion(),
                                         parseFlavor('')))
                    recTroves.extend(sourceComps)
                # add the results at the end with the current mark
                for (n, v, f) in recTroves:
                    if (n, v, f) not in troveSetList:
                        troveList.append((mark, (n, v, f)))
                        troveSetList.add((n, v, f))
        log.debug("after group recursion %d troves are needed", len(troveList))
        # we need to make sure we mirror the GPG keys of any newly added troves
        newHosts = set([x[1].getHost() for x in troveSetList.union(removedSet)])
        for host in newHosts.difference(set([cfg.host])):
            _parallel(targets, TargetRepository.mirrorGPG,
                      referenceRepos, host)
    # we check which troves from the troveList are needed on each
    # target and we split the troveList into separate lists depending
    # on how many targets require each
    byTarget = {}
    targetSetList = []
    if len(troveList):
        byTrove = {}
        for i, target in enumerate(targets):
            for t in target.addTroveList(troveList):
                bt = byTrove.setdefault(t, set())
                bt.add(i)
        # invert the dict by target now
        for trv, ts in byTrove.iteritems():
            targetSet = [ targets[i] for i in ts ]
            try:
                targetIdx = targetSetList.index(targetSet)
            except ValueError:
                targetSetList.append(targetSet)
                targetIdx = len(targetSetList)-1
            bt = byTarget.setdefault(targetIdx, [])
            bt.append(trv)
        del byTrove
    # if we were returned troves, but we filtered them all out, advance the
    # mark and signal "try again"
    if len(byTarget) == 0 and len(removedSet) == 0 and initTLlen:
        # we had troves and now we don't
        log.debug("no troves found for our label %s" % cfg.labels)
        _parallel(targets, TargetRepository.setMirrorMark, crtMaxMark)
        # try again
        return -1
    # now we get each section of the troveList for each targetSet. We
    # start off mirroring by those required by fewer targets, using
    # the assumption that those troves are what is required for the
    # targets to catch up to a common set
    if len(byTarget) > 1:
        log.debug("split %d troves into %d chunks by target", len(troveList), len(byTarget))
    # sort the targetSets by length
    targetSets = list(enumerate(targetSetList))
    targetSets.sort(lambda a,b: cmp(len(a[1]), len(b[1])))
    bundlesMark = 0
    for idx, targetSet in targetSets:
        troveList = byTarget[idx]
        if not troveList: # XXX: should not happen...
            continue
        log.debug("mirroring %d troves into %d targets", len(troveList), len(targetSet))
        # since these troves are required for all targets, we can use
        # the "first" one to build the relative changeset requests
        target = list(targetSet)[0]
        bundles = buildBundles(sourceRepos, target, troveList,
                               cfg.absoluteChangesets, cfg.splitNodes)
        for i, bundle in enumerate(bundles):
            jobList = [ x[1] for x in bundle ]
            # XXX it's a shame we can't give a hint as to what server to use
            # to avoid having to open the changeset and read in bits of it
            if test:
                log.debug("test mode: not mirroring (%d of %d) %s" % (i + 1, len(bundles), jobList))
                updateCount += len(bundle)
                continue
            (outFd, tmpName) = util.mkstemp()
            os.close(outFd)
            log.debug("getting (%d of %d) %s" % (i + 1, len(bundles), displayBundle(bundle)))
            try:
                sourceRepos.createChangeSetFile(jobList, tmpName, recurse = False,
                                                callback = callback, mirrorMode = True)
            except changeset.ChangeSetKeyConflictError:
                # pathId conflicts: retry the job in smaller pieces
                splitJobList(jobList, sourceRepos, targetSet, hidden=hidden,
                             callback=callback)
            else:
                _parallel(targetSet, TargetRepository.commitChangeSetFile,
                          tmpName, hidden=hidden, callback=callback)
            try:
                os.unlink(tmpName)
            except OSError:
                pass
            callback.done()
            updateCount += len(bundle)
        # compute the max mark of the bundles we comitted
        mark = max([min([x[0] for x in bundle]) for bundle in bundles])
        if mark > bundlesMark:
            bundlesMark = mark
    else: # only when we're all done looping advance mark to the new max
        if bundlesMark == 0 or bundlesMark <= currentMark:
            bundlesMark = crtMaxMark # avoid repeating the same query...
        if hidden: # if we've hidden the last commits, show them now
            _parallel(targets, TargetRepository.presentHiddenTroves,
                      bundlesMark)
        else:
            _parallel(targets, TargetRepository.setMirrorMark, bundlesMark)
    # mirroring removed troves requires one by one processing
    for target in targets:
        copySet = removedSet.copy()
        updateCount += mirrorRemoved(referenceRepos, target.repo, copySet,
                                     test=test, callback=callback)
    # if this was a noop because the removed troves were already mirrored
    # we need to keep going
    if updateCount == 0 and len(removedSet):
        _parallel(targets, TargetRepository.setMirrorMark, crtMaxMark)
        return -1
    return updateCount
# check if the sourceRepos is in sync with targetRepos
def checkSyncRepos(config, sourceRepos, targetRepos):
    """Compare the trove inventory of the source against every target and
    log each difference.  Returns the total number of differing troves
    (0 means everything is in sync)."""
    checkConfig(config)
    targets = _makeTargets(config, targetRepos)
    log.setVerbosity(log.DEBUG)
    # retrieve the set of troves from a given repository
    def _getTroveSet(config, repo):
        def _flatten(troveSpec):
            # flatten {name: {version: flavors}} into a set of tuples
            l = []
            for name, versionD in troveSpec.iteritems():
                for version, flavorList in versionD.iteritems():
                    l += [ (name, version, flavor) for flavor in flavorList ]
            return set(l)
        troveSpecs = {}
        if config.labels:
            # restrict the query to the configured labels
            d = troveSpecs.setdefault(None, {})
            for l in config.labels:
                d[l] = ''
            t = repo.getTroveVersionsByLabel(troveSpecs, troveTypes = netclient.TROVE_QUERY_ALL)
        else:
            troveSpecs = {None : None}
            t = repo.getTroveVersionList(config.host, troveSpecs,
                                         troveTypes = netclient.TROVE_QUERY_ALL)
        return _flatten(t)
    # compare source with each target
    def _compare(src, dst):
        srcName, srcSet = src
        dstName, dstSet = dst
        counter = 0
        # "-" lines exist only on the source, "+" only on the target
        for x in srcSet.difference(dstSet):
            log.debug(" - %s %s " % (srcName, x))
            counter += 1
        for x in dstSet.difference(srcSet):
            log.debug(" + %s %s" % (dstName, x))
            counter += 1
        return counter
    log.debug("Retrieving list of troves from source %s" % str(sourceRepos.c.map))
    sourceSet = _getTroveSet(config, sourceRepos)
    hasDiff = 0
    for target in targets:
        log.debug("Retrieving list of troves from %s %s" % (target.name, str(target.repo.c.map)))
        targetSet = _getTroveSet(config, target.repo)
        log.debug("Diffing source and %s" % target.name)
        hasDiff += _compare( ("source", sourceSet), (target.name, targetSet) )
    log.debug("Done")
    return hasDiff
# script entry point: exit with Main()'s return code
if __name__ == '__main__':
    sys.exit(Main())
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.