id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
358224 | <reponame>connorescajeda/sat
import sys
def anyhop_main(planner):
    """Command-line driver for an anyhop-style HTN planner.

    Usage: python3 prog -v:[verbosity] -s:[max seconds] [planner_file]+
    Each non-flag argument is exec'd as a Python problem file.
    """
    if len(sys.argv) == 1:
        print(f"Usage: python3 {sys.argv[0]} -v:[verbosity] -s:[max seconds] [planner_file]+")
    else:
        verbosity = 1
        max_seconds = None
        for filename in sys.argv[1:]:
            if filename.startswith("-v"):
                # -v:N sets planner verbosity.
                verbosity = int(filename.split(':')[1])
            elif filename.startswith("-s"):
                # -s:N caps planning time in seconds.
                max_seconds = float(filename.split(':')[1])
            else:
                # NOTE(review): exec of an arbitrary file is a security risk for
                # untrusted input; 'state' and 'goals' below are presumably
                # defined by the exec'd problem file — confirm, otherwise this
                # raises NameError.
                exec(open(filename).read())
                plans = planner.anyhop(state, [('start', goals)], max_seconds=max_seconds, verbose=verbosity)
                for (plan, time) in plans:
                    print(plan)
                for (plan, time) in plans:
                    print(f"Length: {len(plan)} time: {time}")
                # NOTE(review): the trailing "| StarcoderdataPython" is dataset
                # residue fused onto this line; it is not part of the program
                # and should be removed.
                print(len(plans), "total plans generated") | StarcoderdataPython
4800281 | <gh_stars>1-10
import json
import os
# Collect markdown files from the projects content directory.
# NOTE(review): `".md" in x` also matches names like "x.md.bak";
# x.endswith(".md") would be stricter — confirm intended.
dirr = os.listdir('content/projects/')
projects = []
for x in dirr:
    if ".md" in x:
        projects.append(x)
out = []
for file_name in projects:
    with open(os.path.join(os.curdir, "content/projects/", file_name), 'r') as file:
        # Files are front-matter style: "--- <metadata> --- <body>".
        # objects[1] is the metadata block, objects[2] the body text.
        objects = file.read().split("---")
        current_object = {
            "text": objects[2].replace("\n", "").replace("\\", "").strip().replace('"', "")
        }
        for line in objects[1].splitlines():
            arr = line.split(':')
            # Re-join values that themselves contained a colon (e.g. URLs).
            if len(arr) > 2:
                arr[1] = arr[1] + arr[2]
            # Skip lines without a "key: value" pair.
            if len(arr) < 2:
                continue
            name = arr[0].strip().replace(
                "\n", "").replace("\\", "").replace('"', "")
            current_object[(name)] = arr[1].replace(
                "\n", "").replace("\\", "").strip().replace('"', "")
        out.append(current_object)
print(out)
# Emit the aggregated metadata as a single JSON file.
with open(os.path.join(os.curdir, "content/projects/file.json"), 'w') as file:
    json.dump(out, file)
| StarcoderdataPython |
3222205 | <filename>tests/components/aladdin_connect/test_config_flow.py
"""Test the Aladdin Connect config flow."""
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.aladdin_connect.config_flow import InvalidAuth
from homeassistant.components.aladdin_connect.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
async def test_form(hass: HomeAssistant) -> None:
    """Test we get the form."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None
    # With a successful login, submitting credentials should create the entry
    # and trigger exactly one setup of the integration.
    with patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        return_value=True,
    ), patch(
        "homeassistant.components.aladdin_connect.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: "test-username",
                CONF_PASSWORD: "<PASSWORD>",
            },
        )
        await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == "Aladdin Connect"
    assert result2["data"] == {
        CONF_USERNAME: "test-username",
        CONF_PASSWORD: "<PASSWORD>",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Login raising InvalidAuth must re-show the form with an auth error.
    with patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        side_effect=InvalidAuth,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: "test-username",
                CONF_PASSWORD: "<PASSWORD>",
            },
        )
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # ConnectionError from login -> "cannot_connect".
    with patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        side_effect=ConnectionError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: "test-username",
                CONF_PASSWORD: "<PASSWORD>",
            },
        )
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "cannot_connect"}
    # Unexpected TypeError is also mapped to "cannot_connect".
    with patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        side_effect=TypeError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: "test-username",
                CONF_PASSWORD: "<PASSWORD>",
            },
        )
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "cannot_connect"}
    # login() returning False (no exception) means bad credentials.
    with patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        return_value=False,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: "test-username",
                CONF_PASSWORD: "<PASSWORD>",
            },
        )
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_already_configured(hass):
    """Test we handle already configured error."""
    # Pre-register an entry with the same unique_id as the new flow.
    mock_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_USERNAME: "test-username", CONF_PASSWORD: "<PASSWORD>"},
        unique_id="test-username",
    )
    mock_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == config_entries.SOURCE_USER
    with patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        return_value=True,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: "test-username",
                CONF_PASSWORD: "<PASSWORD>",
            },
        )
        await hass.async_block_till_done()
    # Duplicate unique_id aborts the flow instead of creating a second entry.
    assert result2["type"] == "abort"
    assert result2["reason"] == "already_configured"
async def test_import_flow_success(hass: HomeAssistant) -> None:
    """Test a successful import of yaml."""
    with patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_platform",
        return_value=True,
    ), patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        return_value=True,
    ), patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        # SOURCE_IMPORT simulates migration of YAML configuration.
        result2 = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_USERNAME: "test-user",
                CONF_PASSWORD: "<PASSWORD>",
            },
        )
        await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == "Aladdin Connect"
    assert result2["data"] == {
        CONF_USERNAME: "test-user",
        CONF_PASSWORD: "<PASSWORD>",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_reauth_flow(hass: HomeAssistant) -> None:
    """Test a successful reauth flow."""
    mock_entry = MockConfigEntry(
        domain=DOMAIN,
        data={"username": "test-username", "password": "<PASSWORD>"},
        unique_id="test-username",
    )
    mock_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": mock_entry.unique_id,
            "entry_id": mock_entry.entry_id,
        },
        data={"username": "test-username", "password": "<PASSWORD>"},
    )
    assert result["step_id"] == "reauth_confirm"
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_platform",
        return_value=True,
    ), patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        return_value=True,
    ), patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_entry",
        return_value=True,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PASSWORD: "<PASSWORD>"},
        )
        await hass.async_block_till_done()
    # A successful reauth aborts the flow and updates the stored credentials.
    assert result2["type"] == RESULT_TYPE_ABORT
    assert result2["reason"] == "reauth_successful"
    assert mock_entry.data == {
        CONF_USERNAME: "test-username",
        CONF_PASSWORD: "<PASSWORD>",
    }
async def test_reauth_flow_auth_error(hass: HomeAssistant) -> None:
    """Test a reauth flow that fails with invalid credentials."""
    mock_entry = MockConfigEntry(
        domain=DOMAIN,
        data={"username": "test-username", "password": "<PASSWORD>"},
        unique_id="test-username",
    )
    mock_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": mock_entry.unique_id,
            "entry_id": mock_entry.entry_id,
        },
        data={"username": "test-username", "password": "<PASSWORD>"},
    )
    assert result["step_id"] == "reauth_confirm"
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] == {}
    # login() returning False re-shows the confirm form with an auth error.
    with patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_platform",
        return_value=True,
    ), patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        return_value=False,
    ), patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_entry",
        return_value=True,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PASSWORD: "<PASSWORD>"},
        )
        await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_reauth_flow_other_error(hass: HomeAssistant) -> None:
    """Test an unsuccessful reauth flow."""
    mock_entry = MockConfigEntry(
        domain=DOMAIN,
        data={"username": "test-username", "password": "<PASSWORD>"},
        unique_id="test-username",
    )
    mock_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": mock_entry.unique_id,
            "entry_id": mock_entry.entry_id,
        },
        data={"username": "test-username", "password": "<PASSWORD>"},
    )
    assert result["step_id"] == "reauth_confirm"
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] == {}
    # Any unexpected exception from login is reported as "cannot_connect".
    with patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_platform",
        return_value=True,
    ), patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        side_effect=ValueError,
    ), patch(
        "homeassistant.components.aladdin_connect.cover.async_setup_entry",
        return_value=True,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PASSWORD: "<PASSWORD>"},
        )
        await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "cannot_connect"}
| StarcoderdataPython |
3541512 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-08 18:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter gcmdevice.device_id: allow blank/null values and index the column."""
    dependencies = [
        ('push_notifications', '0009_gcmdevice_rename_device_uuid'),
    ]
    operations = [
        migrations.AlterField(
            model_name='gcmdevice',
            name='device_id',
            field=models.CharField(blank=True, db_index=True, help_text='ANDROID_ID / TelephonyManager.getDeviceId()', max_length=50, null=True, verbose_name='Device ID'),
        ),
    ]
| StarcoderdataPython |
1743730 | import logging
from traitlets import Unicode, Integer, List
from traitlets.config import Application
from conda_store_server.app import CondaStore
class CondaStoreWorker(Application):
    """Traitlets application that runs the conda-store Celery worker."""
    # Allow `--config <path>` as shorthand for --CondaStoreWorker.config_file.
    aliases = {
        "config": "CondaStoreWorker.config_file",
    }
    log_level = Integer(
        logging.INFO,
        help="log level to use",
        config=True,
    )
    watch_paths = List(
        [], help="list of paths to watch for environment changes", config=True
    )
    config_file = Unicode(
        "conda_store_config.py", help="config file to load for conda-store", config=True
    )
    def initialize(self, *args, **kwargs):
        # After standard CLI initialization, load the traitlets config file.
        super().initialize(*args, **kwargs)
        self.load_config_file(self.config_file)
    @property
    def conda_store(self):
        # Lazily construct and cache a single CondaStore instance per worker.
        if hasattr(self, "_conda_store"):
            return self._conda_store
        self._conda_store = CondaStore(parent=self, log=self.log)
        return self._conda_store
    def start(self):
        """Launch the Celery worker (with the beat scheduler enabled)."""
        argv = [
            "worker",
            "--loglevel=INFO",
            "--beat",
        ]
        self.conda_store.ensure_directories()
        self.conda_store.celery_app.worker_main(argv)
| StarcoderdataPython |
11398088 | <filename>src/encoder_decoder/finetuning.py
import os
import torch
import torch.nn.functional as F
from datasets import DatasetDict
from torch.utils.data import DataLoader
from tqdm.auto import tqdm, trange
from transformers import AutoTokenizer, set_seed
from src.encoder_decoder.config import Config
from src.encoder_decoder.encoder_decoder import EncoderDecoderModel
def encode_data(data, tokenizer, max_length):
    """Tokenize a question/answer pair into encoder and decoder features.

    Mutates and returns *data*, adding input_ids/attention_mask for the
    question and decoder_input_ids/decoder_attention_mask for the answer,
    each padded or truncated to *max_length*.
    """
    question_enc = tokenizer(data["question"], padding="max_length", truncation=True, max_length=max_length)
    answer_enc = tokenizer(data["answer"], padding="max_length", truncation=True, max_length=max_length)
    data.update(
        input_ids=question_enc.input_ids,
        attention_mask=question_enc.attention_mask,
        decoder_input_ids=answer_enc.input_ids,
        decoder_attention_mask=answer_enc.attention_mask,
    )
    return data
def calculate_loss(outputs, targets, batch):
    """Masked token-level cross-entropy, summed and normalized by batch size.

    outputs: logits of shape (B, T, C); targets: token ids of shape (B, T).
    Tokens equal to 1 (the pad id) are masked out. Returns None when the
    batch dimensions disagree or either tensor is empty.
    """
    logits = outputs.permute(0, 2, 1)  # cross_entropy expects (B, C, T)
    if logits.shape[0] != targets.shape[0] or logits.shape[0] * targets.shape[0] == 0:
        return None
    per_token = F.cross_entropy(logits, targets, reduction="none")
    per_token = per_token * (targets != 1)  # zero out padding positions
    return per_token.sum() / batch
def train_model(config):
    """Fine-tune the encoder-decoder model on question/answer pairs.

    Reads df_train.csv / df_eval.csv from config.input_path, trains for
    config.epochs epochs, validates after each epoch, and saves a final
    checkpoint (named with the last validation loss) to config.output_path.
    """
    set_seed(config.seed)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
    model = EncoderDecoderModel(num_hidden_layers=config.num_hidden_layers)
    model.to(device)
    model.train()
    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config.learning_rate, weight_decay=config.weight_decay)
    raw_datasets = DatasetDict.from_csv(
        {
            "train": os.path.join(config.input_path, "df_train.csv"),
            "val": os.path.join(config.input_path, "df_eval.csv"),
        }
    )
    # Tokenize once up front, then expose only the tensor columns to the loaders.
    raw_datasets = raw_datasets.map(lambda x: encode_data(x, tokenizer, config.max_length))
    raw_datasets.set_format(
        type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask"]
    )
    dataloaders = {x: DataLoader(raw_datasets[x], batch_size=config.batch_size, shuffle=True) for x in ["train", "val"]}
    train_iterator = trange(0, config.epochs, desc="Epoch")
    iteration = 0
    for _ in train_iterator:
        progressbar = tqdm(dataloaders["train"])
        model.train()
        for batch in progressbar:
            optimizer.zero_grad()
            outputs = model(batch["input_ids"].to(device))
            # Decoder input ids double as the cross-entropy targets.
            loss = calculate_loss(outputs["logits"], batch["decoder_input_ids"].to(device), config.batch_size)
            loss.backward()
            optimizer.step()
            progressbar.desc = f"iterations (loss: {round(loss.item(), 2)})"
            iteration += 1
        # Per-epoch validation: average loss over all eval batches.
        model.eval()
        eval_loss = 0.0
        eval_steps = 0
        progressbar_eval = tqdm(dataloaders["val"], desc="validation")
        for batch in progressbar_eval:
            with torch.no_grad():
                outputs = model(batch["input_ids"].to(device))
                loss = calculate_loss(outputs["logits"], batch["decoder_input_ids"].to(device), config.batch_size)
            eval_loss += loss.item()
            eval_steps += 1
        eval_loss = eval_loss / eval_steps
        print("=== validation: loss ===", round(eval_loss, 2))
    # NOTE(review): only the final epoch's weights are saved (no best-model
    # tracking) — confirm intended.
    torch.save(model.state_dict(), os.path.join(config.output_path, f"encoder_decoder_model_loss_{eval_loss}.pt"))
# Script entry point: run fine-tuning with the default configuration.
if __name__ == "__main__":
    train_model(Config())
| StarcoderdataPython |
1738572 | <filename>workflow/scripts/count/create_demultiplexed_index.py
# Author: <NAME> 2021
# print out barcodes and correspoding assignment (tsv format to standard out) from a picke file
import click
import csv
import pandas as pd
# options
@click.command()
@click.option('--experiment',
              'experiment_file',
              required=True,
              type=click.Path(exists=True, readable=True),
              help='Experiment file.')
@click.option('--output',
              'output_file',
              required=True,
              # exists=False: this script CREATES the output file, so it must
              # not be required to exist beforehand (exists=True made click
              # reject any fresh output path).
              type=click.Path(exists=False, writable=True),
              help='Output file.')
def cli(experiment_file, output_file):
    """Build a demultiplexing index (barcode -> sample name) from an experiment CSV.

    The experiment sheet must contain the columns BC_DNA, BC_RNA, Condition
    and Replicate. One "<BC>\\t<Condition>_<Replicate>_{DNA,RNA}" row is
    written per barcode, tab-separated, without header or index.
    """
    exp = pd.read_csv(experiment_file)
    # DNA barcodes paired with "<Condition>_<Replicate>_DNA" sample names.
    exp_dna = pd.concat(
        [
            exp[["BC_DNA"]],
            exp[["Condition", "Replicate"]].agg("_".join, axis=1) + "_DNA",
        ],
        axis=1,
    ).rename(columns={"BC_DNA": "BC", 0: "Sample"})
    # RNA barcodes paired with "<Condition>_<Replicate>_RNA" sample names.
    exp_rna = pd.concat(
        [
            exp[["BC_RNA"]],
            exp[["Condition", "Replicate"]].agg("_".join, axis=1) + "_RNA",
        ],
        axis=1,
    ).rename(columns={"BC_RNA": "BC", 0: "Sample"})
    # DataFrame.append was removed in pandas 2.0; concatenate instead.
    pd.concat([exp_dna, exp_rna]).to_csv(output_file, sep="\t", header=False, index=False)
# Standard click entry point.
if __name__ == '__main__':
    cli()
| StarcoderdataPython |
341009 | <gh_stars>1-10
import nextcord, random
from nextcord.ext import commands, tasks
from nextcord.ext import commands
status = ['Arma 3', 'Minecraft', 'Left 4 Dead 2', 'Jackbox Party Pack', 'Metal Gear Solid V', 'Phasmophobia', 'Counter-Strike: Global Offensive', 'Garry\'s Mod', 'Barotrauma']
class statusChange(commands.Cog):
    """Cog that rotates the bot's "playing" status every two hours."""
    def __init__(self, client):
        self.client = client
    #Status Changin'
    @tasks.loop(hours=2)
    async def change_status(self):
        # Pick a random game title from the module-level `status` list.
        await self.client.change_presence(activity=nextcord.Game(random.choice(status)))
    @commands.Cog.listener()
    async def on_ready(self):
        # NOTE(review): on_ready can fire more than once per session; starting
        # an already-running tasks.loop raises RuntimeError — confirm handled.
        await self.change_status.start()
def setup(client):
    # Extension entry point used by client.load_extension().
    client.add_cog(statusChange(client))
| StarcoderdataPython |
# Print an "I-beam" pattern of height h: a solid top bar, a diagonal of
# single stars running right-to-left (with trailing spaces preserved),
# and a solid bottom bar.
h = 5
print("*" * h)
for row in range(1, h):
    line = "".join("*" if col == h - row else " " for col in range(h))
    print(line)
print("*" * h)
| StarcoderdataPython |
3487313 | <filename>scripts/format_script.py<gh_stars>1-10
import numpy as np
from telstate_interface import TelstateInterface
import redis
import pickle
import time
# Pull the latest calibration solutions from Telstate and open a local Redis
# connection for storing the formatted results.
TelInt = TelstateInterface()
cal_K, cal_G, cal_B, cal_all, timestamp = TelInt.query_telstate('10.98.2.128:31829', '/home/danielc/')
red = redis.StrictRedis(port='6379', decode_responses=True)
print(cal_all)
test2 = cal_all  # NOTE(review): unused alias — confirm it can be removed
def cal_array(cals):
    """Format calibration solutions into a multidimensional array.

    Args:
        cals (dict): dictionary of calibration solutions as returned from
            Telstate. Keys look like 'm001h' / 'm001v' (antenna number plus
            polarisation); values are per-channel sequences, or a single
            scalar when there is one value per antenna.

    Returns:
        cal_mat (numpy array): complex values of dimensions:
        (pol, nchans, nants)
        Antennas are sorted by number.
        H-pol is first.
    """
    # Determine number of channels:
    ant_keys = list(cals.keys())
    try:
        nchans = len(cals[ant_keys[0]])
    except TypeError:
        # Scalar values have no len(): a single value for each antenna.
        # (Was a bare `except:`, which would also hide real errors.)
        nchans = 1
    # Determine number of antennas (two polarisations per antenna):
    nants = int(len(ant_keys) / 2)
    # Ordered list of antenna numbers (chars 1-3 of e.g. 'm001h'):
    ant_n = []
    for key in ant_keys:
        ant_n.append(int(key[1:4]))
    ant_n = np.unique(np.array(ant_n))
    ant_n = np.sort(ant_n)
    # Fill multidimensional array. The `np.complex` alias was removed in
    # NumPy 1.24; the builtin `complex` maps to complex128.
    result_array = np.zeros((2, nchans, nants), dtype=complex)
    for i in range(len(ant_n)):
        # hpol:
        ant_name = 'm{}h'.format(str(ant_n[i]).zfill(3))
        result_array[0, :, i] = cals[ant_name]
        # vpol:
        ant_name = 'm{}v'.format(str(ant_n[i]).zfill(3))
        result_array[1, :, i] = cals[ant_name]
    return result_array
def format_cals(product_id, cal_K, cal_G, cal_B, cal_all, nants, ants, nchans, timestamp):
    """Write calibration solutions into a Redis hash under the correct key.
    Calibration data is serialised before saving.

    Uses the module-level `red` Redis client. Each call stores one hash
    keyed "<product_id>:cal_solutions:<timestamp>" and registers that key
    in a sorted-set index scored by the wall-clock save time.
    """
    # Serialise calibration data
    cal_K = pickle.dumps(cal_array(cal_K))
    cal_G = pickle.dumps(cal_array(cal_G))
    cal_B = pickle.dumps(cal_array(cal_B))
    cal_all = pickle.dumps(cal_array(cal_all))
    # Save current calibration session to Redis
    hash_key = "{}:cal_solutions:{}".format(product_id, timestamp)
    hash_dict = {"cal_K": cal_K, "cal_G": cal_G, "cal_B": cal_B, "cal_all": cal_all,
                 "nants": nants, "antenna_list": str(ants), "nchan": nchans}
    red.hmset(hash_key, hash_dict)
    # Save to index (zset)
    index_name = "{}:cal_solutions:index".format(product_id)
    index_score = int(time.time())
    red.zadd(index_name, {hash_key: index_score})
# Store the current solutions for array_1 using the antenna list kept in Redis.
ant_key = 'array_1:antennas'
ant_list = red.lrange(ant_key, 0, red.llen(ant_key))
format_cals('array_1', cal_K, cal_G, cal_B, cal_all, 63, ant_list, 4096, timestamp)
| StarcoderdataPython |
130060 | import pytest
import os
import warnings
from fixtures import COMPRESSION_NAMES
import zipfile
from compress_pickle import (
dump,
dumps,
load,
loads,
get_compression_read_mode,
get_compression_write_mode,
)
@pytest.mark.usefixtures("wrong_compressions")
def test_dump_fails_on_unhandled_compression(wrong_compressions):
    """dump() must reject compression names it does not know."""
    with pytest.raises(ValueError):
        dump(
            1,
            "test_path.pkl",
            compression=wrong_compressions,
            set_default_extension=False,
        )
@pytest.mark.usefixtures("wrong_compressions")
def test_load_fails_on_unhandled_compression(wrong_compressions):
    """load() must reject compression names it does not know."""
    with pytest.raises(ValueError):
        load(
            "test_path.pkl", compression=wrong_compressions, set_default_extension=False
        )
@pytest.mark.usefixtures("simple_dump_and_remove")
def test_dump_compresses(simple_dump_and_remove):
    """A real compressor must shrink the payload; plain pickle grows it."""
    path, compression, message = simple_dump_and_remove
    kwargs = dict()
    if compression == "zipfile":
        # Stored (uncompressed) is the zipfile default; force real deflate.
        kwargs = dict(zipfile_compression=zipfile.ZIP_DEFLATED)
    dump(message, path, compression=compression, set_default_extension=False, **kwargs)
    with open(path, "rb") as f:
        compressed_message = f.read()
    # Pickle framing adds overhead, so no compression yields a larger file.
    if compression in (None, "pickle"):
        assert len(compressed_message) > len(message)
    else:
        assert len(compressed_message) < len(message)
@pytest.mark.usefixtures("dump_load")
def test_dump_load(dump_load):
    """Round-trip dump/load for every fixture combination, or the expected failure."""
    (
        message,
        path,
        compression,
        set_default_extension,
        expected_file,
        expected_fail,
    ) = dump_load
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        if expected_fail is None:
            dump(
                message, path, compression, set_default_extension=set_default_extension
            )
            loaded_message = load(
                path, compression, set_default_extension=set_default_extension
            )
            assert loaded_message == message
        else:
            # Both directions must raise the same exception type.
            with pytest.raises(expected_fail):
                dump(
                    message,
                    path,
                    compression,
                    set_default_extension=set_default_extension,
                )
            with pytest.raises(expected_fail):
                load(path, compression, set_default_extension=set_default_extension)
@pytest.mark.usefixtures("random_message", "compressions")
def test_dumps_loads(random_message, compressions):
    """In-memory dumps/loads must round-trip for every compression."""
    message = random_message
    assert loads(dumps(message, compressions), compressions) == message
@pytest.mark.usefixtures("simple_dump_and_remove")
def test_dump_vs_dumps(simple_dump_and_remove):
    """dumps() must produce the same payload dump() writes to disk."""
    path, compression, message = simple_dump_and_remove
    dump(message, path, compression=compression, set_default_extension=False)
    cmp1 = dumps(message, compression=compression, arcname=path)
    with open(path, "rb") as f:
        cmp2 = f.read()
    # gzip streams embed a timestamp header, so compare decoded payloads there.
    if compression != "gzip":
        assert cmp1 == cmp2
    else:
        assert loads(cmp1, compression) == loads(cmp2, compression)
@pytest.mark.usefixtures("simple_dump_and_remove")
def test_dump_load_on_filestreams(simple_dump_and_remove):
    """dump/load must also work on already-open binary file objects."""
    path, compression, message = simple_dump_and_remove
    read_mode = "rb"  # get_compression_read_mode(compression)
    write_mode = "wb"  # get_compression_write_mode(compression)
    with open(path, write_mode) as f:
        dump(message, f, compression=compression)
    with open(path, read_mode) as f:
        raw_content = f.read()
        f.seek(0)
        loaded_message = load(f, compression=compression)
    assert loaded_message == message
    os.remove(path)
    # Dumping by path must produce the same bytes as dumping to a stream.
    dump(message, path, compression=compression, set_default_extension=False)
    with open(path, read_mode) as f:
        benchmark = f.read()
    # zipfile compression stores the data in a zip archive. The archive then
    # contains a file with the data. Said file's mtime will always be
    # different between the two dump calls, so we skip the following assertion
    if compression != "zipfile":
        assert raw_content == benchmark
@pytest.mark.usefixtures("simple_dump_and_remove")
def test_load_vs_loads(simple_dump_and_remove):
    """loads() on raw bytes must match load() on the file they came from."""
    path, compression, message = simple_dump_and_remove
    dump(message, path, compression=compression, set_default_extension=False)
    with open(path, "rb") as f:
        data = f.read()
    cmp1 = loads(data, compression=compression, arcname=os.path.basename(path))
    cmp2 = load(path, compression=compression, set_default_extension=False)
    assert cmp1 == cmp2
    assert cmp1 == message
| StarcoderdataPython |
3288477 | <reponame>laylalaisy/Educational-Administration-System<gh_stars>1-10
from django.db import models
from basicInfo.config import course_type
class observer:
    """Minimal observer-pattern base class; subclasses override update()."""
    def __init__(self):
        pass
    def update(self, *text):
        # Default no-op; *text carries an arbitrary notification payload.
        pass
class OperationObserver(observer):
    """Observer that records a log entry in the `operation` table."""
    def update(self, *text):
        # text[0] is the human-readable description of the operation.
        operation.objects.create(description=text[0])
        print("log saved")
class TeacherObserver(observer):
    """Observer that inserts a teacher row on notification."""
    def update(self, *text):
        # NOTE(review): Manager.create() accepts keyword arguments only;
        # passing positional *text will raise TypeError — confirm call sites.
        teacher.objects.create(*text)
class account(models.Model):
    '''
    account(account_id int, password varchar(20) not null) // unified login account
    '''
    # NOTE(review): the observer list is declared but save() never notifies it;
    # confirm whether update() calls were intended there.
    observers = [OperationObserver(), TeacherObserver()]  # observer list
    account_id = models.CharField(max_length=20, primary_key=True)
    password = models.CharField(max_length=200, null=False)
    salt = models.CharField(max_length=8, null=False, default="<PASSWORD>")
    # Role discriminator (0 = default role).
    type = models.IntegerField(null=False, default=0)
    def save(self, *args, **kwargs):
        print("account saved")
        super(account, self).save(*args, **kwargs)
    def __str__(self):
        return self.account_id
class attrib(models.Model):
    '''
    attrib(account_id int, nickname varchar(40), picture varchar(40), email varchar(40), exp int, coin int) // account profile attributes, ref account
    '''
    # NOTE(review): a ForeignKey with primary_key=True is effectively
    # one-to-one; OneToOneField would state the intent — confirm.
    account_id = models.ForeignKey(account, on_delete=models.CASCADE, primary_key=True)
    nickname = models.CharField(max_length=40, null=False)
    picture = models.ImageField(upload_to="basicInfo/static/basicInfo/picture", null=True)
    email = models.CharField(max_length=40, null=True)
    exp = models.IntegerField(null=True)
    coin = models.IntegerField(null=True)
class student(models.Model):
    '''
    student(student_id int, name varchar(20) not null, dorm varchar(40)) // student
    '''
    student_id = models.CharField(max_length=20, primary_key=True)
    name = models.CharField(max_length=20, null=False)
    dorm = models.CharField(max_length=40)
    # Year of study; new students start in grade 1.
    grade = models.IntegerField(default=1)
class college(models.Model):
    '''
    college(college_id int, name varchar(40) not null, intro text) // college
    '''
    college_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=40, null=False)
    intro = models.TextField()
class discipline(models.Model):
    '''
    discipline(discipline_id int, name varchar(40) not null, intro text) // discipline / major subject
    '''
    discipline_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=40, null=False)
    intro = models.TextField()
class major(models.Model):
    '''
    major(student_id int, discipline_id int) // a student's major, ref student, discipline
    '''
    # NOTE: student_id is the actual primary key, so each student can have at
    # most one major row.
    student_id = models.CharField(max_length=20, primary_key=True)
    discipline_id = models.IntegerField()
    class Meta:
        # Removed the invalid "primary" attribute: unknown class Meta options
        # make Django raise TypeError at import. Django has no composite
        # primary keys; unique_together already enforces pair uniqueness.
        unique_together = ("student_id", "discipline_id")
class minor(models.Model):
    '''
    minor(student_id int, discipline_id int) // a student's minor, ref student, discipline
    '''
    # NOTE: student_id is the actual primary key, so each student can have at
    # most one minor row.
    student_id = models.CharField(max_length=20, primary_key=True)
    discipline_id = models.IntegerField()
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("student_id", "discipline_id")
class belong(models.Model):
    '''
    belong(discipline_id int, college_id int) // college a discipline belongs to, ref discipline, college
    '''
    discipline_id = models.ForeignKey(discipline, on_delete=models.CASCADE)
    college_id = models.ForeignKey(college, on_delete=models.CASCADE)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("discipline_id", "college_id")
class course(models.Model):
    '''
    course(course_id int, name varchar(40) not null, credit real not null, capacity int not null, intro text, type varchar(40) not null) // course
    '''
    course_id = models.CharField(max_length=10, primary_key=True)
    name = models.CharField(max_length=40, null=False)
    credit = models.DecimalField(max_digits=3, decimal_places=1)
    hour = models.FloatField()
    intro = models.TextField()
    duplicate = models.IntegerField(default=0)
    # Course category; choices come from basicInfo.config.course_type.
    type = models.CharField(max_length=40, null=False, choices=course_type, default="0")
    semester = models.CharField(max_length=10, null=False, default="Spring")
    exam_date = models.DateTimeField(null=True)
class pre(models.Model):
    '''
    pre(tmp_course_id int, pre_course_id int) // course prerequisite, ref course
    '''
    tmp_course_id = models.IntegerField()
    pre_course_id = models.ForeignKey(course, on_delete=models.CASCADE)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("tmp_course_id", "pre_course_id")
class room(models.Model):
    '''
    room(room_id int, capacity int not null, location varchar(40) not null, type varchar(40) not null) // classroom
    '''
    room_id = models.AutoField(primary_key=True)
    capacity = models.IntegerField(null=False)
    location = models.CharField(max_length=40, null=False)
    type = models.CharField(max_length=40, null=False)
class learn(models.Model):
    '''
    learn(student_id int, course_id int, grade int, status int) // course enrollment, ref student, course
    '''
    student_id = models.ForeignKey(student, on_delete=models.CASCADE)
    course_id = models.ForeignKey(course, on_delete=models.CASCADE)
    # Final grade; null until graded.
    grade = models.IntegerField(null=True)
    status = models.IntegerField(null=False, default=0)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("student_id", "course_id")
class time(models.Model):
    '''
    time(time_id int, start time not null, end time not null, day int not null) // time slot, constant table
    '''
    time_id = models.AutoField(primary_key=True)
    start = models.TimeField(null=False)
    end = models.TimeField(null=False)
    # Day of week the slot falls on.
    day = models.IntegerField(null=False)
class teacher(models.Model):
    '''
    teacher(teacher_id int, name varchar(20) not null) // teacher
    '''
    # One teacher row per login account (FK used as primary key).
    teacher_id = models.ForeignKey(account, on_delete=models.CASCADE, related_name="teacherId", primary_key=True)
    name = models.CharField(max_length=20, null=False)
    title = models.CharField(max_length=10, null=False, default="lecturer")
    office = models.CharField(max_length=40, null=False)
    management = models.CharField(max_length=40, null=True)
class teach(models.Model):
    '''
    teach(teach_id int, teacher_id int, course_id int, capacity int, duplicate int) // a teacher's course offering, ref teacher, course
    '''
    teach_id = models.AutoField(primary_key=True)
    teacher_id = models.ForeignKey(teacher, on_delete=models.CASCADE, related_name="teacher_id_1")
    course_id = models.ForeignKey(course, on_delete=models.CASCADE, related_name="college_id_1")
    capacity = models.IntegerField(null=False)
    duplicate = models.IntegerField(default=1)
class readyteach(models.Model):
    '''
    readyteach(teacher_id int, course_id int, capacity int) // pending/prepared course offering, ref teacher, course
    '''
    teacher_id = models.ForeignKey(teacher, on_delete=models.CASCADE)
    course_id = models.ForeignKey(course, on_delete=models.CASCADE)
    capacity = models.IntegerField(null=False)
class takeup(models.Model):
    '''
    takeup(teach_id int, time_id int, room_id int, teacher_id int) // time/room occupancy of a course offering, ref teach, time, room, teacher
    '''
    teach_id = models.ForeignKey(teach, on_delete=models.CASCADE)
    time_id = models.ForeignKey(time, on_delete=models.CASCADE)
    room_id = models.ForeignKey(room, on_delete=models.CASCADE)
    teacher_id = models.ForeignKey(teacher, on_delete=models.CASCADE)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("teach_id", "time_id", "room_id")
class examination(models.Model):
    '''
    examination(student_id int, takeup_id int, position int) // exam seating, ref student, takeup
    '''
    student_id = models.ForeignKey(student, on_delete=models.CASCADE)
    takeup_id = models.ForeignKey(takeup, on_delete=models.CASCADE)
    # Seat number; null until assigned.
    position = models.IntegerField(null=True)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("student_id", "takeup_id", "position")
class evaluate(models.Model):
    '''
    evaluate(student_id int, teacher_id int, point real not null, description text) // student evaluation of a teacher, ref student, teacher
    '''
    student_id = models.ForeignKey(student, on_delete=models.CASCADE)
    teacher_id = models.ForeignKey(teacher, on_delete=models.CASCADE)
    point = models.DecimalField(max_digits=3, decimal_places=2)
    description = models.TextField()
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("student_id", "teacher_id")
class work(models.Model):
    '''
    work(teacher_id int, college_id int) // college a teacher works in, ref teacher, college
    '''
    teacher_id = models.ForeignKey(teacher, on_delete=models.CASCADE)
    college_id = models.ForeignKey(college, on_delete=models.CASCADE)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("teacher_id", "college_id")
class master(models.Model):
    '''
    master(teacher_id int, college_id int) // college a teacher administers, ref teacher, college
    '''
    teacher_id = models.ForeignKey(teacher, on_delete=models.CASCADE)
    college_id = models.ForeignKey(college, on_delete=models.CASCADE)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("teacher_id", "college_id")
class assist(models.Model):
    '''
    assist(student_id int, course_id int) // teaching assistant, ref student, course
    '''
    student_id = models.ForeignKey(student, on_delete=models.CASCADE)
    course_id = models.ForeignKey(course, on_delete=models.CASCADE)
    class Meta:
        # Removed the invalid "primary" attribute (unknown class Meta options
        # make Django raise TypeError at import).
        unique_together = ("student_id", "course_id")
class admin(models.Model):
    '''
    admin(admin_id int, name varchar(20) not null) // system administrator
    '''
    # Natural primary key: the administrator's login/ID string (max 20 chars).
    admin_id=models.CharField(max_length=20,primary_key=True)
    name=models.CharField(max_length=20,null=False)
class operation(models.Model):
    '''
    operation(operation_id int, description text not null) // table-operation kind (constant lookup table)
    '''
    # Surrogate auto-increment key; rows are referenced by `log.operation_id`.
    operation_id=models.AutoField(primary_key=True)
    description=models.TextField(null=False)
class log(models.Model):
    '''
    log(log_id int, operation_id int, time time not null, content text) // operation audit record, ref operation
    '''
    log_id=models.AutoField(primary_key=True)
    operation_id=models.ForeignKey(operation,on_delete=models.CASCADE)
    # auto_now_add: stamped exactly once, when the row is created.
    time=models.DateTimeField(auto_now_add=True,null=False)
    content=models.TextField()
| StarcoderdataPython |
9766842 | <filename>python/8kyu/is_he_gonna_survive.py<gh_stars>1-10
"""Kata url: https://www.codewars.com/kata/59ca8246d751df55cc00014c."""
def hero(bullets: int, dragons: int) -> bool:
    """Return True when there are at least two bullets for every dragon."""
    required = 2 * dragons
    return bullets - required >= 0
| StarcoderdataPython |
11286813 | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Metric(object):
    """Value object describing a single hardware metric series.

    Every field except ``internal_id`` is fixed at construction time;
    ``internal_id`` stays writable because the backend assigns it only
    after the metric has been registered server-side.
    """
    def __init__(self, name, description, resource_type, unit, min_value, max_value, gauges, internal_id=None):
        self._internal_id = internal_id
        self._name = name
        self._description = description
        self._resource_type = resource_type
        self._unit = unit
        self._min_value = min_value
        self._max_value = max_value
        self._gauges = gauges
    @property
    def internal_id(self):
        return self._internal_id
    @internal_id.setter
    def internal_id(self, value):
        # The only mutable field; see class docstring.
        self._internal_id = value
    @property
    def name(self):
        return self._name
    @property
    def description(self):
        return self._description
    @property
    def resource_type(self):
        return self._resource_type
    @property
    def unit(self):
        return self._unit
    @property
    def min_value(self):
        return self._min_value
    @property
    def max_value(self):
        return self._max_value
    @property
    def gauges(self):
        return self._gauges
    def __repr__(self):
        template = ('Metric(internal_id={}, name={}, description={}, resource_type={}, unit={}, min_value={}, '
                    'max_value={}, gauges={})')
        fields = (self.internal_id, self.name, self.description, self.resource_type,
                  self.unit, self.min_value, self.max_value, self.gauges)
        return template.format(*fields)
    def __eq__(self, other):
        # Equality mirrors the original implementation: same concrete class
        # and identical repr (which covers every field).
        if self.__class__ != other.__class__:
            return False
        return repr(self) == repr(other)
class MetricResourceType(object):
    """Enumeration of resource categories a Metric can describe.

    Plain string constants; the `u''` literals keep Python 2/3 parity.
    """
    CPU = u'CPU'
    RAM = u'RAM'
    GPU = u'GPU'
    GPU_RAM = u'GPU_RAM'
    OTHER = u'OTHER'
| StarcoderdataPython |
9419 | # Copyright 2005-2008, <NAME>
# Copyright 2010, 2012 <NAME>
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute, modify and/or redistribute modified versions of
# this program under the terms of the GNU Affero General Public License
# (AGPL) as published by the Free Software Foundation (FSF), either
# version 3 of the License, or (at your option) any later version of the
# AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, include
from django.contrib import admin, admindocs
from conservancy import feeds, frontpage, sponsors
import conservancy.apps.fundgoal.views as fundgoal_views
import conservancy.static.views as static_views
# Discover each installed app's admin.py so its models are registered.
admin.autodiscover()
# URL routing table for the Conservancy site. Order matters: the first
# matching regex wins, so the exact-match front-page patterns come first
# and the broad prefix patterns (about/, donate/, ...) come last.
urlpatterns = [
    url(r'^$', frontpage.view),
    url(r'^sponsors$', frontpage.view),
    url(r'^sponsors/$', sponsors.view),
    url(r'^sponsors/index.html$', sponsors.view),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', admin.site.urls),
    # RSS/Atom feeds (class-based feeds are instantiated here).
    url(r'^feeds/blog/?$', feeds.BlogFeed()),
    url(r'^feeds/news/?$', feeds.PressReleaseFeed()),
    url(r'^feeds/omnibus/?$', feeds.OmnibusFeed()),
    url(r'^feeds/?$', feeds.view),
    url(r'^news(/|$)', include('conservancy.apps.news.urls')),
    url(r'^blog(/|$)', include('conservancy.apps.blog.urls')),
    # formerly static templated things... (dirs with templates)
    url(r'^error/(40[134]|500)(?:/index\.html|/|)$', static_views.handler),
    url(r'^error', static_views.index),
    url(r'^about', static_views.index),
    url(r'^donate', static_views.index),
    # `fundraiser_sought` is passed through as an extra view kwarg.
    url(r'^copyleft-compliance', static_views.index,
        {'fundraiser_sought' : 'vmware-match-0'}),
    url(r'^projects', static_views.index),
    url(r'^npoacct', static_views.index,
        {'fundraiser_sought' : 'npoacct'}),
    url(r'^contractpatch', include('conservancy.apps.contractpatch.urls')),
    url(r'^overview', static_views.index),
    url(r'^privacy-policy', static_views.index),
    url(r'^supporter', include('conservancy.apps.supporter.urls')),
    url(r'^fundraiser_data', fundgoal_views.view),
]
| StarcoderdataPython |
# Advent of Code 2021, day 5: count grid points covered by two or more
# hydrothermal vent lines. Part 1 uses only horizontal/vertical segments;
# part 2 also walks the 45-degree diagonals. The two near-identical loops
# of the original are merged into one parameterized helper.

def _parse_segment(line):
    """Parse a segment description 'x1,y1 -> x2,y2' into two (x, y) tuples."""
    left, right = line.split(" -> ")
    x1, y1 = map(int, left.split(","))
    x2, y2 = map(int, right.split(","))
    return (x1, y1), (x2, y2)

def count_overlaps(lines, include_diagonals):
    """Return how many grid points are covered by at least two segments.

    Args:
        lines: iterable of segment strings, e.g. "0,9 -> 5,9".
        include_diagonals: when False, segments that are neither horizontal
            nor vertical are skipped (part 1); when True they are walked
            point by point (part 2 — inputs only contain 45-degree lines).
    """
    grid = {}
    for line in lines:
        (x1, y1), (x2, y2) = _parse_segment(line)
        # Unit step in each axis: -1, 0 or 1 (sign of the delta).
        dx = (x2 > x1) - (x2 < x1)
        dy = (y2 > y1) - (y2 < y1)
        if not include_diagonals and dx and dy:
            continue
        x, y = x1, y1
        while True:
            grid[(x, y)] = grid.get((x, y), 0) + 1
            if (x, y) == (x2, y2):
                break
            x += dx
            y += dy
    return sum(1 for count in grid.values() if count > 1)

if __name__ == "__main__":
    # Guarding the file I/O keeps the module importable (and testable)
    # without an input.txt present; running as a script prints the same
    # two answers as before.
    with open("input.txt") as data:
        puzzle_lines = data.read().splitlines()
    print(count_overlaps(puzzle_lines, include_diagonals=False))  # Part 1
    print(count_overlaps(puzzle_lines, include_diagonals=True))   # Part 2
| StarcoderdataPython |
1778222 | <reponame>Alex-Greenen/SpectralNeuralAnimation
# process data
from ProcessData.ProcessData import ProcessData
import os
def clear_dir(directory):
    """Delete every entry directly inside *directory* (non-recursive).

    Like the original inline loops, this calls os.remove on each entry,
    so it only handles plain files (a subdirectory would raise OSError).
    """
    for name in os.listdir(directory):
        os.remove(os.path.join(directory, name))

if __name__ == "__main__":
    # Clear: wipe output from a previous run so stale files cannot leak
    # into the freshly generated datasets.
    clear_dir('TrainingData')
    clear_dir('ValidationData')
    # Process: the nested lists are [start, end] frame ranges to extract
    # from each BVH clip; -1 means "through the last frame".
    ProcessData(['walk1_subject1.bvh', 'walk3_subject1.bvh', 'run2_subject1.bvh'], 'TrainingData', [[[100, 1400], [2450, 3670]], [[6690, 7330]], [[100, 3000], [3800, -1]]], False)
    ProcessData(['walk2_subject1.bvh', 'run2_subject1.bvh'], 'ValidationData', [[[100, 730]], [[3000, 3800]]], False)
| StarcoderdataPython |
3570676 | <filename>miniSearchEngine/construct_engine/encoding_decoding.py
def vb_encoding(ori):
    """Variable-byte encode the non-negative integer *ori* as a bit string.

    The binary form is left-padded with zeros up to a multiple of seven
    bits, then each 7-bit group is prefixed with a continuation flag:
    '0' for every group except the last, which gets '1'.
    """
    bits = bin(ori)[2:]
    # Matches the original arithmetic: when len(bits) is already a
    # multiple of 7, a full extra group of 7 zeros is prepended.
    pad = 7 - len(bits) % 7
    payload = '0' * pad + bits
    groups = [payload[i:i + 7] for i in range(0, len(payload), 7)]
    last = len(groups) - 1
    return ''.join(('1' if idx == last else '0') + group
                   for idx, group in enumerate(groups))
def vb_decoding(encoded):
    """Inverse of vb_encoding: recover the integer from a bit string.

    *encoded* is a string whose length is a multiple of 8; the first bit
    of every byte is a continuation flag and is discarded.
    """
    byte_count = len(encoded) // 8
    chunks = [encoded[i * 8 + 1:(i + 1) * 8] for i in range(byte_count)]
    # Drop the zero padding from the first group, but keep the group
    # intact when it is all zeros (the encoding of 0), exactly as the
    # original character-scan did.
    chunks[0] = chunks[0].lstrip('0') or chunks[0]
    return int(''.join(chunks), 2)
def gamma_encoding(ori):
    """Elias-gamma encode the integer *ori* (meaningful for ori >= 1).

    Output is the unary length prefix ('1' * len(offset) + '0') followed
    by the offset: the binary digits after the implicit leading 1.
    """
    offset = bin(ori)[3:]
    return '1' * len(offset) + '0' + offset
def gamma_decoding(encoded):
    """Inverse of gamma_encoding: decode one Elias-gamma code word."""
    # The unary prefix ends at the first '0'; everything after it is the
    # offset, which gets the implicit leading '1' restored.
    stop = encoded.find('0')
    return int('1' + encoded[stop + 1:], 2)
| StarcoderdataPython |
4863320 | <filename>hms/exceptions.py
class ApiCallError(Exception):
    """Raised when a remote API call fails.

    Attributes:
        detail: optional extra context about the failure (e.g. the raw
            server response); None when no detail was supplied.
    """
    def __init__(self, message, detail=None):
        super().__init__(message)
        self.detail = detail
| StarcoderdataPython |
1608261 | <filename>tensorflow/python/distribute/values.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import weakref
import six
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
def _devices_match(d1, d2):
  """Return True when `d1` and `d2` canonicalize to the same device string."""
  canonical = device_util.canonicalize
  return canonical(d1) == canonical(d2)
class DeviceMap(object):
  """A mapping of replicas & logical device ids to devices.

  Abstract interface: every member raises NotImplementedError and must be
  overridden by a concrete map (SingleDeviceMap, ReplicaDeviceMap,
  WorkerDeviceMap below).
  """
  @property
  def all_devices(self):
    """Returns a tuple of strings with all devices in this DeviceMap."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  @property
  def devices_by_replica(self):
    """Returns a tuple `t` where `t[replica]` is the devices for `replica`."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  @property
  def num_logical_devices(self):
    """Count of the number of devices each replica may be defined across."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  @property
  def num_replicas_in_graph(self):
    """Number of replicas defined in this graph."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def logical_device_from_values(self, values):
    """Returns the logical device index `values` is on."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def logical_to_actual_devices(self, logical_device_id):
    """Returns sequence of `num_replicas_in_graph` devices."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def select_for_current_replica(self, values, replica_context):
    """Select the element of `values` for the current replica."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def replica_for_device(self, device):
    """Return the replica id containing `device`."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def select_for_device(self, values, device):
    """Select the element of `values` to access from `device`."""
    raise NotImplementedError("Required for DeviceMap implementations.")
  def is_device_in_replica(self, device, replica_id):
    """Returns whether `device` is a member of replica `replica_id`."""
    raise NotImplementedError("Required for DeviceMap implementations.")
class SingleDeviceMap(DeviceMap):
  """A device map for 1 non-computation device.
  Use `SingleDeviceMap` when the device does not correspond to some replica of
  the computation. For computation devices, use `ReplicaDeviceMap` below (even
  if there is only a single device in the map).
  """
  def __init__(self, device):
    """Initialize a `SingleDeviceMap`.
    Args:
      device: A string device.
    """
    assert isinstance(device, six.string_types)
    self._device = device_util.canonicalize(device)
    self._devices = (self._device,)
  @property
  def all_devices(self):
    return self._devices
  @property
  def devices_by_replica(self):
    # Non-computation device: there is no replica axis to index by.
    raise ValueError("SingleDeviceMap not indexed by replicas")
  @property
  def num_logical_devices(self):
    return 1
  @property
  def num_replicas_in_graph(self):
    return 1
  def logical_device_from_values(self, values):
    # `values` is unused: this map has exactly one logical device, id 0.
    del values
    return 0
  def logical_to_actual_devices(self, logical_device_id):
    assert logical_device_id == 0
    return self._devices
  def select_for_current_replica(self, values, replica_context):
    # Single device, so there is exactly one value regardless of replica.
    assert len(values) == 1
    del replica_context
    return values[0]
  def replica_for_device(self, device):
    raise ValueError("SingleDeviceMap not indexed by replicas")
  def select_for_device(self, values, device):
    assert len(values) == 1
    if self._device != device:
      raise ValueError("Device %s not found in %s (current device %s)" %
                       (device, self._devices, device_util.current()))
    return values[0]
  def is_device_in_replica(self, device, replica_id):
    raise ValueError("SingleDeviceMap not indexed by replicas")
  def __repr__(self):
    return "%s(%r)" % (self.__class__.__name__, self._device)
class ReplicaDeviceMap(DeviceMap):
  """A device map for 1 device per replica."""
  def __init__(self, devices):
    """Initialize a `ReplicaDeviceMap`.
    Args:
      devices: `devices[i]` is the string device for replica `i`.
    """
    self._devices = tuple(device_util.canonicalize(d) for d in devices)
    if len(set(self._devices)) != len(self._devices):
      raise ValueError("Duplicate devices in %s, after canonicalization: %s" %
                       (devices, self._devices))
    # Reverse lookup: canonical device string -> replica index.
    self._device_to_replica = {d: r for r, d in enumerate(self._devices)}
  @property
  def all_devices(self):
    return self._devices
  @property
  def devices_by_replica(self):
    # One device per replica, wrapped in a 1-tuple each.
    return ((d,) for d in self._devices)
  @property
  def num_logical_devices(self):
    return 1
  @property
  def num_replicas_in_graph(self):
    return len(self._devices)
  def logical_device_from_values(self, values):
    # `values` is unused: this map has exactly one logical device, id 0.
    del values
    return 0
  def logical_to_actual_devices(self, logical_device_id):
    assert logical_device_id == 0
    return self._devices
  def select_for_current_replica(self, values, replica_context):
    assert len(values) == len(self._devices)
    replica_id = replica_context.replica_id_in_sync_group
    # The replica id may be a tensor; fold it to a Python int when its
    # value is statically known, defaulting to replica 0 otherwise.
    if not isinstance(replica_id, int):
      replica_id = tensor_util.constant_value(replica_id)
    if replica_id is None:
      replica_id = 0
    return values[replica_id]
  def replica_for_device(self, device):
    # Returns None for unknown devices (dict.get), unlike select_for_device.
    return self._device_to_replica.get(device)
  def select_for_device(self, values, device):
    assert len(values) == len(self._devices)
    replica_id = self._device_to_replica.get(device)
    if replica_id is None:
      raise ValueError("Device %s not found in %s (current device %s)" %
                       (device, self._devices, device_util.current()))
    return values[replica_id]
  def is_device_in_replica(self, device, replica_id):
    return _devices_match(device, self._devices[replica_id])
  def __str__(self):
    return "[%s]" % (", ".join(self._devices))
  def __repr__(self):
    return "%s([%s])" % (self.__class__.__name__,
                         ", ".join(repr(d) for d in self._devices))
# Pairs a DeviceMap with a logical device index within it; used to
# describe where a set of values lives.
LogicalDeviceSpec = collections.namedtuple(
    "LogicalDeviceSpec", ("device_map", "logical_device"))
class WorkerDeviceMap(DeviceMap):
  """A device map for one value per worker."""
  def __init__(self, devices, num_replicas_per_worker):
    """Initialize a `WorkerDeviceMap`.
    Args:
      devices: `devices[i]` is the string device for worker `i` in in-graph
        replication case; devices is single-element list for its corresponding
        worker in between-graph case.
      num_replicas_per_worker: number of replicas per worker, useful in in-graph
        replication case.
    """
    self._devices = tuple(device_util.canonicalize(d) for d in devices)
    if len(set(self._devices)) != len(self._devices):
      raise ValueError("Duplicate devices in %s, after canonicalization: %s" %
                       (devices, self._devices))
    # NOTE(review): only this private attribute is defined, but
    # PerReplicaSpec._serialize_device_map reads
    # `device_map.num_replicas_per_worker` (no underscore) — that access
    # looks like it would raise AttributeError; verify.
    self._num_replicas_per_worker = num_replicas_per_worker
  @property
  def all_devices(self):
    return self._devices
  @property
  def devices_by_replica(self):
    raise ValueError("`WorkerDeviceMap` is not indexed by replicas")
  @property
  def num_logical_devices(self):
    return 1
  @property
  def num_replicas_in_graph(self):
    return len(self._devices)
  def logical_device_from_values(self, values):
    # `values` is unused: this map has exactly one logical device, id 0.
    del values
    return 0
  def logical_to_actual_devices(self, logical_device_id):
    assert logical_device_id == 0
    return self._devices
  def select_for_current_replica(self, values, replica_context):
    # Integer-divide the global replica id down to its worker index.
    return values[replica_context.replica_id_in_sync_group //
                  self._num_replicas_per_worker]
  def replica_for_device(self, device):
    raise ValueError("`WorkerDeviceMap` not indexed by replicas")
  def select_for_device(self, values, device):
    # TODO(yuefengz): this should map from any device to the value on its
    # corresponding worker.
    return values[self._devices.index(device_util.canonicalize(device))]
  def is_device_in_replica(self, device, replica_id):
    raise ValueError("WorkerDeviceMap not indexed by replicas")
  def __repr__(self):
    return "%s(%r, num_replicas_per_worker=%d)" % (
        self.__class__.__name__, self._devices, self._num_replicas_per_worker)
class DistributedValues(object):
  """Holds a map from replica to values. Either PerReplica or Mirrored."""
  def __init__(self, device_map, values, logical_device=None):
    assert isinstance(device_map, DeviceMap)
    self._device_map = device_map
    self._values = tuple(values)
    # When not given, derive the logical device from the values themselves.
    if logical_device is None:
      logical_device = device_map.logical_device_from_values(self._values)
    self._logical_device = logical_device
  # TODO(josh11b): Split this into two functions, one with device, one without.
  def get(self, device=None):
    """Returns the value for the current device or raises a ValueError."""
    # Resolution order: explicit device arg > current replica context >
    # update-device of an in-flight `strategy.update()` > cross-replica value.
    if device is None:
      replica_context = distribution_strategy_context.get_replica_context()
      if replica_context:
        return self._device_map.select_for_current_replica(
            self._values, replica_context)
      else:
        device = distribute_lib.get_update_device()
        if device is None:
          # Subclasses define how to materialize a cross-replica view.
          return self._get_cross_replica()
    device = device_util.canonicalize(device)
    return self._device_map.select_for_device(self._values, device)
  @property
  def primary(self):
    """Returns a representative component."""
    return self._values[0]
  @property
  def devices(self):
    return self._device_map.logical_to_actual_devices(self._logical_device)
  @property
  def logical_device(self):
    return self._logical_device
  @property
  def device_map(self):
    return self._device_map
  # TODO(josh11b): Replace experimental_local_results with this?
  @property
  def values(self):
    return self._values
  @property
  def is_tensor_like(self):
    # True only when every per-replica component is tensor-like.
    return all(tensor_util.is_tensor(v) for v in self._values)
  def __str__(self):
    devices = self.devices
    assert len(self._values) == len(devices)
    debug_str = ",\n".join("  %d %s: %s" % (i, devices[i], self._values[i])
                           for i in range(len(devices)))
    return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
  def __repr__(self):
    devices = self.devices
    assert len(self._values) == len(devices)
    debug_repr = ",\n".join("  %d %s: %r" % (i, devices[i], self._values[i])
                            for i in range(len(devices)))
    return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
# NOTE(josh11b,apassos): It would be great if we could inspect the values this was
# initialized with and use that to generate the overloaded operators here.
# Unfortunately, Python's rules for special methods don't allow this, see
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# "if a class defines a method named __getitem__(), and x is an instance of
# this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i)."
# In particular, these special methods don't go through __getattr__, and
# it will only use those methods if they are defined in the class, not the
# object.
class DistributedDelegate(DistributedValues):
  """A map from device to values; acts as the same type as the values."""
  def __getattr__(self, name):
    # The '_use_resource_variables' and the attrs starts with '_self' are used
    # for restoring the saved_model proto. At the point these attrs are queried,
    # the variable has not been initialized. Thus it should not query those of
    # the underlying components.
    if name.startswith("_self_") or name == "_use_resource_variables":
      return super(DistributedDelegate, self).__getattr__(name)
    # TODO(priyag): This needs to be made robust against pitfalls from mix use
    # __getattr__ and @property. See b/120402273.
    return getattr(self.get(), name)
  def _get_as_operand(self):
    """Returns the value for operations for the current device.
    Some implementations, e.g. `TPUMirroredVariable`, are not able to return the
    value type within a replica context. They can, however, return a value that
    can be used by the operations below.
    """
    return self.get()
  # Every arithmetic/comparison/indexing special method below simply
  # delegates to the current device's component value, so a delegate can
  # be used anywhere the wrapped value could be.
  # pylint: disable=multiple-statements
  def __add__(self, o): return self._get_as_operand() + o
  def __radd__(self, o): return o + self._get_as_operand()
  def __sub__(self, o): return self._get_as_operand() - o
  def __rsub__(self, o): return o - self._get_as_operand()
  def __mul__(self, o): return self._get_as_operand() * o
  def __rmul__(self, o): return o * self._get_as_operand()
  def __truediv__(self, o): return self._get_as_operand() / o
  def __rtruediv__(self, o): return o / self._get_as_operand()
  def __floordiv__(self, o):
    return self._get_as_operand() // o
  def __rfloordiv__(self, o): return o // self._get_as_operand()
  def __mod__(self, o): return self._get_as_operand() % o
  def __rmod__(self, o): return o % self._get_as_operand()
  def __lt__(self, o): return self._get_as_operand() < o
  def __le__(self, o): return self._get_as_operand() <= o
  def __gt__(self, o): return self._get_as_operand() > o
  def __ge__(self, o): return self._get_as_operand() >= o
  def __and__(self, o): return self._get_as_operand() & o
  def __rand__(self, o): return o & self._get_as_operand()
  def __or__(self, o): return self._get_as_operand() | o
  def __ror__(self, o): return o | self._get_as_operand()
  def __xor__(self, o): return self._get_as_operand() ^ o
  def __rxor__(self, o): return o ^ self._get_as_operand()
  def __getitem__(self, o): return self._get_as_operand()[o]
  def __pow__(self, o, modulo=None):
    return pow(self._get_as_operand(), o, modulo)
  def __rpow__(self, o): return pow(o, self._get_as_operand())
  def __invert__(self): return ~self._get_as_operand()
  def __neg__(self): return -self._get_as_operand()
  def __abs__(self): return abs(self._get_as_operand())
  # __div__/__rdiv__/__matmul__/__rmatmul__ fall back to NotImplemented
  # when the wrapped value does not define them, so Python can try the
  # other operand's reflected method.
  def __div__(self, o):
    try:
      return self._get_as_operand().__div__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __rdiv__(self, o):
    try:
      return self._get_as_operand().__rdiv__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __matmul__(self, o):
    try:
      return self._get_as_operand().__matmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  def __rmatmul__(self, o):
    try:
      return self._get_as_operand().__rmatmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented
  # TODO(josh11b): Even more operator overloads.
class PerReplica(DistributedValues, composite_tensor.CompositeTensor):
  """Holds a map from replica to unsynchronized values."""
  @property
  def _type_spec(self):
    # CompositeTensor hook: describe this value so it can be flattened /
    # rebuilt by tf.nest and function tracing.
    value_specs = [type_spec.type_spec_from_value(v) for v in self._values]
    return PerReplicaSpec(value_specs, self._device_map, self._logical_device)
class PerReplicaSpec(type_spec.TypeSpec):
  """Type specification for a `PerReplica`."""
  __slots__ = ["_value_specs", "_device_map", "_logical_device"]
  value_type = property(lambda self: PerReplica)
  def __init__(self, value_specs, device_map, logical_device):
    # A tuple here means a serialized device map (see _serialize_device_map).
    if isinstance(device_map, tuple):
      device_map = self._deserialize_device_map(device_map)
    self._value_specs = tuple(value_specs)
    self._device_map = device_map
    self._logical_device = logical_device
  def _serialize(self):
    device_map = self._serialize_device_map(self._device_map)
    return (self._value_specs, device_map, self._logical_device)
  @property
  def _component_specs(self):
    return self._value_specs
  def _to_components(self, value):
    replica_context = distribution_strategy_context.get_replica_context()
    if replica_context is not None and replica_context.num_replicas_in_sync > 1:
      raise ValueError(
          "Flattening a PerReplica to components is not supported in replica "
          "context.")
    return value._values  # pylint: disable=protected-access
  def _from_components(self, tensor_list):
    return PerReplica(self._device_map, tensor_list,
                      logical_device=self._logical_device)
  @staticmethod
  def _serialize_device_map(device_map):
    """Encodes a DeviceMap as a plain tuple for TypeSpec serialization."""
    if isinstance(device_map, SingleDeviceMap):
      return ("single", device_map.all_devices[0])
    elif isinstance(device_map, ReplicaDeviceMap):
      return ("replica", device_map.all_devices)
    elif isinstance(device_map, WorkerDeviceMap):
      # NOTE(review): WorkerDeviceMap appears to define only
      # `_num_replicas_per_worker`; this public attribute access looks
      # like it would raise AttributeError — verify.
      return ("worker", device_map.all_devices,
              device_map.num_replicas_per_worker)
    else:
      raise ValueError("PerReplicaSpec does not support device_map type %s"
                       % type(device_map).__name__)
  @staticmethod
  def _deserialize_device_map(device_map_info):
    """Inverse of _serialize_device_map."""
    device_map_type = device_map_info[0]
    device_map_args = device_map_info[1:]
    if device_map_type == "single":
      return SingleDeviceMap(*device_map_args)
    elif device_map_type == "replica":
      return ReplicaDeviceMap(*device_map_args)
    elif device_map_type == "worker":
      return WorkerDeviceMap(*device_map_args)
    else:
      raise ValueError("Unexpected value in state tuple")
# Note that unlike PerReplica, Mirrored values inherit from
# DistributedDelegate and so can be used directly in cross-replica mode.
# TODO(tomhennigan) Should this extend CompositeTensor?
class Mirrored(DistributedDelegate):
  """Holds a map from replica to values which are kept in sync."""
  def _get_cross_replica(self):
    # All components are identical by construction, so any one will do;
    # prefer the component on the current device, else the primary.
    device = device_util.canonicalize(device_util.current())
    replica_id = self._device_map.replica_for_device(device)
    if replica_id is None:
      return self.primary
    return self._values[replica_id]
  def _as_graph_element(self):
    # Let session.run() fetch a Mirrored by converting the selected
    # component to a graph element when it supports that protocol.
    obj = self.get()
    conv_fn = getattr(obj, "_as_graph_element", None)
    if conv_fn and callable(conv_fn):
      return conv_fn()
    return obj
def _assign_on_device(device, variable, tensor):
  """Runs `variable.assign(tensor)` pinned to `device`."""
  with ops.device(device):
    return variable.assign(tensor)
def _assign_add_on_device(device, variable, tensor):
  """Runs `variable.assign_add(tensor)` pinned to `device`."""
  with ops.device(device):
    return variable.assign_add(tensor)
def _assign_sub_on_device(device, variable, tensor):
  """Runs `variable.assign_sub(tensor)` pinned to `device`."""
  with ops.device(device):
    return variable.assign_sub(tensor)
def _assert_strategy(strategy):
  """Raises RuntimeError unless `strategy` is the currently active strategy."""
  if not distribution_strategy_context.has_strategy():
    raise RuntimeError(
        'Need to be inside "with strategy.scope()" for %s' %
        (strategy,))
  current_strategy = distribution_strategy_context.get_strategy()
  # Identity (not equality) check: the exact same strategy object must be
  # active, mixing instances is an error.
  if current_strategy is not strategy:
    raise RuntimeError(
        "Mixing different tf.distribute.Strategy objects: %s is not %s" %
        (current_strategy, strategy))
@contextlib.contextmanager
def _enter_or_assert_strategy(strategy):
  """Enters `strategy.scope()` if no strategy is active; else asserts a match."""
  if not distribution_strategy_context.has_strategy():
    with strategy.scope():
      yield
  else:
    _assert_strategy(strategy)
    yield
# Lightweight stand-in for a tf.Operation, returned by
# DistributedVariable.op in cross-replica context (see the `op` property).
DistributedVarOp = collections.namedtuple(
    "DistributedVarOp", ["name", "graph", "traceback", "type"])
class DistributedVariable(DistributedDelegate, variables_lib.AbstractVariable):
"""Holds a map from replica to variables."""
# TODO(josh11b): Support changing the set of variables if e.g. if new
# devices are joining or a device is to leave.
def __init__(self, strategy, device_map, values, logical_device=None):
self._distribute_strategy = strategy
super(DistributedVariable, self).__init__(
device_map, values, logical_device=logical_device)
self._common_name = self.primary.name.split(":")[0]
# Use a weakref to make it easy to map from the contained values
# to the container without introducing a reference cycle.
for v in values:
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
# tf.keras keeps track of variables initialized using this attribute. When
# tf.keras gets the default session, it initializes all uninitialized vars.
# We need to make _keras_initialized a member of DistributedVariable because
# without this it will use `__getattr__` which will delegate to a component
# variable.
self._keras_initialized = False
# Typically, a `DistributedVariable`'s initializer is composed of the
# initializers of the components variables. However, in some cases, such as
# when restoring from a checkpoint, we may set the _initializer_op
# property on the entire `DistributedVariable`.
self._initializer_op = None
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
The op that evaluates to True or False depending on if all the
component variables are initialized.
"""
result = self.primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(result, self._values[-1].is_initialized(),
name=name)
return result
@property
def initializer(self):
if self._initializer_op:
init_op = self._initializer_op
else:
# return grouped ops of all the var initializations of component values of
# the mirrored variable
init_op = control_flow_ops.group(tuple(
v.initializer for v in self._values))
return init_op
def _get_closest(self):
"""Return member in the same replica if possible, else the primary."""
replica_context = distribution_strategy_context.get_replica_context()
if replica_context:
return self._device_map.select_for_current_replica(
self._values, replica_context)
device = distribute_lib.get_update_device()
if device is None:
device = device_util.canonicalize(device_util.current())
replica_id = self._device_map.replica_for_device(device)
if replica_id is None:
return self.primary
return self._values[replica_id]
def initialized_value(self):
return self._get_closest().initialized_value()
@property
def initial_value(self):
return self._get_closest().initial_value
@property
def graph(self):
return self.primary.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self.primary._unique_id # pylint: disable=protected-access
@property
def _graph_key(self):
"""Lets Optimizers know which graph this variable is from."""
return self.primary._graph_key # pylint: disable=protected-access
@property
def name(self):
return self.primary.name
@property
def dtype(self):
return self.primary.dtype
@property
def shape(self):
return self.primary.shape
@property
def synchronization(self):
return self.primary.synchronization
@property
def handle(self):
device = None
replica_context = distribution_strategy_context.get_replica_context()
if replica_context is None:
device = distribute_lib.get_update_device()
if device is None:
raise ValueError("`handle` is not available outside the replica context"
" or a `tf.distribute.Strategy.update()` call.")
return self.get(device=device).handle
def eval(self, session=None):
return self._get_closest().eval(session)
@property
def _save_slice_info(self):
return self.primary._save_slice_info # pylint: disable=protected-access
def _get_save_slice_info(self):
return self.primary._get_save_slice_info() # pylint: disable=protected-access
def _set_save_slice_info(self, save_slice_info):
  """Sets the same save-slice info on every component, keeping them consistent."""
  for v in self._values:
    v._set_save_slice_info(save_slice_info)  # pylint: disable=protected-access
@property
def device(self):
  """Device of the closest component variable."""
  return self._get_closest().device
@property
def trainable(self):
  """Whether the primary component variable is trainable."""
  return self.primary.trainable
@property
def distribute_strategy(self):
  """The `tf.distribute.Strategy` this variable was created under."""
  return self._distribute_strategy
def get_shape(self):
  """Forwards `get_shape` to the primary component."""
  return self.primary.get_shape()
def to_proto(self, export_scope=None):
  """Forwards `to_proto` to the primary component."""
  return self.primary.to_proto(export_scope=export_scope)
@property
def op(self):
  """Op of the current component; a `DistributedVarOp` facade cross-replica."""
  # We want cross-replica code that does some var.op.X calls
  # to work (even if the current device isn't in self.devices), but
  # other uses of var.op in a cross-replica context to fail.
  if distribution_strategy_context.in_cross_replica_context():
    return DistributedVarOp(self.primary.op.name,
                            self.primary.op.graph,
                            self.primary.op.traceback,
                            self.primary.op.type)
  return self.get().op
@property
def _in_graph_mode(self):
  """Whether the primary component was created in graph mode."""
  return self.primary._in_graph_mode  # pylint: disable=protected-access
def read_value(self):
  """Returns a copy (identity) of the current component's value."""
  with _enter_or_assert_strategy(self._distribute_strategy):
    return array_ops.identity(self.get())
def value(self):
  """Returns the value of the closest component variable."""
  return self._get_closest().value()
def _should_act_as_resource_variable(self):
  """Pass resource_variable_ops.is_resource_variable check."""
  # Intentionally a no-op; existence of the method is the marker.
  pass
# Allow DistributedVariable to be used wherever a dense tensor-like type
# is accepted.
ops.register_dense_tensor_like_type(DistributedVariable)
@contextlib.contextmanager
def _maybe_enter_graph(tensor):
  """Context manager that enters `tensor`'s graph only when one is needed.

  Note: might have an eager tensor but not be executing eagerly when
  building functions.
  """
  no_graph_needed = (context.executing_eagerly()
                     or isinstance(tensor, ops.EagerTensor)
                     or ops.has_default_graph())
  if no_graph_needed:
    yield
  else:
    with tensor.graph.as_default():
      yield
def _make_raw_assign_fn(raw_assign_fn):
  """Wraps a raw resource-variable assign op into a `Variable.assign`-style fn."""
  def assign_fn(var, value, use_locking=False, name=None, read_value=True):  # pylint: disable=missing-docstring
    del use_locking  # Unused.
    with _maybe_enter_graph(var.handle):
      op = raw_assign_fn(
          var.handle, ops.convert_to_tensor(value, dtype=var.dtype), name=name)
      with ops.control_dependencies([op]):
        # Reading after the op makes the returned tensor reflect the update.
        return var._read_variable_op() if read_value else op  # pylint: disable=protected-access
  return assign_fn
class TPUVariableMixin(object):
  """Mixin for TPU variables.

  Overrides the distributed-variable accessors so that, inside a TPU
  (XLA) context, reads/handles go through the replicated variable handle.
  """

  def __init__(self, *args, **kwargs):
    super(TPUVariableMixin, self).__init__(*args, **kwargs)

    # Handle ID is needed for `get_replicated_var_handle` to cache the variables
    # correctly since in eager mode different variables can have the same name.
    if ops.executing_eagerly_outside_functions():
      self._handle_id = self._common_name + "_" + str(id(self.primary))
    else:
      self._handle_id = self._common_name

  def __getattr__(self, name):
    # Generic attribute forwarding is only safe outside a TPU context.
    if _enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self).__getattr__(name)
    else:
      raise AttributeError(
          "'{}' not accessible within a TPU context.".format(name))

  def get(self, device=None):
    """Returns a component variable; unsupported inside a TPU context."""
    if (_enclosing_tpu_context() is None) or (device is not None):
      return super(TPUVariableMixin, self).get(device=device)
    else:
      raise NotImplementedError(
          "`TPUVariableMixin.get()` is not supported within a TPU context.")

  def _get_as_operand(self):
    return self.read_value()

  def _get_closest(self):
    # Inside a TPU context there is one replicated value: use the primary.
    if _enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self)._get_closest()
    else:
      return self.primary

  def numpy(self):
    if context.executing_eagerly():
      return self.read_value().numpy()
    else:
      raise NotImplementedError(
          "numpy() is only available when eager execution is enabled.")

  @property
  def handle(self):
    """Replicated handle inside tpu.rewrite(), else the closest handle."""
    # If we're in a tpu.rewrite(), return the replicated handle.
    tpu_context = _enclosing_tpu_context()
    if tpu_context is None:
      return self._get_closest().handle
    else:
      return tpu_context.get_replicated_var_handle(
          self._handle_id, self._values)

  @property
  def device(self):
    return self.handle.device

  def _read_variable_op(self):
    # Record the read on the gradient tape for trainable variables.
    if self.trainable:
      tape.variable_accessed(self)
    return gen_resource_variable_ops.read_variable_op(self.handle, self.dtype)

  def read_value(self):
    if _enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self).read_value()
    else:
      return self._read_variable_op()

  @property
  def constraint(self):
    return self.primary.constraint

  def _as_graph_element(self):
    if _enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self)._as_graph_element()  # pylint: disable=protected-access
    else:
      return None

  @property
  def op(self):
    """A `DistributedVarOp` facade built from the primary's op."""
    return DistributedVarOp(
        self.primary.op.name, self.primary.op.graph, self.primary.op.traceback,
        self.primary.op.type)

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    # pylint: disable=protected-access
    if _enclosing_tpu_context() is None:
      return super(TPUVariableMixin, self)._dense_var_to_tensor(
          dtype=dtype, name=name, as_ref=as_ref)
    # pylint: enable=protected-access
    elif dtype is not None and dtype != self.dtype:
      return math_ops.cast(self.read_value(), dtype)
    else:
      return self.handle if as_ref else self.read_value()
def _validate_colocate_extended(v, extended):
  """Raises if `v` was not created under the strategy owning `extended`."""
  variable_strategy = v._distribute_strategy  # pylint: disable=protected-access
  if variable_strategy.extended is not extended:
    raise ValueError(
        "`colocate_vars_with` must only be passed a variable created in this "
        "tf.distribute.Strategy.scope(), not %s created in scope: %s" %
        (v, variable_strategy))
def validate_colocate_distributed_variable(v, extended):
  """Validates `v` is a `DistributedVariable` from `extended`'s strategy."""
  if not isinstance(v, DistributedVariable):
    raise ValueError(
        "`colocate_vars_with` must only be passed a variable created in this "
        "tf.distribute.Strategy.scope(), not: %r" % (v,))
  _validate_colocate_extended(v, extended)
def validate_colocate(v, extended):
  """Like `validate_colocate_distributed_variable` but duck-typed on `v`."""
  if not hasattr(v, "_distribute_strategy"):
    raise ValueError(
        "`colocate_vars_with` must only be passed a variable created in this "
        "tf.distribute.Strategy.scope(), not: %r" % (v,))
  _validate_colocate_extended(v, extended)
def _apply_aggregation(strategy, value, aggregation, destinations):
  """Combines per-replica `value` per `aggregation` onto `destinations`."""
  if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
    # Take replica 0's value and broadcast it to all destinations.
    return strategy.extended.broadcast_to(
        strategy.experimental_local_results(value)[0],
        destinations=destinations)
  reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)
  return strategy.extended.reduce_to(reduce_op, value, destinations)
# Error template raised when a synchronized variable is updated in replica
# context without an aggregation method; callers fill in {variable_type}.
_aggregation_error_msg = (
    "You must specify an aggregation method to update a "
    "{variable_type} in Replica Context. You can do so by passing "
    "an explicit value for argument `aggregation` to tf.Variable(..)."
    "e.g. `tf.Variable(..., aggregation=tf.VariableAggregation.SUM)`"
    "`tf.VariableAggregation` lists the possible aggregation methods."
    "This is required because {variable_type} should always be "
    "kept in sync. When updating them or assigning to them in a "
    "replica context, we automatically try to aggregate the values "
    "before updating the variable. For this aggregation, we need to "
    "know the aggregation method. "
    "Another alternative is to not try to update such "
    "{variable_type} in replica context, but in cross replica "
    "context. You can enter cross replica context by calling "
    "`tf.distribute.get_replica_context().merge_call(merge_fn, ..)`."
    "Inside `merge_fn`, you can then update the {variable_type} "
    "using `tf.distribute.StrategyExtended.update()`.")
class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable):
  """Class for defining how to restore a MirroredVariable."""

  def __init__(self, mirrored_variable, primary_variable, name):
    self._mirrored_variable = mirrored_variable
    # Saving reads only the primary component; restore fans out to all.
    super(_MirroredSaveable, self).__init__(primary_variable, "", name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into all variables."""
    tensor, = restored_tensors
    return control_flow_ops.group(tuple(
        _assign_on_device(v.device, v, tensor)
        for v in self._mirrored_variable.values))
class MirroredVariable(DistributedVariable, Mirrored):
  """Holds a map from replica to variables whose values are kept in sync."""

  def __init__(
      self, strategy, device_map, values, aggregation, logical_device=None):
    super(MirroredVariable, self).__init__(
        strategy, device_map, values, logical_device=logical_device)
    # How replica-local updates are combined before being applied
    # (e.g. SUM, MEAN, ONLY_FIRST_REPLICA).
    self._aggregation = aggregation

  # The arguments to update() are automatically unwrapped so the update()
  # function would normally see regular variables, not MirroredVariables.
  # However, the update function can still operate on wrapped MirroredVariables
  # through object members, captured arguments, etc. This is more likely in an
  # update_non_slot() function (like OptimizerV2._finish), which can
  # update several non-slot variables in one call.
  def _assign_func(self, *args, **kwargs):
    """Dispatches assign fn `f` correctly for replica/cross-replica context."""
    with _enter_or_assert_strategy(self._distribute_strategy):
      f = kwargs.pop("f")
      if distribution_strategy_context.in_cross_replica_context():
        update_device = distribute_lib.get_update_device()
        if update_device is not None:
          # We are calling an assign function on the mirrored variable in an
          # update context.
          v = self.get(device=update_device)
          return f(v, *args, **kwargs)

        # We are calling assign on the mirrored variable in cross replica
        # context, use `strategy.extended.update()` to update the variable.
        return self._distribute_strategy.extended.update(
            self, f, args=args, kwargs=kwargs)
      else:
        _assert_replica_context(self._distribute_strategy)
        # We are calling an assign function on the mirrored variable in replica
        # context.
        # We reduce the value we want to assign/add/sub. More details about how
        # we handle the different use cases can be found in the _reduce method.
        # We call the function on each of the mirrored variables with the
        # reduced value.
        if self._aggregation == vs.VariableAggregation.NONE:
          raise ValueError(_aggregation_error_msg.format(
              variable_type="MirroredVariable"))

        def merge_fn(strategy, value, *other_args, **other_kwargs):
          v = _apply_aggregation(strategy, value, self._aggregation, self)
          return strategy.extended.update(
              self, f, args=(v,) + other_args, kwargs=other_kwargs)

        return distribution_strategy_context.get_replica_context().merge_call(
            merge_fn, args=args, kwargs=kwargs)

  def assign_sub(self, *args, **kwargs):
    assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)

  def assign_add(self, *args, **kwargs):
    assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
    return self._assign_func(f=assign_add_fn, *args, **kwargs)

  def assign(self, *args, **kwargs):
    assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
    return self._assign_func(f=assign_fn, *args, **kwargs)

  @property
  def aggregation(self):
    """The `tf.VariableAggregation` used to combine per-replica updates."""
    return self._aggregation

  def _get_cross_replica(self):
    # Read the component hosted on the current device if any, else primary.
    device = device_util.canonicalize(device_util.current())
    replica_id = self._device_map.replica_for_device(device)
    if replica_id is None:
      return array_ops.identity(self.primary)
    return array_ops.identity(self._values[replica_id])

  def _as_graph_element(self):
    # pylint: disable=protected-access
    if distribution_strategy_context.in_cross_replica_context():
      return self.primary._as_graph_element()
    return self.get()._as_graph_element()

  def _gather_saveables_for_checkpoint(self):
    """Overrides Trackable method.

    This allows both name-based and object-based save and restore of
    MirroredVariables.

    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    def _saveable_factory(name=self._common_name):
      return _MirroredSaveable(self, self.primary, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    # Try to avoid assignments to and other mutations of MirroredVariable
    # state except through a DistributionStrategy.extended.update() call.
    assert not as_ref
    return ops.internal_convert_to_tensor(
        self.get(), dtype=dtype, name=name, as_ref=as_ref)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
  """Tensor-conversion function registered for `MirroredVariable`."""
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(MirroredVariable,
                                        _tensor_conversion_mirrored)
def _enclosing_tpu_context():
  """Returns the innermost enclosing `XLAControlFlowContext`, or None."""
  # pylint: disable=protected-access
  ctx = ops.get_default_graph()._get_control_flow_context()
  # pylint: enable=protected-access
  # Walk outwards through the control-flow contexts until an XLA one is found.
  while ctx is not None:
    if isinstance(ctx, control_flow_ops.XLAControlFlowContext):
      return ctx
    ctx = ctx.outer_context
  return None
def is_distributed_variable(v):
  """Determine if a variable is ds variable or TPU mirrored variable."""
  # TPU variants subclass DistributedVariable, so one isinstance suffices.
  return isinstance(v, DistributedVariable)
class TPUMirroredVariable(TPUVariableMixin, MirroredVariable):
  """Holds a map from replica to TPU variables whose values are kept in sync."""

  def _assign_func(self, *args, **kwargs):
    """Like `MirroredVariable._assign_func`, with a TPU-context fast path."""
    with _enter_or_assert_strategy(self._distribute_strategy):
      if (distribution_strategy_context.in_cross_replica_context()
          and (_enclosing_tpu_context() is not None)):
        # Inside a TPU context, delegate directly to strategy update.
        f = kwargs.pop("f")
        return self._distribute_strategy.extended.update(
            self, f, args=args, kwargs=kwargs)
      else:
        return MirroredVariable._assign_func(self, *args, **kwargs)

  # NOTE(review): raw resource assign ops are used here instead of
  # `Variable.assign*` -- presumably required for TPU handles; confirm.
  def assign_sub(self, *args, **kwargs):
    assign_sub_fn = _make_raw_assign_fn(
        gen_resource_variable_ops.assign_sub_variable_op)
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)

  def assign_add(self, *args, **kwargs):
    assign_add_fn = _make_raw_assign_fn(
        gen_resource_variable_ops.assign_add_variable_op)
    return self._assign_func(f=assign_add_fn, *args, **kwargs)

  def assign(self, *args, **kwargs):
    assign_fn = _make_raw_assign_fn(
        gen_resource_variable_ops.assign_variable_op)
    return self._assign_func(f=assign_fn, *args, **kwargs)
class _SyncOnReadSaveable(saver.BaseSaverBuilder.SaveableObject):
  """Class for defining how to restore a SyncOnReadVariable."""

  def __init__(self, sync_on_read_variable, name):
    self._sync_on_read_variable = sync_on_read_variable

    # We use a callable so that we don't have to evaluate this expression
    # in the case where we are trying to restore instead of save.
    def tensor():
      # Saving reduces the per-replica values per the variable's aggregation.
      strategy = sync_on_read_variable._distribute_strategy  # pylint: disable=protected-access
      return strategy.extended.read_var(sync_on_read_variable)

    spec = saver.BaseSaverBuilder.SaveSpec(
        tensor=tensor,
        slice_spec="",
        name=name,
        dtype=sync_on_read_variable.dtype,
        device=sync_on_read_variable.primary.device)
    super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into all variables."""
    tensor, = restored_tensors
    return self._sync_on_read_variable.assign(tensor)
def _assert_replica_context(strategy):
  """Raises unless called inside a replica context of `strategy`.

  Args:
    strategy: The `tf.distribute.Strategy` the caller expects to be active.

  Raises:
    RuntimeError: If there is no replica context, or the active replica
      context belongs to a different strategy.
  """
  replica_context = distribution_strategy_context.get_replica_context()
  if not replica_context:
    raise RuntimeError(
        "Replica-local variables may only be assigned in a replica context.")
  if replica_context.strategy is not strategy:
    # Fix: this branch previously raised the same message as the
    # "no replica context" case, hiding the actual cause (strategy mismatch).
    raise RuntimeError(
        "Replica-local variables may only be assigned in a replica context "
        "of the tf.distribute.Strategy they were created under, not a "
        "different strategy.")
class SyncOnReadVariable(DistributedVariable):
  """Holds a map from replica to variables whose values are reduced on save."""

  def __init__(
      self, strategy, device_map, values, aggregation, logical_device=None):
    # How per-replica values are combined when read cross-replica / saved.
    self._aggregation = aggregation
    super(SyncOnReadVariable, self).__init__(
        strategy, device_map, values, logical_device=logical_device)

  def assign_sub(self, *args, **kwargs):
    """Subtracts from every component (cross-replica) or this replica's."""
    with _enter_or_assert_strategy(self._distribute_strategy):
      if distribution_strategy_context.in_cross_replica_context():
        if self._aggregation == vs.VariableAggregation.SUM:
          raise ValueError(
              "SyncOnReadVariable does not support `assign_sub` in "
              "cross-replica context when aggregation is set to "
              "`tf.VariableAggregation.SUM`.")
        return control_flow_ops.group(tuple(
            _assign_sub_on_device(v.device, v, args[0]) for v in self._values))
      else:
        return self.get().assign_sub(*args, **kwargs)

  def assign_add(self, *args, **kwargs):
    """Adds to every component (cross-replica) or this replica's component."""
    with _enter_or_assert_strategy(self._distribute_strategy):
      if distribution_strategy_context.in_cross_replica_context():
        if self._aggregation == vs.VariableAggregation.SUM:
          raise ValueError(
              "SyncOnReadVariable does not support `assign_add` in "
              "cross-replica context when aggregation is set to "
              "`tf.VariableAggregation.SUM`.")
        return control_flow_ops.group(tuple(
            _assign_add_on_device(v.device, v, args[0]) for v in self._values))
      else:
        return self.get().assign_add(*args, **kwargs)

  def assign(self, *args, **kwargs):
    """Assigns to every component (cross-replica) or this replica's."""
    with _enter_or_assert_strategy(self._distribute_strategy):
      if distribution_strategy_context.in_cross_replica_context():
        # To preserve the sum across save and restore, we have to divide the
        # total across all devices when restoring a variable that was summed
        # when saving.
        tensor = args[0]
        if self._aggregation == vs.VariableAggregation.SUM:
          tensor *= 1. / len(self.devices)
        return control_flow_ops.group(tuple(
            _assign_on_device(v.device, v, tensor) for v in self._values))
      else:
        return self.get().assign(*args, **kwargs)

  @property
  def aggregation(self):
    """The `tf.VariableAggregation` used for cross-replica reads/saves."""
    return self._aggregation

  def _get_cross_replica(self):
    # Reading cross-replica reduces the components per the aggregation.
    if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
      return self.primary
    with _enter_or_assert_strategy(self._distribute_strategy):
      return self._distribute_strategy.reduce(
          reduce_util.ReduceOp.from_variable_aggregation(self.aggregation),
          self, axis=None)

  def _as_graph_element(self):
    # pylint: disable=protected-access
    if distribution_strategy_context.in_cross_replica_context():
      return self._get_cross_replica()
    return self.get()._as_graph_element()

  def _gather_saveables_for_checkpoint(self):
    """Overrides Trackable method.

    This allows both name-based and object-based save and restore of
    `SyncOnReadVariable`s.

    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    def _saveable_factory(name=self._common_name):
      return _SyncOnReadSaveable(self, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    return ops.internal_convert_to_tensor(
        self.get(), dtype=dtype, name=name, as_ref=as_ref)
# Register a conversion function for SyncOnReadVariable which allows as_ref to
# be true.
def _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):
  """Tensor-conversion function registered for `SyncOnReadVariable`."""
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(SyncOnReadVariable,
                                        _tensor_conversion_sync_on_read)
class TPUSyncOnReadVariable(TPUVariableMixin, SyncOnReadVariable):
  """Holds a map from replica to variables whose values are reduced on save."""

  # Each assign* uses raw resource ops inside a TPU context, and defers to
  # the regular SyncOnReadVariable behavior outside of one.
  def assign_sub(self, *args, **kwargs):
    if _enclosing_tpu_context() is None:
      return SyncOnReadVariable.assign_sub(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_sub_variable_op)(
              self, *args, **kwargs)

  def assign_add(self, *args, **kwargs):
    if _enclosing_tpu_context() is None:
      return SyncOnReadVariable.assign_add(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_add_variable_op)(
              self, *args, **kwargs)

  def assign(self, *args, **kwargs):
    if _enclosing_tpu_context() is None:
      return SyncOnReadVariable.assign(self, *args, **kwargs)
    else:
      return _make_raw_assign_fn(
          gen_resource_variable_ops.assign_variable_op)(self, *args, **kwargs)
def regroup(device_map, values, wrap_class=PerReplica):
  """Makes a nest per-replica into a nest of PerReplica/Mirrored values.

  Args:
    device_map: A `DeviceMap` describing replica -> device placement.
    values: A sequence with one (possibly nested) value per replica.
    wrap_class: Class used to wrap leaf per-replica values.

  Returns:
    A structure matching the per-replica nests, with leaves wrapped in
    `wrap_class`, unwrapped when identical across replicas, or replaced
    by their distributed container when they are components of one.
  """
  assert isinstance(device_map, DeviceMap)
  assert len(values) == device_map.num_replicas_in_graph
  v0 = values[0]

  # Recurse structurally: lists, tuples (incl. namedtuples) and dicts are
  # regrouped element-wise.
  if isinstance(v0, list):
    for v in values[1:]:
      assert isinstance(v, list)
      assert len(v) == len(v0), ("len(v) == %d, len(v0) == %d, v: %s, v0: %s" %
                                 (len(v), len(v0), v, v0))
    return [regroup(device_map, tuple(v[i] for v in values), wrap_class)
            for i in range(len(v0))]

  if isinstance(v0, tuple):
    for v in values[1:]:
      assert isinstance(v, tuple)
      assert len(v) == len(v0)
    regrouped_tuple = tuple(
        regroup(device_map, tuple(v[i] for v in values), wrap_class)
        for i in range(len(v0)))
    if hasattr(v0, "_fields"):
      # This tuple is in fact a namedtuple! Create a new namedtuple instance
      # and initialize it with the regrouped values:
      assert hasattr(type(v0), "_make")
      return type(v0)._make(regrouped_tuple)
    else:
      return regrouped_tuple

  if isinstance(v0, dict):
    v0keys = set(v0.keys())
    for v in values[1:]:
      assert isinstance(v, dict), ("v[0]: %r  v[i]: %r" % (v0, v))
      assert set(v.keys()) == v0keys, ("v[0].keys: %s  v[i].keys: %s" %
                                       (v0keys, set(v.keys())))
    return {key: regroup(device_map, tuple(v[key] for v in values), wrap_class)
            for key in v0keys}

  # If exactly the same object across all devices, return it unwrapped.
  same_id = True
  for v in values[1:]:
    if v is not v0:
      same_id = False
      break
  # Consider three cases where same_id is true:
  # * If v0 is a DistributedVariable (a MirroredVariable or
  #   SyncOnReadVariable, and same_id means it is the same across all
  #   devices), we want to return it. We check DistributedVariable
  #   specifically since it can look like it has a
  #   _distributed_container member since its members do.
  # * If v0 is a member of a distributed variable, in which case
  #   hasattr(v0, "_distributed_container") is true, we want to
  #   return the DistributedVariable that contains it using the
  #   _distributed_container logic below. This case can trigger
  #   same_id when there is only one device.
  # * In any other situation, same_id means we return v0.
  if same_id and (isinstance(v0, DistributedVariable) or
                  not hasattr(v0, "_distributed_container")):
    return v0

  # Detect the case where each device has a parallel component of the
  # same MirroredVariable (or SyncOnReadVariable). In this case we
  # want to return the containing MirroredVariable, after a bunch of
  # sanity checking. In particular, each component should have the
  # same container, and the devices of the variables should match the
  # keys of the per-replica dictionary.
  if hasattr(v0, "_distributed_container"):
    # pylint: disable=protected-access
    assert not isinstance(v0, MirroredVariable), (
        "ids = %s, values = %s" % ([id(v) for v in values], values))
    assert device_map.is_device_in_replica(v0.device, 0), (
        "v0.device = %s, device_map = %s" % (v0.device, device_map))
    distributed_container = v0._distributed_container()
    assert distributed_container is not None
    for r, v in enumerate(values[1:]):
      assert device_map.is_device_in_replica(v.device, r + 1), (
          "v.device = %s, r = %d, device_map = %s" %
          (v.device, r + 1, device_map))
      assert distributed_container is v._distributed_container()
    return distributed_container
  # pylint: enable=protected-access

  return wrap_class(device_map, values)
def select_replica(replica_id, structured):
  """Specialize a nest of regular & per-replica values for one replica."""

  def _get(x):
    # Leaves that are DistributedValues are replaced by their component
    # for `replica_id`; other leaves pass through unchanged.
    return x.values[replica_id] if isinstance(x, DistributedValues) else x

  return nest.map_structure(_get, structured)
def select_device_mirrored(device, structured):
  """Specialize a nest of regular & mirrored values for one device.

  Raises:
    TypeError: If a leaf is a `DistributedValues` that is not `Mirrored`.
  """

  def _get_mirrored(x):
    if isinstance(x, DistributedValues):
      if not isinstance(x, Mirrored):
        raise TypeError(
            "Expected value to be mirrored across replicas: %s in %s." %
            (x, structured))
      return x.get(device)
    else:
      return x

  return nest.map_structure(_get_mirrored, structured)
def update_regroup(extended, device_map, updates, group):
  """Regroup for an update, with dependencies to ensure all updates execute."""
  if not group:
    # Ungrouped: return the per-replica local results of the regrouped value.
    regrouped = regroup(device_map, updates, Mirrored)
    return nest.map_structure(extended._local_results, regrouped)  # pylint: disable=protected-access

  def _make_grouped_mirrored(device_map, values):  # pylint: disable=redefined-outer-name
    """Convert per-replica list `values` into Mirrored type with grouping."""
    if len(values) == 1:
      return Mirrored(device_map, values)

    # Make sure we run all updates. Without this, something like
    # session.run(extended.update(...)) may only update one replica.
    g = control_flow_ops.group(values)

    # If values is just ops, the grouping is enough. Everything in values
    # should have the same type, since we expect every replica to be performing
    # the same computation.
    if not all(tensor_util.is_tensor(v) for v in values):
      return g

    # Otherwise we need tensors with the same values as `values`, but
    # that have a dependency on `g`.
    devices = device_map.logical_to_actual_devices(
        device_map.logical_device_from_values(values))
    assert len(values) == len(devices)
    with_dep = []
    for v, d in zip(values, devices):
      with ops.device(d), ops.control_dependencies([g]):
        with_dep.append(array_ops.identity(v))

    return Mirrored(device_map, with_dep)

  return regroup(device_map, updates, _make_grouped_mirrored)
def value_container(val):
  """Returns the container that this per-replica `value` belongs to.

  Args:
    val: A value returned by `call_for_each_replica()` or a variable
      created in `scope()`.

  Returns:
    A container that `value` belongs to.
    If value does not belong to any container (including the case of
    container having been destroyed), returns the value itself.
  """
  if (hasattr(val, "_distributed_container") and
      # DistributedVariable has _distributed_container defined
      # but we don't want to return it.
      not isinstance(val, DistributedVariable)):
    container = val._distributed_container()  # pylint: disable=protected-access
    # The container is held weakly, so it may have been garbage-collected.
    if container is not None:
      return container
  return val
class AggregatingVariable(variables_lib.Variable):
  """A wrapper around a variable that aggregates updates across replicas."""

  def __init__(self, strategy, v, aggregation):
    self._distribute_strategy = strategy
    # The single underlying variable being wrapped.
    self._v = v
    # NOTE: We don't use "_distributed_container" here because we don't want
    # to trigger that code path in regroup().
    v._aggregating_container = weakref.ref(self)  # pylint: disable=protected-access
    self._aggregation = aggregation

  def get(self):
    """Returns the wrapped underlying variable."""
    return self._v

  @property
  def distribute_strategy(self):
    """The `tf.distribute.Strategy` this variable was created under."""
    return self._distribute_strategy

  def __getattr__(self, name):
    # Anything not defined here is forwarded to the wrapped variable.
    return getattr(self._v, name)

  def _assign_func(self, *args, **kwargs):
    """Runs assign fn `f`, aggregating the value when in replica context."""
    with _enter_or_assert_strategy(self._distribute_strategy):
      f = kwargs.pop("f")
      if distribution_strategy_context.in_cross_replica_context():
        update_device = distribute_lib.get_update_device()
        if update_device is not None:
          # We are calling an assign function in an update context.
          return f(self._v, *args, **kwargs)

        # We are calling an assign function in cross replica context, wrap it in
        # an update call.
        return self._distribute_strategy.extended.update(
            self, f, args=args, kwargs=kwargs)
      else:
        replica_context = distribution_strategy_context.get_replica_context()
        assert replica_context
        # We are calling an assign function in replica context.
        # We reduce the value we want to assign/add/sub. More details about how
        # we handle the different use cases can be found in the _reduce method.
        # We call the function with the reduced value.
        if self._aggregation == vs.VariableAggregation.NONE:
          raise ValueError(_aggregation_error_msg.format(
              variable_type="AggregatingVariable"))

        def merge_fn(strategy, value, *other_args, **other_kwargs):
          v = _apply_aggregation(strategy, value, self._aggregation, self)
          return strategy.extended.update(
              self, f, args=(v,) + other_args, kwargs=other_kwargs)

        return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs)

  def assign_sub(self, *args, **kwargs):
    assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)

  def assign_add(self, *args, **kwargs):
    assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
    return self._assign_func(f=assign_add_fn, *args, **kwargs)

  def assign(self, *args, **kwargs):
    assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
    return self._assign_func(f=assign_fn, *args, **kwargs)

  @property
  def initializer(self):
    return self._v.initializer

  def initialized_value(self):
    return self._v.initialized_value()

  @property
  def initial_value(self):
    return self._v.initial_value

  @property
  def op(self):
    return self._v.op

  def read_value(self):
    return self._v.read_value()

  def eval(self, session=None):
    return self._v.eval(session)

  @property
  def graph(self):
    return self._v.graph

  @property
  def device(self):
    return self._v.device

  @property
  def shape(self):
    return self._v.shape

  @property
  def aggregation(self):
    return self._aggregation

  @property
  def name(self):
    return self._v.name

  @property
  def dtype(self):
    return self._v.dtype

  # TODO(josh11b): Test saving & restoring.
  def _gather_saveables_for_checkpoint(self):
    return {trackable.VARIABLE_VALUE_KEY: self._v}

  # Arithmetic, comparison and container protocols all delegate to the
  # wrapped variable.
  # pylint: disable=multiple-statements
  def __add__(self, o): return self._v + o
  def __radd__(self, o): return o + self._v
  def __sub__(self, o): return self._v - o
  def __rsub__(self, o): return o - self._v
  def __mul__(self, o): return self._v * o
  def __rmul__(self, o): return o * self._v
  def __truediv__(self, o): return self._v / o
  def __rtruediv__(self, o): return o / self._v
  def __floordiv__(self, o): return self._v // o
  def __rfloordiv__(self, o): return o // self._v
  def __mod__(self, o): return self._v % o
  def __rmod__(self, o): return o % self._v
  def __lt__(self, o): return self._v < o
  def __le__(self, o): return self._v <= o
  def __gt__(self, o): return self._v > o
  def __ge__(self, o): return self._v >= o
  def __and__(self, o): return self._v & o
  def __rand__(self, o): return o & self._v
  def __or__(self, o): return self._v | o
  def __ror__(self, o): return o | self._v
  def __xor__(self, o): return self._v ^ o
  def __rxor__(self, o): return o ^ self._v
  def __getitem__(self, o): return self._v[o]
  def __pow__(self, o, modulo=None): return pow(self._v, o, modulo)
  def __rpow__(self, o): return pow(o, self._v)
  def __invert__(self): return ~self._v
  def __neg__(self): return -self._v
  def __abs__(self): return abs(self._v)

  def __div__(self, o):
    try:
      return self._v.__div__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __rdiv__(self, o):
    try:
      return self._v.__rdiv__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __matmul__(self, o):
    try:
      return self._v.__matmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __rmatmul__(self, o):
    try:
      return self._v.__rmatmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __str__(self):
    return str(self._v)

  def __repr__(self):
    return repr(self._v)

  def _should_act_as_resource_variable(self):
    """Pass resource_variable_ops.is_resource_variable check."""
    pass
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
  """Tensor-conversion function registered for `AggregatingVariable`."""
  return ops.internal_convert_to_tensor(
      var.get(), dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(
    AggregatingVariable, _tensor_conversion_aggregate)
ops.register_dense_tensor_like_type(AggregatingVariable)
| StarcoderdataPython |
363788 | <filename>pinax/announcements/urls.py<gh_stars>0
from django.conf.urls import url
from pinax.announcements import views
# URLconf for pinax-announcements: list/create plus per-object (pk-keyed)
# detail, dismiss ("hide"), update and delete views.
# NOTE(review): `django.conf.urls.url` is the pre-Django-2.0 API (removed
# in Django 4.0) -- confirm which Django versions this app supports.
urlpatterns = [
    url(r"^$", views.AnnouncementListView.as_view(),
        name="announcement_list"),
    url(r"^create/$", views.AnnouncementCreateView.as_view(),
        name="announcement_create"),
    url(r"^(?P<pk>\d+)/$", views.AnnouncementDetailView.as_view(),
        name="announcement_detail"),
    url(r"^(?P<pk>\d+)/hide/$", views.AnnouncementDismissView.as_view(),
        name="announcement_dismiss"),
    url(r"^(?P<pk>\d+)/update/$", views.AnnouncementUpdateView.as_view(),
        name="announcement_update"),
    url(r"^(?P<pk>\d+)/delete/$", views.AnnouncementDeleteView.as_view(),
        name="announcement_delete"),
]
| StarcoderdataPython |
3365538 | from source import (
CINT
, CSTR
# CPU instructions semantics definition
, Type
, Comment
, Call
, MCall
, Macro
, Function
, Declare
, Variable
, BranchSwitch
, SwitchCase
, OpAssign
, OpIndex
, OpAdd
, BranchIf
, BranchElse
, OpLess
, OpOr
, OpAnd
, OpNot
, OpEq
)
from qemu import (
# CPU template generation settings
CPUInfo
, CPURegister
, gen_reg_names_range
, Instruction
, Opcode
, Operand
, Reserved
# helpers
, underscored_name_shortener
# heuristic
, get_vp
)
| StarcoderdataPython |
4909964 | <reponame>WestenPy/Curso_em_video
'''Faça um programa que leia um número inteiro qualquer e mostre
na tela a sua tabuada'''
print('-=' * 30)
print('          TABUADA')
print('-=' * 30)
n = int(input('Digite um número para saber a sua tabuada: '))
# A single loop replaces the ten hand-written print statements: the output is
# identical, and the table range can be changed in one place.
for i in range(1, 11):
    print(f'{n} x {i} = {n * i}')
| StarcoderdataPython |
8106461 | <filename>KvantProgram.py
import os
# Screen-clear helper: shells out to `clear` (Termux/Linux terminals).
clear = lambda: os.system("clear")
from colorama import init
init()  # initialise colorama so ANSI colour codes work
from colorama import Fore, Back, Style
clear()
# Colour-coded tool menu (user-facing text is intentionally in Russian and
# must not be altered).
print( Fore.RED + "Сделанно KvantGD гайд на канале")
print( Fore.WHITE + "Ссылка на канал YouTube: https://www.youtube.com/channel/KvantGD")
print("")
print( Fore.RESET + "[1]TermGuard - антивирус в termux")
print( Fore.BLUE + "[2]b0mb3r - для спама смс")
print( Fore.YELLOW + "[3]YTDownload - СКАЧАТЬ ВИДЕО ИЗ ЮТУБА")
print( Fore.CYAN + "[4]cspamvk - накрутка комментариев в VK")
print( Fore.BLUE + "[5]define - для вычисления местоположения человека")
print( Fore.YELLOW + "[6]fisher - взлом VK")
print( Fore.GREEN + "[7]fuck-seeker - узнать точное местоположение человека")
print( Fore.CYAN + "[8]halk - длоя dos")
print( Fore.BLUE + "[9]hiddeneye - для фишинга с 32 шаблонами")
print( Fore.YELLOW + "[10]InfinityMailSpam - смс бомбер на почту")
print( Fore.GREEN + "[11]ipcs - взлом камер видеонаблюдения по всему миру")
print( Fore.CYAN + "[12]IPGeolocation - местоположение по ip")
print( Fore.BLUE + "[13]kingfish2.0 - взлом инстаграма")
print( Fore.YELLOW + "[14]Kingfish3 - взлом ВК, инстаграма, одноклассников, озона, телеграма")
print( Fore.GREEN + "[15]MyServer - для хостинга сайтов")
print( Fore.RED + "[16]noisy - запутать своего интернет-провайдера")
print( Fore.CYAN + "[17]Planetwork-DDOS - DDoS утилита")
print( Fore.BLUE + "[18]QIWI_Api - взлом QIWI")
print( Fore.YELLOW + "[19]routersploit - Взлом wi-fi")
print( Fore.GREEN + "[20]saycheese - взломать камеру через сайт")
print( Fore.CYAN + "[21]shellphish - Взлом FaceBook")
print( Fore.BLUE + "[22]Sherlock - пробив инфы про номеру и многое другое")
print( Fore.YELLOW + "[23]SmsHam - смс бомбер")
print( Fore.GREEN + "[24]termux-faceroot - фейковые root-права")
print( Fore.RED + "[25]Termux-login - поставить пароль на Termux")
print( Fore.YELLOW + "[98]Выйти")
print( Fore.GREEN + "[99]Обновить утелиту")
print( Back.WHITE )
print( Fore.RED + "За использование утелит связанных со взломами я ответственность не несу они были представлены только в ознакомительный целях")
print( Back.BLACK )
print( Fore.WHITE + " v1.3")
# NOTE(review): a non-numeric entry makes int() raise ValueError and the
# script crash; there is no input validation.
a=int(input( Fore.MAGENTA + "Введите номер программы: "))
print( Fore.RESET )
clear()
# Dispatch on the menu choice: each branch installs one tool by shelling out
# to apt/pkg/git, clears the screen, and prints the command needed to launch
# the tool afterwards.  NOTE(review): these are independent `if`s (not elif),
# and an unknown number falls through silently doing nothing.
if a==1 :
    os.system("apt update && apt install git -y && cd && git clone https://github.com/DarkGa/TermGuard && cd TermGuard && bash install.sh && cd")
    clear()
    print("Запустить:")
    start = "tguard -h"
    print(start)
if a==2 :
    os.system("apt update && pkg install python -y && pkg install make -y && pkg install clang -y && pip install colorama && pip install b0mb3r")
    clear()
    print("Запустить:")
    start = "b0mb3r"
    print(start)
if a==3 :
    os.system("apt update && apt upgrade -y && apt install python git -y && pip install pytube3 && cd && git clone https://github.com/kitasS/YTDownload")
    clear()
    print("Запустить:")
    start = "cd YTDownload && python YTDwnload.py"
    print(start)
if a==4 :
    os.system("apt update && apt install git -y && apt install python -y && apt install python2 && cd && git clone https://github.com/YamkaFox/cspamvk && cd cspamvk && pip install vkbee")
    clear()
    print("Запустить:")
    start = "cd && cd cspamvk && python main.py"
    print(start)
if a==5 :
    os.system("cd && apt update && apt install git -y && apt install python -y && apt install php -y && apt install openssh -y && cd && git clone https://github.com/TermuxGuide/termux-ngrok && cd termux-ngrok && bash termux-ngrok.sh && cd && git clone https://github.com/termux-lab/define.git")
    clear()
    print("Запустить:")
    start = "cd && cd define && php define.php"
    print(start)
    print("")
    print("Во 2 сессии")
    print("ngrok http 8080")
if a==6 :
    os.system("apt update && apt upgrade -y && pkg install python -y && pkg install python2 && apt install git -y && cd && git clone https://github.com/foxlitegor/fisher && cd fisher && chmod 777 install.sh && bash install.sh && apt install openssh -y")
    clear()
    print("Запустить:")
    start = "cd && cd fisher && fish"
    print(start)
    print("Потом введите run")
    print("")
    print("Вторая сессия:")
    print("ssh -R 80:localhost:8080 ssh.localhost.run -l vk.com")
if a==7 :
    os.system("apt update && apt upgrade -y && apt install git python -y && pip install flask && cd && git clone https://github.com/kitasS/fuck-seeker && apt install openssh -y")
    clear()
    print("Запустить:")
    start = "cd && cd fuck-seeker && python server.py"
    print(start)
    print("")
    print("Вторая сессия:")
    print("ssh -R 80:localhost:8080 ssh.localhost.run -l ip-help.com")
if a==8 :
    os.system("apt update -y && apt upgrade -y && apt install python2 && apt install git && cd && git clone https://github.com/4D4N-Termux/hulky")
    clear()
    print("Запустить:")
    start = "cd && cd hulky && python2 hulky.py http://google.com"
    print(start)
if a==9 :
    os.system("apt update && apt install git -y && apt install python -y && cd && git clone https://github.com/DarkSecDevelopers/HiddenEye.git && cd HiddenEye && pip install -r requirements.txt && pip install requests")
    clear()
    print("Запустить:")
    start = "cd && cd hiddeneye && python HiddenEye.py"
    print(start)
if a==10 :
    os.system("apt install python git nano -y && cd && git clone https://github.com/kitasS/InfinityMailSpam")
    clear()
    print("Запустить:")
    # NOTE(review): "cd && cd && InfinityMailSpam" looks like a typo for
    # "cd && cd InfinityMailSpam" — printed as-is, not executed here.
    start = "cd && cd && InfinityMailSpam && python MailSpam.py"
    print(start)
if a==11 :
    os.system("pkg update -y && pkg upgrade -y && pkg install python2 -y && pkg install git -y && pip2 install requests && cd && git clone https://github.com/kancotdiq/ipcs")
    clear()
    print("Запустить:")
    start = "cd && cd ipcs && python2 scan.py"
    print(start)
if a==12 :
    os.system("apt update && apt install python -y && apt install git -y && cd && git clone https://github.com/maldevel/IPGeoLocation && cd IPGeoLocation && pip install -r requirements.txt")
    clear()
    print("Запустить:")
    start = "cd && cd IPGeolocation && python ipgeolocation.py -t (айпи)"
    print(start)
if a==13 :
    os.system("apt update && apt install git -y && apt install python -y && apt install php -y && apt install openssh -y && cd && git clone https://github.com/TermuxGuide/termux-ngrok && cd termux-ngrok && bash termux-ngrok.sh && cd && git clone https://github.com/termux-lab/kingfish2.0 && cd kingfish2.0 && pip install prettytable")
    clear()
    print("Запустить:")
    start = "cd && cd kingfish2.0 && python fsh.py"
    print(start)
    print("")
    print("Во 2 сессии")
    print("ngrok http 8080")
if a==14 :
    os.system("apt update && apt install git -y && apt install python -y && apt install php -y && apt install openssh -y && cd && git clone https://github.com/TermuxGuide/termux-ngrok && cd termux-ngrok && bash termux-ngrok.sh && cd && git clone https://github.com/termux-lab/kingfish3 && cd kingfish3.0 && pip install prettytable && pip install colorama")
    clear()
    print("Запустить:")
    start = "cd && cd kingfish3 && python fsh.py"
    print(start)
    print("")
    print("Во 2 сессии")
    print("ngrok http 8080")
if a==15 :
    os.system("pkg update -y && pkg upgrade -y && pkg install git -y && git clone http://github.com/rajkumardusad/MyServer && cd MyServer && chmod +x install && ./install && pkg install openssh -y")
    clear()
    print("Запустить:")
    start = "myserver start"
    print(start)
    print("")
    print("Во 2 сессии")
    print("ssh -R 80:localhost:8080 ssh.localhost.run -l yourdomain")
if a==16 :
    os.system("apt update -y && apt upgrade -y && apt install python git -y && pip install requests && cd && git clone https://github.com/1tayH/noisy.git")
    clear()
    print("Запустить:")
    start = "cd && cd noisy && python noisy.py --config config.json"
    print(start)
if a==17 :
    os.system("apt update && apt install git -y && apt install python -y && apt install python2 && cd && git clone https://github.com/Hydra7/Planetwork-DDOS")
    clear()
    print("Запустить:")
    print("cd && cd Planetwork-DDOS && python2 pntddos.py [ip роутера] [порт] 999999")
if a==18 :
    os.system("apt update && apt install git -y && apt install php -y && apt install python -y && apt install libgnutls -y && pip install SimpleQIWI && pip install colorama && cd && git clone https://github.com/WannaDeauth/qiwi_api && pkg install openssh -y")
    clear()
    print("Запустить:")
    start = "cd && cd qiwi_api && python qiwi.py"
    print(start)
    print("")
    print("Во 2 сессии")
    print("ssh -R 80:localhost:8080 ssh.localhost.run -l QIWI_Api.com")
if a==19 :
    os.system("apt update && apt install git -y && apt install python -y && apt install figlet -y && pip install wheel && pip install future && cd && git clone https://github.com/41Team/RoutersploitTermux && cd RoutersploitTermux && bash run.sh")
    clear()
    print("Запустить:")
    print("cd && cd routersploit && python rsf.py && use scanners/autopwn && set target [айпишник] && exploit")
if a==20 :
    os.system("apt update && apt install git wget php openssh -y && cd && git clone https://github.com/thelinuxchoice/saycheese")
    clear()
    print("Запустить:")
    start = "cd && cd saycheese && bash saycheese.sh"
    print(start)
if a==21 :
    os.system("apt update && apt upgrade -y && apt install python -y && apt install git -y && apt install php curl openssh -y && apt install wget -y && cd && git clone https://github.com/thelinuxchoice/shellphish")
    clear()
    print("Запустить:")
    start = "cd && cd shellphish && bash shellphish.sh"
    print(start)
if a==22 :
    os.system("apt update && apt install git -y && apt install python -y && apt install python2 && apt install php -y && apt install openssh -y && cd && git clone https://github.com/termux-lab/sherlock && cd sherlock && pip install requests")
    clear()
    print("Запустить:")
    start = "cd && cd sherlock && python sherl.py"
    print(start)
if a==23 :
    os.system("apt update && apt install git -y && apt install python -y && cd && git clone https://github.com/termux-lab/smsham.git && cd smsham && pip install colorama && pip install requests")
    clear()
    print("Запустить:")
    start = "cd && cd smsham && python smsham.py"
    print(start)
if a==24 :
    os.system("pkg update && pkg upgrade -y && apt install git -y && cd && git clone https://github.com/saydog/termux-fakeroot && cd termux-fakeroot && chmod +x setup && ./setup && cd")
    clear()
    print("Запустить:")
    start = "root su"
    print(start)
if a==25 :
    os.system("apt update && apt install git -y && apt install python -y && cd && git clone https://github.com/Ublimjo/Termux-login && cd Termux-login && bash setup.sh")
    clear()
# 98: quit the launcher.
if a==98 :
    exit()
# 99: self-update — re-clone this repository and optionally relaunch.
if a==99 :
    os.system("cd && rm -rf HelpTermuxHack && git clone https://github.com/KvantPro/HelpTermuxHack")
    start = "cd && cd HelpTermuxHack && clear && python KvantProgram.py"
    stat = input( Fore.YELLOW + "Запустить: (y/n)")
    if stat.lower() == "y":
        os.system(start)
    os.system("cd")
    exit()
| StarcoderdataPython |
3392442 | # Copyright (c) ASAPP Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from omegaconf import II
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import register_model
from fairseq.models.wav2vec.wav2vec2 import TransformerEncoder
from fairseq.modules import SamePad
from einops.layers.torch import Rearrange
from .feat_mlp_wav2vec2 import FeatMLPWav2Vec2Config, MLP, FeatMLPWav2Vec2Model
from ..modules.fp32_batch_norm import FP32BatchNorm1d
@torch.jit.script
def make_pad_mask(lengths: torch.Tensor) -> torch.Tensor:
    """Boolean padding mask from per-example lengths.

    Entry [b, t] is True where position t is padding for batch element b
    (i.e. t >= lengths[b]).
    """
    batch_size = lengths.size(0)
    positions = torch.arange(0, lengths.max(), device=lengths.device)
    return positions.unsqueeze(0).expand(batch_size, -1) >= lengths.unsqueeze(1)
@dataclass
class SqueezeWav2Vec2Config(FeatMLPWav2Vec2Config):
    """wav2vec 2.0 config with temporal "squeezing" of the transformer input."""

    # Factor by which the sequence length is downsampled before the
    # transformer layers and upsampled again afterwards.
    squeeze_factor: int = field(
        default=2, metadata={"help": "downsample the sequece length by this factor in pos_conv and upsample after transformer"}
    )
    # One of 'default', 'default-v2', 'multi-layer', 'multi-layer-k4',
    # 'multi-layer-k4-bn' — selects how down/upsampling modules are built
    # (see SqueezeTransformerEncoder).
    squeeze_method: str = field(
        default='default', metadata={"help": "method to squeeze the temporal dimension"}
    )
@register_model("squeeze_wav2vec2", dataclass=SqueezeWav2Vec2Config)
class SqueezeWav2Vec2Model(FeatMLPWav2Vec2Model):
    """FeatMLPWav2Vec2Model whose encoder squeezes/unsqueezes the time axis."""

    def __init__(self, cfg: SqueezeWav2Vec2Config):
        super().__init__(cfg)
        # Replace the encoder built by the parent constructor with the
        # temporally-squeezing variant.
        self.encoder = SqueezeTransformerEncoder(cfg)
class SqueezeTransformerEncoder(TransformerEncoder):
    """TransformerEncoder that downsamples the sequence by ``squeeze_factor``
    before the self-attention layers and upsamples it back afterwards.

    The downsampling path is the sum of a strided positional convolution and
    an average-pooling branch; the upsampling path depends on
    ``cfg.squeeze_method``.
    """

    def __init__(self, cfg: SqueezeWav2Vec2Config):
        super().__init__(cfg)
        self.cfg = cfg
        # Rebuild pos_conv (now strided) and add the pooling/upsampling
        # modules on top of what the parent constructor created.
        self.pos_conv = self.get_pos_conv(cfg.squeeze_factor)
        self.pool = self.get_pool(cfg.squeeze_factor)
        self.upsample = self.get_upsample(cfg.squeeze_factor)

    def get_pool(self, squeeze_factor: int):
        """Average-pooling branch whose stride matches pos_conv."""
        if squeeze_factor == 1:
            return nn.Identity()
        if self.cfg.squeeze_method in {'default', 'default-v2'}:
            # Single pooling step covering the whole squeeze factor.
            pool = nn.AvgPool1d(squeeze_factor, squeeze_factor)
        elif self.cfg.squeeze_method in {'multi-layer', 'multi-layer-k4', 'multi-layer-k4-bn'}:
            # Multi-layer methods halve the length once per conv layer, so the
            # pool is applied repeatedly with stride 2 (see extract_features).
            pool = nn.AvgPool1d(3, 2)
        else:
            raise ValueError(f"squeeze_method={self.cfg.squeeze_method}")
        return pool

    def get_pos_conv(self, squeeze_factor: int):
        """Positional convolution that also performs the downsampling."""
        if self.cfg.squeeze_method in {'default', 'default-v2'}:
            # One grouped conv with stride == squeeze_factor.
            pos_conv = nn.Conv1d(
                self.embedding_dim,
                self.embedding_dim,
                kernel_size=self.cfg.conv_pos,
                padding=self.cfg.conv_pos // 2,
                groups=self.cfg.conv_pos_groups,
                stride=squeeze_factor,
            )
            dropout = 0
            # Initialisation scheme matching fairseq's wav2vec2 pos_conv.
            std = math.sqrt((4 * (1.0 - dropout)) / (self.cfg.conv_pos * self.embedding_dim))
            nn.init.normal_(pos_conv.weight, mean=0, std=std)
            nn.init.constant_(pos_conv.bias, 0)
            pos_conv = nn.utils.weight_norm(pos_conv, name="weight", dim=2)
            pos_conv = nn.Sequential(pos_conv, SamePad(self.cfg.conv_pos), nn.GELU())
        elif self.cfg.squeeze_method in {'multi-layer', 'multi-layer-k4'}:
            # log2(squeeze_factor) stride-2 conv layers applied in sequence.
            layers = []
            for i in range(int(np.log2(squeeze_factor))):
                conv = nn.Conv1d(
                    self.embedding_dim,
                    self.embedding_dim,
                    kernel_size=self.cfg.conv_pos,
                    padding=self.cfg.conv_pos // 2,
                    groups=self.cfg.conv_pos_groups,
                    stride=2,
                )
                dropout = 0
                std = math.sqrt((4 * (1.0 - dropout)) / (self.cfg.conv_pos * self.embedding_dim))
                nn.init.normal_(conv.weight, mean=0, std=std)
                nn.init.constant_(conv.bias, 0)
                conv = nn.utils.weight_norm(conv, name="weight", dim=2)
                layers += [nn.Sequential(conv, nn.GELU())]
            pos_conv = nn.ModuleList(layers)
        elif self.cfg.squeeze_method in {'multi-layer-k4-bn'}:
            # Same as multi-layer but with an fp32 batch-norm after each conv.
            layers = []
            for i in range(int(np.log2(squeeze_factor))):
                conv = nn.Conv1d(
                    self.embedding_dim,
                    self.embedding_dim,
                    kernel_size=self.cfg.conv_pos,
                    padding=self.cfg.conv_pos // 2,
                    groups=self.cfg.conv_pos_groups,
                    stride=2,
                )
                dropout = 0
                std = math.sqrt((4 * (1.0 - dropout)) / (self.cfg.conv_pos * self.embedding_dim))
                nn.init.normal_(conv.weight, mean=0, std=std)
                nn.init.constant_(conv.bias, 0)
                conv = nn.utils.weight_norm(conv, name="weight", dim=2)
                layers += [nn.Sequential(conv, FP32BatchNorm1d(self.embedding_dim), nn.GELU())]
            pos_conv = nn.ModuleList(layers)
        else:
            raise ValueError(f"squeeze_method={self.cfg.squeeze_method}")
        return pos_conv

    def get_upsample(self, squeeze_factor: int):
        """Module that restores the original temporal resolution."""
        if self.cfg.squeeze_method == 'default':
            # Single linear layer producing squeeze_factor frames per step.
            layers = [
                nn.Linear(self.embedding_dim, self.embedding_dim * squeeze_factor),
                nn.GELU(),
                Rearrange('b t (s c) -> b (t s) c', s=squeeze_factor, c=self.embedding_dim),
            ]
            upsample = nn.Sequential(*layers)
        elif self.cfg.squeeze_method == 'default-v2':
            # Doubling layers stacked log2(squeeze_factor) times.
            layers = []
            for _ in range(int(np.log2(squeeze_factor))):
                layers += [
                    nn.Linear(self.embedding_dim, self.embedding_dim * 2),
                    nn.GELU(),
                    Rearrange('b t (s c) -> b (t s) c', s=2, c=self.embedding_dim),
                ]
            upsample = nn.Sequential(*layers)
        elif self.cfg.squeeze_method == 'multi-layer':
            # Transposed convolutions (kernel 2, stride 2) per halving step.
            upsample = [Rearrange('b t c -> b c t')]
            for i in range(int(np.log2(squeeze_factor))):
                upsample += [
                    nn.ConvTranspose1d(self.embedding_dim, self.embedding_dim, 2, 2, 0, bias=False),
                    nn.GELU()
                ]
            upsample.append(Rearrange('b c t -> b t c'))
            upsample = nn.Sequential(*upsample)
        elif self.cfg.squeeze_method == 'multi-layer-k4':
            # Kernel-4 transposed convolutions with padding 1.
            upsample = [Rearrange('b t c -> b c t')]
            for i in range(int(np.log2(squeeze_factor))):
                upsample += [
                    nn.ConvTranspose1d(self.embedding_dim, self.embedding_dim, 4, 2, 1, bias=False),
                    nn.GELU(),
                ]
            upsample.append(Rearrange('b c t -> b t c'))
            upsample = nn.Sequential(*upsample)
        elif self.cfg.squeeze_method == 'multi-layer-k4-bn':
            # As above, with fp32 batch-norm between conv and activation.
            upsample = [Rearrange('b t c -> b c t')]
            for i in range(int(np.log2(squeeze_factor))):
                upsample += [
                    nn.ConvTranspose1d(self.embedding_dim, self.embedding_dim, 4, 2, 1, bias=False),
                    FP32BatchNorm1d(self.embedding_dim),
                    nn.GELU(),
                ]
            upsample.append(Rearrange('b c t -> b t c'))
            upsample = nn.Sequential(*upsample)
        else:
            raise ValueError(f"squeeze_method={self.cfg.squeeze_method}")
        # Kaiming initialisation for all learnable upsampling layers.
        for m in upsample.modules():
            if isinstance(m, (nn.ConvTranspose1d, nn.Linear)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
        return upsample

    def forward(self, x, padding_mask=None):
        x = self.extract_features(x, padding_mask)
        # NOTE(review): get_upsample always returns a module, so this branch
        # appears unreachable — confirm whether it is intentional.
        if self.layer_norm_first and self.upsample is None:
            x = self.layer_norm(x)
        return x

    def extract_features(self, x, padding_mask=None):
        # Zero out padded positions so pooling does not mix in garbage.
        if padding_mask is not None:
            x[padding_mask] = 0

        T = x.shape[1]
        x = x.transpose(1, 2)  # B, T, C to B, C, T
        if isinstance(self.pos_conv, nn.Sequential):
            # Single-step squeeze: conv branch + pooled branch, truncated to
            # the shorter of the two and summed.
            x_conv = self.pos_conv(x)
            x_pool = self.pool(x)
            min_length = min(x_conv.size(-1), x_pool.size(-1))
            x = (x_pool[...,:min_length] + x_conv[...,:min_length])
        elif isinstance(self.pos_conv, nn.ModuleList):
            # Multi-step squeeze: repeat conv+pool once per halving layer.
            for conv in self.pos_conv:
                x_conv = conv(x)
                x_pool = self.pool(x)
                min_length = min(x_conv.size(-1), x_pool.size(-1))
                x = (x_pool[...,:min_length] + x_conv[...,:min_length])
        else:
            raise NotImplementedError
        x = x.transpose(1, 2)

        # adjust the padding_mask
        if padding_mask is not None:
            input_lengths = (1 - padding_mask.long()).sum(-1)
            # apply conv formula to get real output_lengths
            output_lengths = input_lengths // self.cfg.squeeze_factor
            # Align with the actual squeezed length (truncation above may
            # shorten the sequence beyond the plain division).
            output_lengths += x.size(1) - output_lengths.max().item()
            padding_mask = make_pad_mask(output_lengths).to(x.device)  # 1 at padding

        if not self.layer_norm_first:
            x = self.layer_norm(x)

        x = F.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        layer_results = []
        for i, layer in enumerate(self.layers):
            # LayerDrop: randomly skip layers during training.
            dropout_probability = np.random.random()
            if not self.training or (dropout_probability > self.layerdrop):
                x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
                layer_results.append(x)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        if self.upsample is not None:
            if self.layer_norm_first:
                x = self.layer_norm(x)
            x = self.upsample(x)
            # Pad back up to the original length T if upsampling fell short.
            if x.size(1) < T:
                x = F.pad(x, (0, 0, 0, T - x.size(1)))
        return x
| StarcoderdataPython |
3288806 | import copy
import math
from fractions import Fraction
from typing import *
from util import lcm_many, compare_trees, Tree, SMTUtilOption, get_tag, get_coefs_fraction, convert_int2list, \
find_subtrees_by_tags, get_lets
def look_into_floor(to_int: Tree, vars: Dict[str, Tuple[int, int]], lets: Dict[str, Any]) -> Tuple[
        Tree, List[int], int]:
    """Analyse a ``to_int``/``floor`` node.

    Returns ``(expression, values_to_int, den)``: ``expression`` is the floor's
    argument rescaled to integer coefficients (multiplied through by the common
    denominator ``den``), and ``values_to_int`` enumerates every integer value
    the floor can take given the variable bounds in ``vars``.
    """

    def get_bounds(coefs: Dict[str, Fraction], const: Fraction) -> Tuple[Fraction, Fraction]:
        # Interval arithmetic over the per-variable (lower, upper) bounds.
        val_min = Fraction(0)
        val_max = Fraction(0)
        assert set(vars.keys()) >= set(coefs.keys())
        for k in coefs.keys():
            if coefs[k] >= 0:
                val_min += coefs[k] * vars[k][0]
                val_max += coefs[k] * vars[k][1]
            else:
                # A negative coefficient swaps which bound is extremal.
                val_min += coefs[k] * vars[k][1]
                val_max += coefs[k] * vars[k][0]
        val_min += const
        val_max += const
        return val_min, val_max

    assert get_tag(to_int[0]) in ["to_int", "floor"]
    content_to_int = to_int[1]
    coefs, const = get_coefs_fraction(content_to_int, lets)
    assert set(coefs.keys()) <= set(vars.keys())
    # TODO
    content_min_q, content_max_q = get_bounds(coefs, const)
    # content_min = int(math.ceil(content_min_q))
    content_min = int(math.floor(content_min_q))
    content_max = int(math.floor(content_max_q))
    values_to_int = list(range(content_min, content_max + 1))
    # Clear denominators so the rebuilt expression has integer coefficients.
    den = lcm_many([q.denominator for q in coefs.values()] + [const.denominator])
    num_coefs = {k: int(v * den) for k, v in coefs.items()}
    num_const = int(const * den)
    expression = ["+"] + [["*", k, convert_int2list(v)] for k, v in num_coefs.items()] + [num_const]  # type: ignore
    return expression, values_to_int, den
def reduce_floor_strategy1(t: Tree, to_int: Tree, parents: List[Tuple[Tree, int]], vars: Dict[str, Tuple[int, int]],
                           lets: Dict[str, Any]) -> Tree:
    """Eliminate one floor() node by case-splitting over its possible values.

    For each feasible integer value v of the floor, emit one disjunct that
    (a) constrains the denominator-scaled argument to [v*den, (v+1)*den - 1]
    and (b) substitutes the literal v at every parent slot where the floor
    node occurred.
    """
    expression, values_to_int, den = look_into_floor(to_int, vars, lets)
    segments: List[Any] = []
    for v in values_to_int:
        cond: List[Any] = []
        left = convert_int2list(v * den)
        right = convert_int2list((v + 1) * den - 1)
        cond.append(["<=", left, expression])
        cond.append(["<=", expression, right])
        for parent, id in parents:
            # In-place substitution in the shared tree; the deepcopy below
            # snapshots the tree for this particular value of v.
            parent[id] = convert_int2list(v)
        seg = ["and"] + cond + [t]
        segments.append(copy.deepcopy(seg))
    return ["or"] + segments
def is_power_of_two(n):
    """Return True iff ``n`` is a positive power of two (1, 2, 4, 8, ...).

    The previous implementation looped ``while True`` and never terminated
    for ``n <= 0`` (e.g. ``0 // 2 == 0`` forever); guard explicitly instead.
    """
    if n <= 0:
        return False
    # Strip factors of two; a power of two reduces exactly to 1.
    while n % 2 == 0:
        n //= 2
    return n == 1
def reduce_floor_strategy2(t: Tree, to_int: Tree, parents: List[Tuple[Tree, int]], vars: Dict[str, Tuple[int, int]],
                           lets: Dict[str, Any], counter: int, m: int) -> Tuple[Tree, Optional[Tuple[str, Tree]], bool]:
    """Try to rewrite the floor algebraically instead of case-splitting.

    Applies only when every parent is a multiplication by a constant that is a
    multiple of ``den``, with ``den`` a power of two smaller than m/2.
    Returns ``(tree, optional (var_name, defining_expression), applied)``;
    when ``applied`` is False the caller falls back to strategy 1.
    """
    # check the application condition
    expression, _, den = look_into_floor(to_int, vars, lets)
    for parent, id in parents:
        if parent[0] == "*":
            if type(parent[1]) is int or (type(parent[1]) is str and parent[1].lstrip("+-").isnumeric()):
                if int(parent[1]) % den == 0:
                    if den < m / 2:
                        if is_power_of_two(den):
                            pass
                        else:
                            return t, None, False
                    else:
                        return t, None, False
                else:
                    return t, None, False
            else:
                return t, None, False
        else:
            return t, None, False
    varname = f"var_strategy2_{counter}"
    # den * floor(e/den) == e - (e mod den); the fresh variable names that
    # quantity so parents can divide their coefficient by den.
    replacing = ["-", expression, ["mod", expression, den]]
    assert den % m != 0
    for parent, id in parents:
        # Rewrite c * floor(...) into (c // den) * varname, in place.
        parent[1] = int(parent[1]) // den
        parent[2] = varname
    return t, (varname, replacing), True
def reduce_float_from_tree(t: Tree, vars: Dict[str, Tuple[int, int]], m: int, opt: SMTUtilOption,
                           force_strategy: Optional[str] = None) -> Tuple[Tree, Dict[str, Tree]]:
    """Repeatedly eliminate every ``to_int``/``floor`` node from *t*.

    Returns the rewritten tree together with the auxiliary-variable
    definitions (name -> defining expression) introduced by strategy 2.
    """

    def get_to_int_all(tree: Any, parent: Any = None, id: int = -1) -> List[Tuple[Any, Any, int]]:
        return find_subtrees_by_tags(["to_int", "floor"], tree, parent, id)

    counter = 0
    t = copy.deepcopy(t)
    replacements_floor: Dict[str, Tree] = {}
    while True:
        to_ints = get_to_int_all(t)
        if not to_ints:
            break
        # Group structurally identical floor nodes so all their occurrences
        # are replaced together in one pass.
        to_ints_to_parents: List[Tuple[Tree, List[Tuple[Tree, int]]]] = []
        for to_int, parent, id in to_ints:
            added = False
            for k, v in to_ints_to_parents:
                if compare_trees(k, to_int):
                    v.append((parent, id))
                    added = True
                    break
            if not added:
                to_ints_to_parents.append((to_int, [(parent, id)]))
        lets = get_lets(t)
        to_int, parents = to_ints_to_parents[0]
        # force_strategy = "strategy1"  # strategy 2 has a bug!
        if force_strategy is None:
            force_strategy = opt.floor_reduce
        if force_strategy == "strategy1":
            t = reduce_floor_strategy1(t, to_int, parents, vars, lets)
        elif force_strategy == "strategy2":
            t, replacement, res_flag = reduce_floor_strategy2(t, to_int, parents, vars, lets, counter, m)
            counter += 1
            if res_flag == False:
                # Strategy 2 was not applicable here: fall back to splitting.
                t = reduce_floor_strategy1(t, to_int, parents, vars, lets)
            if replacement is not None:
                replacements_floor[replacement[0]] = replacement[1]
        else:
            assert False
    return t, replacements_floor
def process_le_ineq(x_c: Dict[str, int], b: int, m: int, f: Callable[[Dict[str, int], int, int], Any]) -> Any:
    """Normalise ``sum(coef*var) <= b`` over variables ranging in [0, m-1].

    Negative coefficients are removed by substituting v -> (m-1) - v (which
    shifts the bound accordingly), the normalised inequality is handed to *f*,
    and flipped variables in the result are rewritten back as ``(m-1) - v``.
    """
    negated: List[str] = []
    b_added = 0
    # Make every coefficient non-negative, adjusting the bound as we go.
    for name in x_c.keys():
        if x_c[name] < 0:
            x_c[name] = -x_c[name]
            negated.append(name)
            b_added += (m - 1) * x_c[name]
            b += (m - 1) * x_c[name]
    if b < 0:
        # A sum of non-negative terms can never be <= a negative bound.
        return "false"
    # Drop variables that no longer contribute anything.
    x_c = {name: coef for name, coef in x_c.items() if coef != 0}
    boxed = f(x_c, b, m)

    def undo_flip(tree: Any, parent: Any = None, idx: int = -1) -> None:
        # Rewrite every flipped variable occurrence v as (m-1) - v, in place.
        if type(tree) is str and tree in negated:
            if parent is None:
                assert False
            else:
                parent[idx] = ["-", m - 1, tree]  # type: ignore
        elif type(tree) is list:
            for i, child in enumerate(tree):
                undo_flip(child, tree, i)

    undo_flip(boxed)
    return boxed
def process_ineq(t: Tree, m: int, f: Callable[[Dict[str, int], int, int], Any]) -> Any:
    """Normalise a binary inequality node ``[op, lhs, rhs]``.

    Both sides are decomposed into integer coefficients and a constant, the
    inequality is brought into ``sum(coefs) <= const`` form, and the result of
    ``f(coefs, const, m)`` is returned.
    """
    lhs_coefs_q, lhs_const_q = get_coefs_fraction(t[1], {})
    rhs_coefs_q, rhs_const_q = get_coefs_fraction(t[2], {})
    lhs = {name: q.numerator for name, q in lhs_coefs_q.items()}
    rhs = {name: q.numerator for name, q in rhs_coefs_q.items()}
    lhs_const = lhs_const_q.numerator
    rhs_const = rhs_const_q.numerator
    names = set(lhs) | set(rhs)
    tag = get_tag(t[0])
    # <= and < keep lhs - rhs; >= and > negate it to flip the direction.
    if tag in ("<=", "<"):
        sign = 1
    elif tag in (">=", ">"):
        sign = -1
    else:
        assert False
    coefs: Dict[str, int] = {name: sign * (lhs.get(name, 0) - rhs.get(name, 0)) for name in names}
    const = sign * (rhs_const - lhs_const)
    # Strict inequalities over integers: a < b  <=>  a <= b - 1.
    if tag in ("<", ">"):
        const -= 1
    return f(coefs, const, m)
def process_ineqs_in_tree(t: Tree, m: int, f: Callable[[Tree, int], Any]) -> Any:
    """Rebuild *t*, replacing every inequality node with ``f(node, m)``."""

    def rebuild(node: Tree) -> Any:
        # Leaves (non-lists) are returned unchanged.
        if type(node) is not list:
            return node
        # A list headed by an inequality tag is handed to the callback whole.
        if type(node[0]) is str and get_tag(node[0]) in ["<=", ">=", "<", ">"]:
            return f(node, m)
        # Otherwise recurse into every child.
        return [rebuild(child) for child in node]

    return rebuild(t)
if __name__ == "__main__":
    # Intentionally empty: this module only provides library functions.
    pass
def search(blocking, requester, task, keyword, tty_mode):
    """Start an asynchronous image search for *keyword*.

    The task is flagged async and ``task.return_result`` is handed to the
    blocking service as the completion callback.
    """
    # the result of the task the hub thread submitted to us
    # will not be available right now
    task.set_async()
    blocking.search_image(requester, task.return_result, keyword, tty_mode)
| StarcoderdataPython |
8152376 | <filename>falpr/setup.py
from setuptools import setup
VERSION = '0.0.1'

# Read the runtime dependencies from requirements.txt.  Unlike readlines(),
# splitlines() yields lines without trailing newlines, and blank lines are
# filtered out so they do not become empty requirement specifiers.
with open('requirements.txt') as f:
    install_requires = [line.strip() for line in f.read().splitlines() if line.strip()]

setup(
    name='falpr',
    version=VERSION,
    description='Fully Automated Licence Plate Recognizer',
    long_description='Magnificent app which recognizes chars in photo of licence plates',
    author='MB',
    url='https://github.com/matbur95/cypsio-pro',
    license='MIT',
    packages=['falpr'],
    install_requires=install_requires,
)
| StarcoderdataPython |
6647456 | <filename>viz3d/opengl/primitives/sphere.py
from typing import Tuple
from io import StringIO
import numpy as np
__vertex_data = """
0.000000 0.000000 -1.000000 0.102381 -0.315090 -0.943523
0.425323 -0.309011 -0.850654 0.102381 -0.315090 -0.943523
-0.162456 -0.499995 -0.850654 0.102381 -0.315090 -0.943523
0.723607 -0.525725 -0.447220 0.700224 -0.268032 -0.661699
0.425323 -0.309011 -0.850654 0.700224 -0.268032 -0.661699
0.850648 0.000000 -0.525736 0.700224 -0.268032 -0.661699
0.000000 0.000000 -1.000000 -0.268034 -0.194736 -0.943523
-0.162456 -0.499995 -0.850654 -0.268034 -0.194736 -0.943523
-0.525730 0.000000 -0.850652 -0.268034 -0.194736 -0.943523
0.000000 0.000000 -1.000000 -0.268034 0.194737 -0.943523
-0.525730 0.000000 -0.850652 -0.268034 0.194737 -0.943523
-0.162456 0.499995 -0.850654 -0.268034 0.194737 -0.943523
0.000000 0.000000 -1.000000 0.102381 0.315090 -0.943523
-0.162456 0.499995 -0.850654 0.102381 0.315090 -0.943523
0.425323 0.309011 -0.850654 0.102381 0.315090 -0.943523
0.723607 -0.525725 -0.447220 0.904989 -0.268032 -0.330385
0.850648 0.000000 -0.525736 0.904989 -0.268032 -0.330385
0.951058 -0.309013 0.000000 0.904989 -0.268032 -0.330385
-0.276388 -0.850649 -0.447220 0.024747 -0.943521 -0.330386
0.262869 -0.809012 -0.525738 0.024747 -0.943521 -0.330386
0.000000 -1.000000 0.000000 0.024747 -0.943521 -0.330386
-0.894426 0.000000 -0.447216 -0.889697 -0.315095 -0.330385
-0.688189 -0.499997 -0.525736 -0.889697 -0.315095 -0.330385
-0.951058 -0.309013 0.000000 -0.889697 -0.315095 -0.330385
-0.276388 0.850649 -0.447220 -0.574602 0.748784 -0.330388
-0.688189 0.499997 -0.525736 -0.574602 0.748784 -0.330388
-0.587786 0.809017 0.000000 -0.574602 0.748784 -0.330388
0.723607 0.525725 -0.447220 0.534576 0.777865 -0.330387
0.262869 0.809012 -0.525738 0.534576 0.777865 -0.330387
0.587786 0.809017 0.000000 0.534576 0.777865 -0.330387
0.723607 -0.525725 -0.447220 0.802609 -0.583126 -0.125627
0.951058 -0.309013 0.000000 0.802609 -0.583126 -0.125627
0.587786 -0.809017 0.000000 0.802609 -0.583126 -0.125627
-0.276388 -0.850649 -0.447220 -0.306569 -0.943522 -0.125629
0.000000 -1.000000 0.000000 -0.306569 -0.943522 -0.125629
-0.587786 -0.809017 0.000000 -0.306569 -0.943522 -0.125629
-0.894426 0.000000 -0.447216 -0.992077 -0.000000 -0.125628
-0.951058 -0.309013 0.000000 -0.992077 -0.000000 -0.125628
-0.951058 0.309013 0.000000 -0.992077 -0.000000 -0.125628
-0.276388 0.850649 -0.447220 -0.306569 0.943522 -0.125629
-0.587786 0.809017 0.000000 -0.306569 0.943522 -0.125629
0.000000 1.000000 0.000000 -0.306569 0.943522 -0.125629
0.723607 0.525725 -0.447220 0.802609 0.583126 -0.125627
0.587786 0.809017 0.000000 0.802609 0.583126 -0.125627
0.951058 0.309013 0.000000 0.802609 0.583126 -0.125627
0.276388 -0.850649 0.447220 0.408946 -0.628425 0.661698
0.688189 -0.499997 0.525736 0.408946 -0.628425 0.661698
0.162456 -0.499995 0.850654 0.408946 -0.628425 0.661698
-0.723607 -0.525725 0.447220 -0.471300 -0.583122 0.661699
-0.262869 -0.809012 0.525738 -0.471300 -0.583122 0.661699
-0.425323 -0.309011 0.850654 -0.471300 -0.583122 0.661699
-0.723607 0.525725 0.447220 -0.700224 0.268032 0.661699
-0.850648 0.000000 0.525736 -0.700224 0.268032 0.661699
-0.425323 0.309011 0.850654 -0.700224 0.268032 0.661699
0.276388 0.850649 0.447220 0.038530 0.748779 0.661699
-0.262869 0.809012 0.525738 0.038530 0.748779 0.661699
0.162456 0.499995 0.850654 0.038530 0.748779 0.661699
0.894426 0.000000 0.447216 0.724042 0.194736 0.661695
0.688189 0.499997 0.525736 0.724042 0.194736 0.661695
0.525730 0.000000 0.850652 0.724042 0.194736 0.661695
0.525730 0.000000 0.850652 0.268034 0.194737 0.943523
0.162456 0.499995 0.850654 0.268034 0.194737 0.943523
0.000000 0.000000 1.000000 0.268034 0.194737 0.943523
0.525730 0.000000 0.850652 0.491119 0.356821 0.794657
0.688189 0.499997 0.525736 0.491119 0.356821 0.794657
0.162456 0.499995 0.850654 0.491119 0.356821 0.794657
0.688189 0.499997 0.525736 0.408946 0.628425 0.661699
0.276388 0.850649 0.447220 0.408946 0.628425 0.661699
0.162456 0.499995 0.850654 0.408946 0.628425 0.661699
0.162456 0.499995 0.850654 -0.102381 0.315090 0.943523
-0.425323 0.309011 0.850654 -0.102381 0.315090 0.943523
0.000000 0.000000 1.000000 -0.102381 0.315090 0.943523
0.162456 0.499995 0.850654 -0.187594 0.577345 0.794658
-0.262869 0.809012 0.525738 -0.187594 0.577345 0.794658
-0.425323 0.309011 0.850654 -0.187594 0.577345 0.794658
-0.262869 0.809012 0.525738 -0.471300 0.583122 0.661699
-0.723607 0.525725 0.447220 -0.471300 0.583122 0.661699
-0.425323 0.309011 0.850654 -0.471300 0.583122 0.661699
-0.425323 0.309011 0.850654 -0.331305 0.000000 0.943524
-0.425323 -0.309011 0.850654 -0.331305 0.000000 0.943524
0.000000 0.000000 1.000000 -0.331305 0.000000 0.943524
-0.425323 0.309011 0.850654 -0.607060 0.000000 0.794656
-0.850648 0.000000 0.525736 -0.607060 0.000000 0.794656
-0.425323 -0.309011 0.850654 -0.607060 0.000000 0.794656
-0.850648 0.000000 0.525736 -0.700224 -0.268032 0.661699
-0.723607 -0.525725 0.447220 -0.700224 -0.268032 0.661699
-0.425323 -0.309011 0.850654 -0.700224 -0.268032 0.661699
-0.425323 -0.309011 0.850654 -0.102381 -0.315090 0.943523
0.162456 -0.499995 0.850654 -0.102381 -0.315090 0.943523
0.000000 0.000000 1.000000 -0.102381 -0.315090 0.943523
-0.425323 -0.309011 0.850654 -0.187594 -0.577345 0.794658
-0.262869 -0.809012 0.525738 -0.187594 -0.577345 0.794658
0.162456 -0.499995 0.850654 -0.187594 -0.577345 0.794658
-0.262869 -0.809012 0.525738 0.038530 -0.748779 0.661699
0.276388 -0.850649 0.447220 0.038530 -0.748779 0.661699
0.162456 -0.499995 0.850654 0.038530 -0.748779 0.661699
0.162456 -0.499995 0.850654 0.268034 -0.194737 0.943523
0.525730 0.000000 0.850652 0.268034 -0.194737 0.943523
0.000000 0.000000 1.000000 0.268034 -0.194737 0.943523
0.162456 -0.499995 0.850654 0.491119 -0.356821 0.794657
0.688189 -0.499997 0.525736 0.491119 -0.356821 0.794657
0.525730 0.000000 0.850652 0.491119 -0.356821 0.794657
0.688189 -0.499997 0.525736 0.724042 -0.194736 0.661695
0.894426 0.000000 0.447216 0.724042 -0.194736 0.661695
0.525730 0.000000 0.850652 0.724042 -0.194736 0.661695
0.951058 0.309013 0.000000 0.889697 0.315095 0.330385
0.688189 0.499997 0.525736 0.889697 0.315095 0.330385
0.894426 0.000000 0.447216 0.889697 0.315095 0.330385
0.951058 0.309013 0.000000 0.794656 0.577348 0.187595
0.587786 0.809017 0.000000 0.794656 0.577348 0.187595
0.688189 0.499997 0.525736 0.794656 0.577348 0.187595
0.587786 0.809017 0.000000 0.574602 0.748784 0.330388
0.276388 0.850649 0.447220 0.574602 0.748784 0.330388
0.688189 0.499997 0.525736 0.574602 0.748784 0.330388
0.000000 1.000000 0.000000 -0.024747 0.943521 0.330386
-0.262869 0.809012 0.525738 -0.024747 0.943521 0.330386
0.276388 0.850649 0.447220 -0.024747 0.943521 0.330386
0.000000 1.000000 0.000000 -0.303531 0.934171 0.187597
-0.587786 0.809017 0.000000 -0.303531 0.934171 0.187597
-0.262869 0.809012 0.525738 -0.303531 0.934171 0.187597
-0.587786 0.809017 0.000000 -0.534576 0.777865 0.330387
-0.723607 0.525725 0.447220 -0.534576 0.777865 0.330387
-0.262869 0.809012 0.525738 -0.534576 0.777865 0.330387
-0.951058 0.309013 0.000000 -0.904989 0.268032 0.330385
-0.850648 0.000000 0.525736 -0.904989 0.268032 0.330385
-0.723607 0.525725 0.447220 -0.904989 0.268032 0.330385
-0.951058 0.309013 0.000000 -0.982246 0.000000 0.187599
-0.951058 -0.309013 0.000000 -0.982246 0.000000 0.187599
-0.850648 0.000000 0.525736 -0.982246 0.000000 0.187599
-0.951058 -0.309013 0.000000 -0.904989 -0.268031 0.330385
-0.723607 -0.525725 0.447220 -0.904989 -0.268031 0.330385
-0.850648 0.000000 0.525736 -0.904989 -0.268031 0.330385
-0.587786 -0.809017 0.000000 -0.534576 -0.777865 0.330387
-0.262869 -0.809012 0.525738 -0.534576 -0.777865 0.330387
-0.723607 -0.525725 0.447220 -0.534576 -0.777865 0.330387
-0.587786 -0.809017 0.000000 -0.303531 -0.934171 0.187597
0.000000 -1.000000 0.000000 -0.303531 -0.934171 0.187597
-0.262869 -0.809012 0.525738 -0.303531 -0.934171 0.187597
0.000000 -1.000000 0.000000 -0.024747 -0.943521 0.330386
0.276388 -0.850649 0.447220 -0.024747 -0.943521 0.330386
-0.262869 -0.809012 0.525738 -0.024747 -0.943521 0.330386
0.587786 -0.809017 0.000000 0.574602 -0.748784 0.330388
0.688189 -0.499997 0.525736 0.574602 -0.748784 0.330388
0.276388 -0.850649 0.447220 0.574602 -0.748784 0.330388
0.587786 -0.809017 0.000000 0.794656 -0.577348 0.187595
0.951058 -0.309013 0.000000 0.794656 -0.577348 0.187595
0.688189 -0.499997 0.525736 0.794656 -0.577348 0.187595
0.951058 -0.309013 0.000000 0.889697 -0.315095 0.330385
0.894426 0.000000 0.447216 0.889697 -0.315095 0.330385
0.688189 -0.499997 0.525736 0.889697 -0.315095 0.330385
0.587786 0.809017 0.000000 0.306569 0.943522 0.125629
0.000000 1.000000 0.000000 0.306569 0.943522 0.125629
0.276388 0.850649 0.447220 0.306569 0.943522 0.125629
0.587786 0.809017 0.000000 0.303531 0.934171 -0.187597
0.262869 0.809012 -0.525738 0.303531 0.934171 -0.187597
0.000000 1.000000 0.000000 0.303531 0.934171 -0.187597
0.262869 0.809012 -0.525738 0.024747 0.943521 -0.330386
-0.276388 0.850649 -0.447220 0.024747 0.943521 -0.330386
0.000000 1.000000 0.000000 0.024747 0.943521 -0.330386
-0.587786 0.809017 0.000000 -0.802609 0.583126 0.125627
-0.951058 0.309013 0.000000 -0.802609 0.583126 0.125627
-0.723607 0.525725 0.447220 -0.802609 0.583126 0.125627
-0.587786 0.809017 0.000000 -0.794656 0.577348 -0.187595
-0.688189 0.499997 -0.525736 -0.794656 0.577348 -0.187595
-0.951058 0.309013 0.000000 -0.794656 0.577348 -0.187595
-0.688189 0.499997 -0.525736 -0.889697 0.315095 -0.330385
-0.894426 0.000000 -0.447216 -0.889697 0.315095 -0.330385
-0.951058 0.309013 0.000000 -0.889697 0.315095 -0.330385
-0.951058 -0.309013 0.000000 -0.802609 -0.583126 0.125627
-0.587786 -0.809017 0.000000 -0.802609 -0.583126 0.125627
-0.723607 -0.525725 0.447220 -0.802609 -0.583126 0.125627
-0.951058 -0.309013 0.000000 -0.794656 -0.577348 -0.187595
-0.688189 -0.499997 -0.525736 -0.794656 -0.577348 -0.187595
-0.587786 -0.809017 0.000000 -0.794656 -0.577348 -0.187595
-0.688189 -0.499997 -0.525736 -0.574602 -0.748784 -0.330388
-0.276388 -0.850649 -0.447220 -0.574602 -0.748784 -0.330388
-0.587786 -0.809017 0.000000 -0.574602 -0.748784 -0.330388
0.000000 -1.000000 0.000000 0.306569 -0.943522 0.125629
0.587786 -0.809017 0.000000 0.306569 -0.943522 0.125629
0.276388 -0.850649 0.447220 0.306569 -0.943522 0.125629
0.000000 -1.000000 0.000000 0.303531 -0.934171 -0.187597
0.262869 -0.809012 -0.525738 0.303531 -0.934171 -0.187597
0.587786 -0.809017 0.000000 0.303531 -0.934171 -0.187597
0.262869 -0.809012 -0.525738 0.534576 -0.777865 -0.330387
0.723607 -0.525725 -0.447220 0.534576 -0.777865 -0.330387
0.587786 -0.809017 0.000000 0.534576 -0.777865 -0.330387
0.951058 -0.309013 0.000000 0.992077 0.000000 0.125628
0.951058 0.309013 0.000000 0.992077 0.000000 0.125628
0.894426 0.000000 0.447216 0.992077 0.000000 0.125628
0.951058 -0.309013 0.000000 0.982246 0.000000 -0.187599
0.850648 0.000000 -0.525736 0.982246 0.000000 -0.187599
0.951058 0.309013 0.000000 0.982246 0.000000 -0.187599
0.850648 0.000000 -0.525736 0.904989 0.268031 -0.330385
0.723607 0.525725 -0.447220 0.904989 0.268031 -0.330385
0.951058 0.309013 0.000000 0.904989 0.268031 -0.330385
0.425323 0.309011 -0.850654 0.471300 0.583122 -0.661699
0.262869 0.809012 -0.525738 0.471300 0.583122 -0.661699
0.723607 0.525725 -0.447220 0.471300 0.583122 -0.661699
0.425323 0.309011 -0.850654 0.187594 0.577345 -0.794658
-0.162456 0.499995 -0.850654 0.187594 0.577345 -0.794658
0.262869 0.809012 -0.525738 0.187594 0.577345 -0.794658
-0.162456 0.499995 -0.850654 -0.038530 0.748779 -0.661699
-0.276388 0.850649 -0.447220 -0.038530 0.748779 -0.661699
0.262869 0.809012 -0.525738 -0.038530 0.748779 -0.661699
-0.162456 0.499995 -0.850654 -0.408946 0.628425 -0.661698
-0.688189 0.499997 -0.525736 -0.408946 0.628425 -0.661698
-0.276388 0.850649 -0.447220 -0.408946 0.628425 -0.661698
-0.162456 0.499995 -0.850654 -0.491119 0.356821 -0.794657
-0.525730 0.000000 -0.850652 -0.491119 0.356821 -0.794657
-0.688189 0.499997 -0.525736 -0.491119 0.356821 -0.794657
-0.525730 0.000000 -0.850652 -0.724042 0.194736 -0.661695
-0.894426 0.000000 -0.447216 -0.724042 0.194736 -0.661695
-0.688189 0.499997 -0.525736 -0.724042 0.194736 -0.661695
-0.525730 0.000000 -0.850652 -0.724042 -0.194736 -0.661695
-0.688189 -0.499997 -0.525736 -0.724042 -0.194736 -0.661695
-0.894426 0.000000 -0.447216 -0.724042 -0.194736 -0.661695
-0.525730 0.000000 -0.850652 -0.491119 -0.356821 -0.794657
-0.162456 -0.499995 -0.850654 -0.491119 -0.356821 -0.794657
-0.688189 -0.499997 -0.525736 -0.491119 -0.356821 -0.794657
-0.162456 -0.499995 -0.850654 -0.408946 -0.628425 -0.661698
-0.276388 -0.850649 -0.447220 -0.408946 -0.628425 -0.661698
-0.688189 -0.499997 -0.525736 -0.408946 -0.628425 -0.661698
0.850648 0.000000 -0.525736 0.700224 0.268032 -0.661699
0.425323 0.309011 -0.850654 0.700224 0.268032 -0.661699
0.723607 0.525725 -0.447220 0.700224 0.268032 -0.661699
0.850648 0.000000 -0.525736 0.607060 0.000000 -0.794656
0.425323 -0.309011 -0.850654 0.607060 0.000000 -0.794656
0.425323 0.309011 -0.850654 0.607060 0.000000 -0.794656
0.425323 -0.309011 -0.850654 0.331305 0.000000 -0.943524
0.000000 0.000000 -1.000000 0.331305 0.000000 -0.943524
0.425323 0.309011 -0.850654 0.331305 0.000000 -0.943524
-0.162456 -0.499995 -0.850654 -0.038530 -0.748779 -0.661699
0.262869 -0.809012 -0.525738 -0.038530 -0.748779 -0.661699
-0.276388 -0.850649 -0.447220 -0.038530 -0.748779 -0.661699
-0.162456 -0.499995 -0.850654 0.187594 -0.577345 -0.794658
0.425323 -0.309011 -0.850654 0.187594 -0.577345 -0.794658
0.262869 -0.809012 -0.525738 0.187594 -0.577345 -0.794658
0.425323 -0.309011 -0.850654 0.471300 -0.583122 -0.661699
0.723607 -0.525725 -0.447220 0.471300 -0.583122 -0.661699
0.262869 -0.809012 -0.525738 0.471300 -0.583122 -0.661699
"""
__element_indices = """
0 1 2
3 4 5
6 7 8
9 10 11
12 13 14
15 16 17
18 19 20
21 22 23
24 25 26
27 28 29
30 31 32
33 34 35
36 37 38
39 40 41
42 43 44
45 46 47
48 49 50
51 52 53
54 55 56
57 58 59
60 61 62
63 64 65
66 67 68
69 70 71
72 73 74
75 76 77
78 79 80
81 82 83
84 85 86
87 88 89
90 91 92
93 94 95
96 97 98
99 100 101
102 103 104
105 106 107
108 109 110
111 112 113
114 115 116
117 118 119
120 121 122
123 124 125
126 127 128
129 130 131
132 133 134
135 136 137
138 139 140
141 142 143
144 145 146
147 148 149
150 151 152
153 154 155
156 157 158
159 160 161
162 163 164
165 166 167
168 169 170
171 172 173
174 175 176
177 178 179
180 181 182
183 184 185
186 187 188
189 190 191
192 193 194
195 196 197
198 199 200
201 202 203
204 205 206
207 208 209
210 211 212
213 214 215
216 217 218
219 220 221
222 223 224
225 226 227
228 229 230
231 232 233
234 235 236
237 238 239
"""
def sphere_model_data() -> Tuple[np.ndarray, np.ndarray]:
    """Return the vertex data and element-index array of a low-poly sphere
    (80 triangles).

    The first array (float32) holds the whitespace-separated rows of the
    module-level ``__vertex_data`` literal; the second (int32) holds the
    triangle index rows of ``__element_indices``.
    """
    vertices = np.loadtxt(StringIO(__vertex_data), dtype=np.float32, delimiter=" ")
    triangles = np.loadtxt(StringIO(__element_indices), dtype=np.int32, delimiter=" ")
    return vertices, triangles
| StarcoderdataPython |
8085823 | <gh_stars>0
# -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
import time as t
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#Load Tushare
from rqalpha.apis.api_base import history_bars, get_position
from rqalpha.mod.rqalpha_mod_sys_accounts.api.api_stock import order_target_value, order_value
import Utils.configuration_file_service as config_service
import tushare as ts
# Read the Tushare API token from the local configuration file and open an
# authenticated pro-API session.
token = config_service.getProperty(section_name=config_service.TOKEN_SECTION_NAME,
                                   property_name=config_service.TS_TOKEN_NAME)
pro = ts.pro_api(token)

# Original comment said "main business composition", but the call below
# fetches the VIP balance-sheet endpoint for stock 689009.SH —
# NOTE(review): confirm which dataset was intended.
df = pro.balancesheet_vip(ts_code='689009.SH')
print(df)

# Export the df to excel
# df.to_excel(r'C:\Users\Austin\Desktop\Tushare\fina_mainbz_vip.xlsx', index = False)
| StarcoderdataPython |
6471426 | from bxcommon.models.serializable_flag import SerializableFlag
class QuotaType(SerializableFlag):
    """Daily-quota buckets, serializable as flags."""

    FREE_DAILY_QUOTA = 1
    PAID_DAILY_QUOTA = 2

    def __str__(self):
        # First four characters of the lowercased member name,
        # e.g. "free" / "paid". str() guards against a non-string name.
        lowered = str(self.name).lower()
        return lowered[:4]
| StarcoderdataPython |
9668962 | <reponame>Hamel007/oms_cms<gh_stars>10-100
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class UtilsConfig(AppConfig):
    """Django app configuration for the backend ``utils`` app."""

    name = 'oms_cms.backend.utils'
    # Human-readable label shown in the Django admin; Russian for "Settings".
    verbose_name = _('Настройки')
| StarcoderdataPython |
6571088 | """
Options for managing Confab.
"""
from os import getcwd
from os.path import basename
from fabric.api import env, task
from fabric.utils import _AttributeDict
from difflib import unified_diff
from magic import Magic
from re import match
def _should_render(mime_type):
"""
Return whether a template file of with a particular mime type
should be rendered.
Some files may need to be excluded from template rendering;
such files will be copied verbatim.
"""
return next((True for pattern in ['text/', 'application/xml'] if match(pattern, mime_type)),
False)
def _is_empty(mime_type):
"""
Return whether a template file is an empty file.
"""
return mime_type == 'inode/x-empty'
def _is_not_temporary(file_name):
"""
Return whether a file name does not represent a temporary file.
When listing configuration files, we usually want temporary
files to be ignored.
"""
return not file_name.endswith('~')
def _is_not_internal(file_name):
"""
Return whether a file name does not represent internal usage.
When listing configuration files, we want to omit internal
files, especially if they are used as Jinja includes
"""
return not basename(file_name).startswith('_')
def _filter_func(file_name):
    """
    Default filter: keep files that are neither temporary nor internal.
    """
    predicates = (_is_not_temporary, _is_not_internal)
    return all(pred(file_name) for pred in predicates)
def _get_mime_type(file_name):
    """
    Return the mime type of *file_name*.

    The mime type is used to determine whether a configuration
    file is text.
    """
    mime_detector = Magic(mime=True)
    return mime_detector.from_file(file_name)
def _diff(a, b, fromfile=None, tofile=None):
"""
Return a diff using '---', '+++', and '@@' control lines.
By default, uses unified_diff.
"""
return unified_diff(a, b, fromfile=fromfile, tofile=tofile)
def _as_dict(module):
"""
Returns publicly names values in module's __dict__.
"""
try:
return {k: v for k, v in module.__dict__.iteritems() if not k[0:1] == '_'}
except AttributeError:
return {}
def _get_base_dir():
    """
    Return the base directory for the user's template and data directories.

    Falls back to the current working directory when the environment
    definition does not specify a directory.
    """
    configured = env.environmentdef.directory
    return configured if configured else getcwd()
# Options that control how confab runs.
#
# These are in opposition to options likely to be changed
# between different runs of confab, such as directories,
# environments, roles, hosts, etc.
options = _AttributeDict({
    # Should yes be assumed for interactive prompts?
    'assume_yes': False,
    # How to compute a file's mime_type?
    'get_mime_type': _get_mime_type,
    # How to determine if a template should be rendered?
    'should_render': _should_render,
    # How to determine if a template is an empty file?
    'is_empty': _is_empty,
    # How to filter available templates within the jinja environment?
    'filter_func': _filter_func,
    # How to determine diffs?
    'diff': _diff,
    # How to get dictionary configuration from module data?
    'module_as_dict': _as_dict,
    # Base directory for template and data directories.
    'get_base_dir': _get_base_dir,
    # What is the name of the template directory?
    'get_templates_dir': lambda: 'templates',
    # What is the name of the data directory?
    'get_data_dir': lambda: 'data',
    # What is the name of the generated directory?
    'get_generated_dir': lambda: 'generated',
    # What is the name of the remotes directory?
    'get_remotes_dir': lambda: 'remotes',
})
class Options(object):
    """
    Context manager to temporarily set options.

    On entry each keyword's current value in the module-level ``options``
    mapping is saved (``None`` for keys not yet present) and replaced; on
    exit the saved values are restored, even if the body raised.
    """

    def __init__(self, **kwargs):
        # Option overrides to apply while the context is active.
        self.kwargs = kwargs
        # Snapshot of the values in effect before __enter__ ran.
        self.previous = {}

    def __enter__(self):
        # Fix: the original used self.kwargs.iteritems(), which is
        # Python-2-only and raises AttributeError on Python 3; items()
        # behaves identically on both.
        for (k, v) in self.kwargs.items():
            self.previous[k] = options.get(k)
            options[k] = v
        return self

    def __exit__(self, exc_type, value, traceback):
        # Restore the pre-context values (a previously absent key is
        # restored to None, matching the original behavior).
        for k in self.kwargs.keys():
            options[k] = self.previous[k]
@task
def assume_yes():
    """
    Set the option to ``assume_yes`` in other tasks.
    """
    # Flip the module-level flag so subsequent interactive prompts
    # are auto-confirmed for the rest of this fabric run.
    options.assume_yes = True
| StarcoderdataPython |
1711889 | <reponame>viniciusriosfuck/vertical<filename>legacy/model_functions_old.py<gh_stars>0
import numpy as np
import pandas as pd
from datetime import datetime
from scipy.integrate import odeint
import matplotlib.pyplot as plt
def run_SEIR_ODE_model(covid_parameters, model_parameters) -> pd.DataFrame:
    """
    Runs the simulation
    output:
    dataframe for SINGLE RUN
    dataframe list for SENSITIVITY ANALYSIS AND CONFIDENCE INTERVAL
    """
    cp = covid_parameters
    mp = model_parameters
    # Variables are reported on a daily basis.
    # A grid of time points (in days)
    t = range(mp.t_max)
    # INITIAL CONDITIONS
    # Initial conditions vector
    SEIRHUM_0 = initial_conditions(mp)
    # One isolation level per entry in the elderly contact-reduction list.
    niveis_isolamento = len(mp.contact_reduction_elderly)
    if mp.IC_analysis == 2:
        # SINGLE RUN branch: scalar epidemiological parameters.
        ii = 1
        df = pd.DataFrame()
        # 1: without; 2: vertical; 3: horizontal isolation
        for i in range(niveis_isolamento):  # 2: paper
            omega_i = mp.contact_reduction_elderly[i]
            omega_j = mp.contact_reduction_young[i]
            # Integrate the SEIR equations over the time grid, t
            # Parameters used to compute the derivatives.
            args = args_assignment(cp, mp, omega_i, omega_j, i, ii)
            # NOTE(review): this loop integrates four consecutive 5-day
            # windows, re-seeding each window with the final state of the
            # previous one (chained integration). The print/plt calls look
            # like leftover debugging — confirm before relying on this path.
            for a in range(4):
                t = range(5)
                if a == 0:
                    ret = odeint(derivSEIRHUM, SEIRHUM_0, t, args)
                    Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj, dHi, dHj, Ui, Uj, dUi, dUj, Mi, Mj, pHi, pHj, pUi, pUj, pMi, pMj = ret.T
                else:
                    # Index of the last sample of the previous window.
                    casa_negativa = -1
                    SEIRHUM_0 = Si[casa_negativa], Sj[casa_negativa], Ei[casa_negativa], Ej[casa_negativa], Ii[casa_negativa], Ij[casa_negativa], Ri[casa_negativa], Rj[casa_negativa], Hi[casa_negativa], Hj[casa_negativa], dHi[casa_negativa], dHj[casa_negativa], Ui[casa_negativa], Uj[casa_negativa], dUi[casa_negativa], dUj[casa_negativa], Mi[casa_negativa], Mj[casa_negativa], pHi[casa_negativa], pHj[casa_negativa], pUi[casa_negativa], pUj[casa_negativa], pMi[casa_negativa], pMj[casa_negativa]
                    ret = odeint(derivSEIRHUM, SEIRHUM_0, t, args)
                    print(type(ret))
                    Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj, dHi, dHj, Ui, Uj, dUi, dUj, Mi, Mj, pHi, pHj, pUi, pUj, pMi, pMj = ret.T
            # Update the variables
            #Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj, dHi, dHj, Ui, Uj, dUi, dUj, Mi, Mj, pHi, pHj, pUi, pUj, pMi, pMj = ret.T
            #plt.plot(t,pHi+pHj,Hi+Hj)
            #plt.show()
            # Append this isolation level's trajectories, tagging the rows
            # with the contact-reduction factors used.
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0.
            df = df.append(pd.DataFrame({'Si': Si, 'Sj': Sj, 'Ei': Ei, 'Ej': Ej,
                                         'Ii': Ii, 'Ij': Ij, 'Ri': Ri, 'Rj': Rj,
                                         'Hi': Hi, 'Hj': Hj, 'dHi': dHi, 'dHj': dHj, 'Ui': Ui, 'Uj': Uj,
                                         'dUi': dUi, 'dUj': dUj, 'Mi': Mi, 'Mj': Mj,
                                         'pHi': pHi, 'pHj': pHj, 'pUi': pUi, 'pUj': pUj, 'pMi': pMi, 'pMj': pMj}, index=t)
                           .assign(omega_i = omega_i)
                           .assign(omega_j = omega_j))
        t = range(mp.t_max)
        # Single-run output is a single DataFrame (debug plot below).
        DF_list = df
        plt.plot(df.Ii)
        plt.show()
    else:
        # SENSITIVITY ANALYSIS / CONFIDENCE INTERVAL branch: one run per
        # sampled parameter set (cp.alpha is a sequence here).
        DF_list = list()  # list of data frames
        runs = len(cp.alpha)
        print('Rodando ' + str(runs) + ' casos')
        print('Para ' + str(mp.t_max) + ' dias')
        print('Para cada um dos ' + str(niveis_isolamento) + ' niveis de isolamento de entrada')
        print('')
        for ii in range(runs):  # sweeps the data frames list
            df = pd.DataFrame()
            # 1: without; 2: vertical; 3: horizontal isolation
            for i in range(niveis_isolamento):  # 2: paper
                omega_i = mp.contact_reduction_elderly[i]
                omega_j = mp.contact_reduction_young[i]
                # Integrate the SEIR equations over the time grid, t
                # Parameters used to compute the derivatives.
                args = args_assignment(cp, mp, omega_i, omega_j, i, ii)
                ret = odeint(derivSEIRHUM, SEIRHUM_0, t, args)
                # Update the variables
                Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj, dHi, dHj, Ui, Uj, dUi, dUj, Mi, Mj, pHi, pHj, pUi, pUj, pMi, pMj = ret.T
                df = df.append(pd.DataFrame({'Si': Si, 'Sj': Sj, 'Ei': Ei, 'Ej': Ej,
                                             'Ii': Ii, 'Ij': Ij, 'Ri': Ri, 'Rj': Rj,
                                             'Hi': Hi, 'Hj': Hj, 'dHi': dHi, 'dHj': dHj, 'Ui': Ui, 'Uj': Uj,
                                             'dUi': dUi, 'dUj': dUj, 'Mi': Mi, 'Mj': Mj,
                                             'pHi': pHi, 'pHj': pHj, 'pUi': pUi, 'pUj': pUj, 'pMi': pMi, 'pMj': pMj}, index=t)
                               .assign(omega_i = omega_i)
                               .assign(omega_j = omega_j))
            DF_list.append(df)
    return DF_list
def initial_conditions(mp):
"""
Assembly of the initial conditions
input: model_parameters (namedtuple)
output: vector SEIRHUM_0 with the variables:
Si0, Sj0, Ei0, Ej0, Ii0, Ij0, Ri0, Rj0, Hi0, Hj0, Ui0, Uj0, Mi0, Mj0
Suscetible, Exposed, Infected, Removed, Ward Bed demand, ICU bed demand, Death
i: elderly (idoso, 60+); j: young (jovem, 0-59 years)
"""
Ei0 = mp.init_exposed_elderly # Ee0
Ej0 = mp.init_exposed_young # Ey0
Ii0 = mp.init_infected_elderly # Ie0
Ij0 = mp.init_infected_young # Iy0
Ri0 = mp.init_removed_elderly # Re0
Rj0 = mp.init_removed_young # Ry0
Hi0 = mp.init_hospitalized_ward_elderly # He0
Hj0 = mp.init_hospitalized_ward_young # Hy0
dHi0 = mp.init_hospitalized_ward_elderly_excess
dHj0 = mp.init_hospitalized_ward_young_excess
Ui0 = mp.init_hospitalized_icu_elderly # Ue0
Uj0 = mp.init_hospitalized_icu_young # Uy0
dUi0 = mp.init_hospitalized_icu_elderly_excess
dUj0 = mp.init_hospitalized_icu_young_excess
Mi0 = mp.init_deceased_elderly # Me0
Mj0 = mp.init_deceased_young # My0
pHi0 = 0
pHj0 = 0
pUi0 = 0
pUj0 = 0
pMi0 = 0
pMj0 = 0
# Suscetiveis
Si0 = mp.population * mp.population_rate_elderly - Ii0 - Ri0 - Ei0 # Suscetiveis idosos
Sj0 = mp.population * (1 - mp.population_rate_elderly) - Ij0 - Rj0 - Ej0 # Suscetiveis jovens
SEIRHUM_0 = Si0, Sj0, Ei0, Ej0, Ii0, Ij0, Ri0, Rj0, Hi0, Hj0, dHi0, dHj0, Ui0, Uj0, dUi0, dUj0, Mi0, Mj0, pHi0, pHj0, pUi0, pUj0, pMi0, pMj0
return SEIRHUM_0
def args_assignment(cp, mp, omega_i, omega_j, i, ii):
"""
Assembly of the derivative parameters
input: covid_parameters, model_parameters
output: vector args with the variables:
N, alpha, beta, gamma,
los_leito, los_uti, tax_int_i, tax_int_j, tax_uti_i, tax_uti_j,
taxa_mortalidade_i, taxa_mortalidade_j,
omega_i, omega_j
Population, incubation_rate, contact_rate, infectiviy_rate,
average_length_of_stay (regular and icu beds), internation rates (regular and icu beds, by age)
i: elderly (idoso, 60+); j: young (jovem, 0-59 years)
mortality_rate for young and elderly
"""
N = mp.population
pI = mp.population_rate_elderly
Normalization_constant = mp.Normalization_constant[0] #Because if the constant be scaled after changing the contact matrix again,
#it should lose the effect of reducing infection rate
if mp.IC_analysis == 2: # SINGLE RUN
alpha = cp.alpha
beta = cp.beta
gamma = cp.gamma
delta = cp.delta
else: # CONFIDENCE INTERVAL OR SENSITIVITY ANALYSIS
alpha = cp.alpha[ii]
beta = cp.beta[ii]
gamma = cp.gamma[ii]
delta = cp.delta[ii]
contact_matrix = mp.contact_matrix[i]
taxa_mortalidade_i = cp.mortality_rate_elderly
taxa_mortalidade_j = cp.mortality_rate_young
pH = cp.pH
pU = cp.pU
los_leito = cp.los_ward
los_uti = cp.los_icu
infection_to_hospitalization = cp.infection_to_hospitalization
infection_to_icu = cp.infection_to_icu
tax_int_i = cp.internation_rate_ward_elderly
tax_int_j = cp.internation_rate_ward_young
tax_uti_i = cp.internation_rate_icu_elderly
tax_uti_j = cp.internation_rate_icu_young
capacidade_UTIs = mp.bed_icu
capacidade_Ward = mp.bed_ward
args = (N, alpha, beta, gamma,delta,
los_leito, los_uti, tax_int_i, tax_int_j, tax_uti_i, tax_uti_j,
taxa_mortalidade_i, taxa_mortalidade_j,omega_i, omega_j,contact_matrix,pI,
infection_to_hospitalization,infection_to_icu,capacidade_UTIs,capacidade_Ward,Normalization_constant,pH,pU)
return args
def derivSEIRHUM(SEIRHUM, t, N, alpha, beta, gamma, delta,
los_leito, los_uti, tax_int_i, tax_int_j, tax_uti_i, tax_uti_j,
taxa_mortalidade_i, taxa_mortalidade_j,omega_i, omega_j,contact_matrix,pI,
infection_to_hospitalization,infection_to_icu,capacidade_UTIs,capacidade_Ward,Normalization_constant,pH,pU):
"""
Computes the derivatives
input: SEIRHUM variables for elderly (i) and young (j),
Suscetible, Exposed, Infected, Recovered, Hospitalized, ICU, Deacesed
time, Brazillian population,
incubation rate, contamination rate, infectivity rate,
LOS, hospitalization rates for wards and icu beds,
death rates
attenuating factors
output: vector with the derivatives
"""
# Vetor variaveis incognitas
Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj, dHi, dHj, Ui, Uj, dUi, dUj, Mi, Mj, pHi, pHj, pUi, pUj, pMi, pMj = SEIRHUM
Iij = np.array([[Ij/((1-pI)*N)],[Ii/(pI*N)]])
Sij = np.array([[Sj],[Si]])
dSijdt = -(beta/Normalization_constant)*np.dot(contact_matrix,Iij)*Sij
dSjdt = dSijdt[0]
dSidt = dSijdt[1]
dEidt = - dSidt - alpha * Ei
dEjdt = - dSjdt - alpha * Ej
dIidt = alpha * Ei - gamma * Ii
dIjdt = alpha * Ej - gamma * Ij
dRidt = gamma * Ii
dRjdt = gamma * Ij
dpHi = -tax_int_i*dSidt - pHi / infection_to_hospitalization
dpHj = -tax_int_j*dSjdt - pHj / infection_to_hospitalization
dpUi = -tax_uti_i*dSidt - pUi / infection_to_icu
dpUj = -tax_uti_j*dSjdt - pUj / infection_to_icu
dpMi = -taxa_mortalidade_i*dSidt - pMi*delta
dpMj = -taxa_mortalidade_j*dSjdt - pMj*delta
coisa = 1/500
coisa2 = -coisa*(Hi+Hj-capacidade_Ward)
coisa = 1/50
coisa3 = -coisa*(Ui+Uj-capacidade_UTIs)
# Leitos demandados
dHidt = (pHi / infection_to_hospitalization )*(1-1/(1+np.exp(coisa2))) - Hi / los_leito
dHjdt = (pHj / infection_to_hospitalization )*(1-1/(1+np.exp(coisa2))) - Hj / los_leito
dUidt = (pUi / infection_to_icu)*(1-1/(1+np.exp(coisa3))) - Ui / los_uti
dUjdt = (pUj / infection_to_icu)*(1-1/(1+np.exp(coisa3))) - Uj / los_uti
#Leitos demandados em excesso
ddHidt = (pHi / infection_to_hospitalization)*(1/(1+np.exp(coisa2)))
ddHjdt = (pHj / infection_to_hospitalization)*(1/(1+np.exp(coisa2)))
ddUidt = (pUi / infection_to_icu)*(1/(1+np.exp(coisa3)))
ddUjdt = (pUj / infection_to_icu)*(1/(1+np.exp(coisa3)))
# Obitos
dMidt = pMi * delta + ddHidt*pH + ddUidt*pU
dMjdt = pMj * delta + ddHjdt*pH + ddUjdt*pU
return (dSidt, dSjdt, dEidt, dEjdt, dIidt, dIjdt, dRidt, dRjdt,
dHidt, dHjdt, ddHidt, ddHjdt, dUidt, dUjdt, ddUidt, ddUjdt, dMidt, dMjdt,
dpHi, dpHj, dpUi, dpUj, dpMi, dpMj)
| StarcoderdataPython |
9648732 | # encoding: utf-8
"""sys.excepthook for IPython itself, leaves a detailed report on disk.
Authors:
* <NAME>
* <NAME>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2001-2007 <NAME>. <<EMAIL>>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
import traceback
from pprint import pformat
from IPython.core import ultratb
from IPython.core.release import author_email
from IPython.utils.sysinfo import sys_info
from IPython.utils.py3compat import input, getcwd
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Template for the user message.
_default_message_template = """\
Oops, {app_name} crashed. We do our best to make it stable, but...
A crash report was automatically generated with the following information:
- A verbatim copy of the crash traceback.
- A copy of your input history during this session.
- Data on your current {app_name} configuration.
It was left in the file named:
\t'{crash_report_fname}'
If you can email this file to the developers, the information in it will help
them in understanding and correcting the problem.
You can mail it to: {contact_name} at {contact_email}
with the subject '{app_name} Crash Report'.
If you want to do it now, the following command will work (under Unix):
mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
To ensure accurate tracking of this issue, please file a report about it at:
{bug_tracker}
"""
_lite_message_template = """
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at {email}
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
{config}Application.verbose_crash=True
"""
class CrashHandler(object):
    """Customizable crash handlers for IPython applications.
    Instances of this class provide a :meth:`__call__` method which can be
    used as a ``sys.excepthook``. The :meth:`__call__` signature is::
        def __call__(self, etype, evalue, etb)
    """
    # User-facing message; expanded with self.info via str.format.
    message_template = _default_message_template
    # Separator written between sections of the on-disk report.
    section_sep = '\n\n'+'*'*75+'\n\n'
    def __init__(self, app, contact_name=None, contact_email=None,
                 bug_tracker=None, show_crash_traceback=True, call_pdb=False):
        """Create a new crash handler
        Parameters
        ----------
        app : Application
            A running :class:`Application` instance, which will be queried at
            crash time for internal information.
        contact_name : str
            A string with the name of the person to contact.
        contact_email : str
            A string with the email address of the contact.
        bug_tracker : str
            A string with the URL for your project's bug tracker.
        show_crash_traceback : bool
            If false, don't print the crash traceback on stderr, only generate
            the on-disk report
        call_pdb : bool
            If true, drop into the verbose traceback handler's pdb session
            instead of writing a report.
        """
        self.crash_report_fname = "Crash_report_%s.txt" % app.name
        self.app = app
        self.call_pdb = call_pdb
        self.show_crash_traceback = show_crash_traceback
        # Substitution mapping for message_template and the report header.
        self.info = dict(app_name = app.name,
                    contact_name = contact_name,
                    contact_email = contact_email,
                    bug_tracker = bug_tracker,
                    crash_report_fname = self.crash_report_fname)
    def __call__(self, etype, evalue, etb):
        """Handle an exception, call for compatible with sys.excepthook"""
        # Do not allow the crash handler to be called twice without
        # reinstalling it: this prevents unlikely errors in the crash
        # handling from entering an infinite loop.
        sys.excepthook = sys.__excepthook__
        # Report tracebacks shouldn't use color in general (safer for users).
        color_scheme = 'NoColor'
        try:
            rptdir = self.app.ipython_dir
        except Exception:
            rptdir = getcwd()
        if rptdir is None or not os.path.isdir(rptdir):
            rptdir = getcwd()
        report_name = os.path.join(rptdir, self.crash_report_fname)
        # Write the report filename into the instance dict so it can get
        # properly expanded out in the user message template.
        self.crash_report_fname = report_name
        self.info['crash_report_fname'] = report_name
        TBhandler = ultratb.VerboseTB(
            color_scheme=color_scheme,
            long_header=1,
            call_pdb=self.call_pdb,
        )
        if self.call_pdb:
            TBhandler(etype, evalue, etb)
            return
        traceback = TBhandler.text(etype, evalue, etb, context=31)
        # Print traceback to screen.
        if self.show_crash_traceback:
            print(traceback, file=sys.stderr)
        # And generate a complete report on disk.  Only the open() is guarded:
        # a failure to create the file is reported as a best effort, while the
        # 'with' block guarantees the handle is closed even if writing fails.
        try:
            report = open(report_name, 'w')
        except Exception:
            print('Could not create crash report on disk.', file=sys.stderr)
            return
        with report:
            # Inform user on stderr of what happened.
            print('\n'+'*'*70+'\n', file=sys.stderr)
            print(self.message_template.format(**self.info), file=sys.stderr)
            report.write(self.make_report(traceback))
        input("Hit <Enter> to quit (your terminal may close):")
    def make_report(self, traceback):
        """Return a string containing a crash report."""
        sec_sep = self.section_sep
        report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
        rpt_add = report.append
        rpt_add(sys_info())
        try:
            config = pformat(self.app.config)
            rpt_add(sec_sep)
            # BUG FIX: this previously read self.app_name, an attribute that
            # is never set (the name lives on self.app.name), so the
            # AttributeError made the except clause silently drop the whole
            # configuration section from every report.
            rpt_add('Application name: %s\n\n' % self.app.name)
            rpt_add('Current user configuration structure:\n\n')
            rpt_add(config)
        except Exception:
            # Best effort: a report without config is better than no report.
            pass
        rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
        return ''.join(report)
def crash_handler_lite(etype, evalue, tb):
    """A light excepthook: print the usual traceback plus a short hint."""
    traceback.print_exception(etype, evalue, tb)
    from IPython.core.interactiveshell import InteractiveShell
    # Choose the config prefix matching how the user would actually enable
    # verbose crash reports: the %config magic inside a running shell,
    # plain attribute access on the config object otherwise.
    config = "%config " if InteractiveShell.initialized() else "c."
    print(_lite_message_template.format(email=author_email, config=config),
          file=sys.stderr)
# --- file boundary (dataset-separator artifact neutralized) ---
# <reponame>AaronFriel/pulumi-aws-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ConnectionArgs', 'Connection']
# Auto-generated by the Pulumi SDK Generator; limit manual edits to comments.
@pulumi.input_type
class ConnectionArgs:
    def __init__(__self__, *,
                 connection_name: Optional[pulumi.Input[str]] = None,
                 host_arn: Optional[pulumi.Input[str]] = None,
                 provider_type: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionTagArgs']]]] = None):
        """
        The set of arguments for constructing a Connection resource.
        :param pulumi.Input[str] connection_name: The name of the connection. Connection names must be unique in an AWS user account.
        :param pulumi.Input[str] host_arn: The host arn configured to represent the infrastructure where your third-party provider is installed. You must specify either a ProviderType or a HostArn.
        :param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured. You must specify either a ProviderType or a HostArn.
        :param pulumi.Input[Sequence[pulumi.Input['ConnectionTagArgs']]] tags: Specifies the tags applied to a connection.
        """
        # Only arguments that were actually supplied are stored on the bag.
        if connection_name is not None:
            pulumi.set(__self__, "connection_name", connection_name)
        if host_arn is not None:
            pulumi.set(__self__, "host_arn", host_arn)
        if provider_type is not None:
            pulumi.set(__self__, "provider_type", provider_type)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="connectionName")
    def connection_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the connection. Connection names must be unique in an AWS user account.
        """
        return pulumi.get(self, "connection_name")
    @connection_name.setter
    def connection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_name", value)
    @property
    @pulumi.getter(name="hostArn")
    def host_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The host arn configured to represent the infrastructure where your third-party provider is installed. You must specify either a ProviderType or a HostArn.
        """
        return pulumi.get(self, "host_arn")
    @host_arn.setter
    def host_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_arn", value)
    @property
    @pulumi.getter(name="providerType")
    def provider_type(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the external provider where your third-party code repository is configured. You must specify either a ProviderType or a HostArn.
        """
        return pulumi.get(self, "provider_type")
    @provider_type.setter
    def provider_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provider_type", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionTagArgs']]]]:
        """
        Specifies the tags applied to a connection.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionTagArgs']]]]):
        pulumi.set(self, "tags", value)
# Auto-generated by the Pulumi SDK Generator; limit manual edits to comments.
class Connection(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 connection_name: Optional[pulumi.Input[str]] = None,
                 host_arn: Optional[pulumi.Input[str]] = None,
                 provider_type: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionTagArgs']]]]] = None,
                 __props__=None):
        """
        Schema for AWS::CodeStarConnections::Connection resource which can be used to connect external source providers with AWS CodePipeline

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] connection_name: The name of the connection. Connection names must be unique in an AWS user account.
        :param pulumi.Input[str] host_arn: The host arn configured to represent the infrastructure where your third-party provider is installed. You must specify either a ProviderType or a HostArn.
        :param pulumi.Input[str] provider_type: The name of the external provider where your third-party code repository is configured. You must specify either a ProviderType or a HostArn.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionTagArgs']]]] tags: Specifies the tags applied to a connection.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[ConnectionArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Schema for AWS::CodeStarConnections::Connection resource which can be used to connect external source providers with AWS CodePipeline

        :param str resource_name: The name of the resource.
        :param ConnectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher between the two overloads above: if the caller
        # passed a ConnectionArgs bag, unpack it into keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(ConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 connection_name: Optional[pulumi.Input[str]] = None,
                 host_arn: Optional[pulumi.Input[str]] = None,
                 provider_type: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionTagArgs']]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ConnectionArgs.__new__(ConnectionArgs)
            __props__.__dict__["connection_name"] = connection_name
            __props__.__dict__["host_arn"] = host_arn
            __props__.__dict__["provider_type"] = provider_type
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are resolved by the engine.
            __props__.__dict__["connection_arn"] = None
            __props__.__dict__["connection_status"] = None
            __props__.__dict__["owner_account_id"] = None
        super(Connection, __self__).__init__(
            'aws-native:codestarconnections:Connection',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Connection':
        """
        Get an existing Connection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine populates them from the
        # looked-up resource state.
        __props__ = ConnectionArgs.__new__(ConnectionArgs)
        __props__.__dict__["connection_arn"] = None
        __props__.__dict__["connection_name"] = None
        __props__.__dict__["connection_status"] = None
        __props__.__dict__["host_arn"] = None
        __props__.__dict__["owner_account_id"] = None
        __props__.__dict__["provider_type"] = None
        __props__.__dict__["tags"] = None
        return Connection(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="connectionArn")
    def connection_arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between AWS services.
        """
        return pulumi.get(self, "connection_arn")
    @property
    @pulumi.getter(name="connectionName")
    def connection_name(self) -> pulumi.Output[str]:
        """
        The name of the connection. Connection names must be unique in an AWS user account.
        """
        return pulumi.get(self, "connection_name")
    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> pulumi.Output[str]:
        """
        The current status of the connection.
        """
        return pulumi.get(self, "connection_status")
    @property
    @pulumi.getter(name="hostArn")
    def host_arn(self) -> pulumi.Output[Optional[str]]:
        """
        The host arn configured to represent the infrastructure where your third-party provider is installed. You must specify either a ProviderType or a HostArn.
        """
        return pulumi.get(self, "host_arn")
    @property
    @pulumi.getter(name="ownerAccountId")
    def owner_account_id(self) -> pulumi.Output[str]:
        """
        The name of the external provider where your third-party code repository is configured. For Bitbucket, this is the account ID of the owner of the Bitbucket repository.
        """
        return pulumi.get(self, "owner_account_id")
    @property
    @pulumi.getter(name="providerType")
    def provider_type(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the external provider where your third-party code repository is configured. You must specify either a ProviderType or a HostArn.
        """
        return pulumi.get(self, "provider_type")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionTag']]]:
        """
        Specifies the tags applied to a connection.
        """
        return pulumi.get(self, "tags")
# --- file boundary (dataset-separator artifact neutralized) ---
# <reponame>Dung-Han-Lee/Pointcloud-based-Row-Detection-using-ShellNet-and-PyTorch
from sys import argv
import os
def rename(directory):
    """Rename every png/npy/jpg file in *directory* to a zero-padded
    sequential name (0000.ext, 0001.ext, ...) in sorted-name order.

    The counter is shared across all matched extensions, so the sequence
    is global for the directory rather than per extension. Files with
    other extensions are left untouched.
    """
    # BUG FIX: the old filename.split(".")[-1] misclassified dotless names
    # (a file literally called "npy" was treated as having extension "npy"
    # and renamed) and dotfiles; os.path.splitext handles both correctly.
    allowed = {".png", ".npy", ".jpg"}
    num = 0
    for filename in sorted(os.listdir(directory)):
        ext = os.path.splitext(filename)[1]
        if ext not in allowed:
            continue
        # NOTE(review): os.rename silently overwrites an existing target on
        # POSIX; assumed safe for this dataset layout -- confirm no source
        # file is already named like "0000.png".
        os.rename(os.path.join(directory, filename),
                  os.path.join(directory, ("%04d" % num) + ext))
        num += 1
root = "." if len(argv) < 2 else argv[1]
folders = ["train", "val", "test"]
subfolders = ["pc_label", "pointcloud"]
for folder in folders:
for subfolder in subfolders:
dir_name = os.path.join(root, folder, subfolder)
rename(dir_name) | StarcoderdataPython |
# <filename>runtests.py
#!/usr/bin/env python
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
import os
import sys
import subprocess
import pytest
# Default pytest options: short tracebacks, quiet output, no capture, and a
# short summary ('-rw').
PYTEST_ARGS = ['--tb=short', '-q', '-s', '-rw']
# Paths handed to flake8 when linting.
FLAKE8_ARGS = ['custom_auth_user', 'tests']
# Make the project root importable when this script is run directly.
sys.path.append(os.path.dirname(__file__))
def flake8_main(args):
    """Run flake8 over *args* (a list of paths) and return its exit status."""
    print('Running lint tests...')
    status = subprocess.call(['flake8'] + args)
    # Non-zero exit status from flake8 means lint violations were found.
    if status:
        print('\033[91m' + 'Lint flake8 failed' + '\033[0m')
    else:
        print('\033[92m' + 'Lint flake8 passed' + '\033[0m')
    return status
def exit_if_failed(func):
    """Terminate the process with *func* as the exit status when it is truthy.

    A zero/falsy status means success, so the function simply returns.
    """
    if not func:
        return
    sys.exit(func)
if __name__ == '__main__':
    # Consume our own command-line switches here so pytest never sees them.
    run_lint = True
    run_pytest = True
    if '--nolint' in sys.argv:
        # Skip the flake8 pass entirely.
        sys.argv.remove('--nolint')
        run_lint = False
    if '--lintonly' in sys.argv:
        # Lint only, without running the test suite.
        sys.argv.remove('--lintonly')
        run_pytest = False
    if run_pytest and '--coverage' in sys.argv:
        # Collect coverage for the package and emit an HTML report.
        sys.argv.remove('--coverage')
        PYTEST_ARGS += [
            '--cov-report', 'html',
            '--cov', 'custom_auth_user']
    # Run flake8 lint, aborting on failure.
    if run_lint:
        exit_if_failed(flake8_main(FLAKE8_ARGS))
    # Run pytest, propagating its exit status.
    if run_pytest:
        exit_if_failed(pytest.main(PYTEST_ARGS))
# --- file boundary (dataset-separator artifact neutralized) ---
from netapp.connection import NaConnection
from instance_info import InstanceInfo # 2 properties
from enabled_preset import EnabledPreset # 1 properties
from filter import Filter # 0 properties
from counter_data import CounterData # 2 properties
from filter_data import FilterData # 0 properties
from aggregation_data import AggregationData # 2 properties
from preset_info import PresetInfo # 8 properties
from counter_info import CounterInfo # 11 properties
from archive_config_info import ArchiveConfigInfo # 6 properties
from object_info import ObjectInfo # 4 properties
from perf_archive_get_iter_key_td import PerfArchiveGetIterKeyTd # 1 properties
from preset_detail_info import PresetDetailInfo # 5 properties
from archive_record import ArchiveRecord # 1 properties
from instance_uuid import InstanceUuid # 0 properties
from label_info import LabelInfo # 0 properties
from perf_archive_info import PerfArchiveInfo # 10 properties
from instance_data import InstanceData # 4 properties
from counter import Counter # 0 properties
from archive_header import ArchiveHeader # 1 properties
from instance import Instance # 0 properties
from perf_preset_get_iter_key_td import PerfPresetGetIterKeyTd # 2 properties
from perf_object_instance_list_info_iter_key_td import PerfObjectInstanceListInfoIterKeyTd # 3 properties
from performance_archive_state import PerformanceArchiveState # 0 properties
class PerfConnection(NaConnection):
    def perf_object_instance_list_info(self, objectname):
        """
        Get the list of names of current instances of an object.
        This will return the names of all current instances of the
        specified object with one call. If the object is expected to
        have a large number of instances, the iterator version
        of this API should be used.
        :param objectname: Name of the object to get instance information for.
        """
        # Delegates to NaConnection.request: the first mapping describes the
        # request elements, the second the typed elements parsed from the reply.
        return self.request( "perf-object-instance-list-info", {
            'objectname': [ objectname, 'objectname', [ basestring, 'None' ], False ],
        }, {
            'instances': [ InstanceInfo, True ],
        } )
    def perf_archive_config_modify(self, datastore_file_rotation=None, datastore_max_percent=None, datastore_max_retention=None, is_enabled=None):
        """
        Modify the current Performance Archive configuration
        :param datastore_file_rotation: Minutes Between Performance Archive Data File Rotations
        :param datastore_max_percent: Maximum Percentage of Root Volume Used for Performance Archive
        Data
        :param datastore_max_retention: Days to Retain Performance Archive Data
        :param is_enabled: Is the Performance Archive Enabled?
        """
        # Issues the "perf-archive-config-modify" ZAPI call; no structured
        # output is parsed (empty output mapping).
        return self.request( "perf-archive-config-modify", {
            'datastore_file_rotation': [ datastore_file_rotation, 'datastore-file-rotation', [ int, 'None' ], False ],
            'datastore_max_percent': [ datastore_max_percent, 'datastore-max-percent', [ int, 'None' ], False ],
            'datastore_max_retention': [ datastore_max_retention, 'datastore-max-retention', [ int, 'None' ], False ],
            'is_enabled': [ is_enabled, 'is-enabled', [ bool, 'None' ], False ],
        }, {
        } )
    def perf_preset_create(self, preset, comment=None, preset_detail_infos=None, is_archive_enabled=None, expiration_length=None, privilege=None):
        """
        Create a single Performance Preset configuration and all of its
        details
        :param preset: Preset Name
        :param comment: Preset Description
        :param preset_detail_infos: Details of the Performance Preset
        :param is_archive_enabled: Is Preset Enabled?
        :param expiration_length: Default expiration length (in minutes) until the Performance
        Preset will be automatically disabled.
        :param privilege: Privilege level
        <br/>Possible Values: admin, advanced, diagnostic
        """
        # Issues the "perf-preset-create" ZAPI call; preset-detail-infos is a
        # list element (last spec flag True).
        return self.request( "perf-preset-create", {
            'comment': [ comment, 'comment', [ basestring, 'None' ], False ],
            'preset_detail_infos': [ preset_detail_infos, 'preset-detail-infos', [ PresetDetailInfo, 'None' ], True ],
            'is_archive_enabled': [ is_archive_enabled, 'is-archive-enabled', [ bool, 'None' ], False ],
            'expiration_length': [ expiration_length, 'expiration-length', [ int, 'None' ], False ],
            'privilege': [ privilege, 'privilege', [ basestring, 'None' ], False ],
            'preset': [ preset, 'preset', [ basestring, 'None' ], False ],
        }, {
        } )
    def perf_archive_get_instances_iter_next(self, tag, maximum):
        """
        Continue retrieving the instances of an archive.
        When the 'records' output element is 0, all
        instance data have been retrieved and the
        perf-archive-get-instances-iter-end API should be called.
        :param tag: Tag from earlier call to perf-archive-get-instances-iter-start
        Note that any tag not used for 1 hour will be deleted.
        :param maximum: Maximum number of records to retrieve with this call.<b>
        Range: [0..2^31-1]
        """
        # Issues the "perf-archive-get-instances-iter-next" ZAPI call.
        # (Docstring previously referenced the perf-object-instance-list-info
        # iterator APIs -- an apparent copy-paste error.)
        return self.request( "perf-archive-get-instances-iter-next", {
            'tag': tag,
            'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],
        }, {
            'generation': [ int, False ],
            'records': [ int, False ],
            'archive-records': [ ArchiveRecord, True ],
        } )
    def perf_archive_get_instances_iter_end(self, tag):
        """
        Terminate a perf-archive-get-instances iterator.
        :param tag: Tag from a previous perf-archive-get-instances-iter-start
        """
        # Issues the "perf-archive-get-instances-iter-end" ZAPI call; the raw
        # tag is passed through without a type spec.
        return self.request( "perf-archive-get-instances-iter-end", {
            'tag': tag,
        }, {
        } )
    def perf_archive_destroy(self, archive):
        """
        Destroy a Performance Archive
        :param archive: Performance Archive Name
        """
        # Issues the "perf-archive-destroy" ZAPI call; the reply carries
        # job-style result elements (status, jobid, error code/message).
        return self.request( "perf-archive-destroy", {
            'archive': [ archive, 'archive', [ basestring, 'None' ], False ],
        }, {
            'result-error-message': [ basestring, False ],
            'result-jobid': [ int, False ],
            'result-error-code': [ int, False ],
            'result-status': [ basestring, False ],
        } )
    def perf_archive_config_get(self, desired_attributes=None):
        """
        Display the current Performance Archive configuration
        :param desired_attributes: Specify the attributes that should be returned.
        If not present, all attributes for which information is available
        will be returned.
        If present, only the desired attributes for which information is
        available will be returned.
        """
        # Issues the "perf-archive-config-get" ZAPI call and parses the single
        # 'attributes' element into an ArchiveConfigInfo.
        return self.request( "perf-archive-config-get", {
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ ArchiveConfigInfo, 'None' ], False ],
        }, {
            'attributes': [ ArchiveConfigInfo, False ],
        } )
    def perf_object_instance_list_info_iter_end(self, tag):
        """
        Terminate a perf-object-instance-list-info iterator.
        :param tag: Tag from a previous perf-object-instance-list-info-iter-start
        """
        # Issues the "perf-object-instance-list-info-iter-end" ZAPI call.
        return self.request( "perf-object-instance-list-info-iter-end", {
            'tag': tag,
        }, {
        } )
    def perf_object_instance_list_info_iter_start(self, objectname):
        """
        Begin retrieving the names of instances of an object. This
        call should be followed with one or more calls to
        perf-object-instance-list-info-iter-next
        :param objectname: Name of the object to get instance information for.
        """
        # Issues the "perf-object-instance-list-info-iter-start" ZAPI call;
        # the returned 'tag' is handed to the iter-next/iter-end calls.
        return self.request( "perf-object-instance-list-info-iter-start", {
            'objectname': [ objectname, 'objectname', [ basestring, 'None' ], False ],
        }, {
            'records': [ int, False ],
            'tag': [ basestring, False ],
        } )
    def perf_object_get_instances_iter_end(self, tag):
        """
        Terminate a perf-object-get-instances iterator.
        :param tag: Tag from a previous perf-object-get-instances-iter-start
        """
        # Issues the "perf-object-get-instances-iter-end" ZAPI call.
        # (Fixed docstring typo: "pervious" -> "previous".)
        return self.request( "perf-object-get-instances-iter-end", {
            'tag': tag,
        }, {
        } )
    def perf_preset_modify(self, preset, comment=None, privilege=None, is_archive_enabled=None, new_name=None):
        """
        Modify a Performance Preset
        :param preset: Preset Name
        :param comment: Preset Description
        :param privilege: Preset Privilege Level
        :param is_archive_enabled: Is Preset Archive-Enabled?
        :param new_name: New Preset Name
        """
        # Issues the "perf-preset-modify" ZAPI call; 'privilege' uses the
        # 'privilege-level' wire subtype.
        return self.request( "perf-preset-modify", {
            'comment': [ comment, 'comment', [ basestring, 'None' ], False ],
            'privilege': [ privilege, 'privilege', [ basestring, 'privilege-level' ], False ],
            'is_archive_enabled': [ is_archive_enabled, 'is-archive-enabled', [ bool, 'None' ], False ],
            'preset': [ preset, 'preset', [ basestring, 'None' ], False ],
            'new_name': [ new_name, 'new-name', [ basestring, 'None' ], False ],
        }, {
        } )
    def perf_object_get_instances_iter_next(self, tag, maximum):
        """
        Continue retrieving the values of counters of instances of an
        object. This call will return a partial list instance names,
        continued from the previous call with the same tag. When the
        'records' output element is 0, all counter values of all instances
        have been retrieved and the perf-object-get-instances-iter-end
        API should be called.
        :param tag: Tag from a previous perf-object-get-instances-iter-start
        :param maximum: Maximum number of entries to retrieve with this call.<br>
        Range: [0..2^31-1]
        """
        # Issues the "perf-object-get-instances-iter-next" ZAPI call.
        # (Docstring previously pointed at perf-object-instance-list-info-iter-end;
        # corrected to the matching iter-end of this API family.)
        return self.request( "perf-object-get-instances-iter-next", {
            'tag': tag,
            'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],
        }, {
            'records': [ int, False ],
            'instances': [ InstanceData, True ],
        } )
    def perf_preset_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
        """
        Iteratively get information about Performance Presets
        :param max_records: The maximum number of records to return in this call.
        Default: 20
        :param query: A query that specifies which objects to return.
        A query could be specified on any number of attributes in the
        perf-preset object.
        All perf-preset objects matching this query up to 'max-records'
        will be returned.
        :param tag: Specify the tag from the last call.
        It is usually not specified for the first call. For subsequent
        calls, copy values from the 'next-tag' obtained from the previous
        call.
        :param desired_attributes: Specify the attributes that should be returned.
        If not present, all attributes for which information is available
        will be returned.
        If present, only the desired attributes for which information is
        available will be returned.
        """
        # Issues the "perf-preset-get-iter" ZAPI call; max_records and tag are
        # passed through raw, query/desired-attributes as PresetInfo elements.
        return self.request( "perf-preset-get-iter", {
            'max_records': max_records,
            'query': [ query, 'query', [ PresetInfo, 'None' ], False ],
            'tag': tag,
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ PresetInfo, 'None' ], False ],
        }, {
            'attributes-list': [ PresetInfo, True ],
        } )
    def perf_archive_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
        """
        Iteratively get information about Performance Archives
        :param max_records: The maximum number of records to return in this call.
        Default: 20
        :param query: A query that specifies which objects to return.
        A query could be specified on any number of attributes in the
        perf-archive object.
        All perf-archive objects matching this query up to 'max-records'
        will be returned.
        :param tag: Specify the tag from the last call.
        It is usually not specified for the first call. For subsequent
        calls, copy values from the 'next-tag' obtained from the previous
        call.
        :param desired_attributes: Specify the attributes that should be returned.
        If not present, all attributes for which information is available
        will be returned.
        If present, only the desired attributes for which information is
        available will be returned.
        """
        # Issues the "perf-archive-get-iter" ZAPI call; mirrors
        # perf_preset_get_iter but with PerfArchiveInfo records.
        return self.request( "perf-archive-get-iter", {
            'max_records': max_records,
            'query': [ query, 'query', [ PerfArchiveInfo, 'None' ], False ],
            'tag': tag,
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ PerfArchiveInfo, 'None' ], False ],
        }, {
            'attributes-list': [ PerfArchiveInfo, True ],
        } )
    def perf_preset_detail_get(self, preset):
        """
        Get detailed information about a single Performance Preset configuration
        :param preset: Preset Name
        """
        # Issues the "perf-preset-detail-get" ZAPI call; the reply is parsed
        # into a flat set of scalar elements plus the preset-detail-infos list.
        return self.request( "perf-preset-detail-get", {
            'preset': [ preset, 'preset', [ basestring, 'None' ], False ],
        }, {
            'comment': [ basestring, False ],
            'uuid': [ basestring, False ],
            'is-read-only': [ bool, False ],
            'generation-id': [ int, False ],
            'preset': [ basestring, False ],
            'expiration-length': [ int, False ],
            'expiration-end-date': [ int, False ],
            'privilege': [ basestring, False ],
            'is-archive-enabled': [ bool, False ],
            'preset-detail-infos': [ PresetDetailInfo, True ],
        } )
    def perf_archive_create(self, end_time, start_time, archive, comment=None, case_number=None, source_nodes=None):
        """
        Save a new Performance Archive
        :param end_time: End of Performance Archive's time range (UTC)
        :param start_time: Start of Performance Archive's time range (UTC)
        :param archive: Performance Archive Name
        :param comment: Performance Archive Comment
        :param case_number: Support Case Number Associated with Archive
        :param source_nodes: Originating Nodes for Archive Data
        """
        # Issues the "perf-archive-create" ZAPI call; source-nodes is a list
        # of node names, and the reply carries job-style result elements.
        return self.request( "perf-archive-create", {
            'comment': [ comment, 'comment', [ basestring, 'None' ], False ],
            'end_time': [ end_time, 'end-time', [ int, 'None' ], False ],
            'start_time': [ start_time, 'start-time', [ int, 'None' ], False ],
            'case_number': [ case_number, 'case-number', [ basestring, 'None' ], False ],
            'source_nodes': [ source_nodes, 'source-nodes', [ basestring, 'node-name' ], True ],
            'archive': [ archive, 'archive', [ basestring, 'None' ], False ],
        }, {
            'result-error-message': [ basestring, False ],
            'result-jobid': [ int, False ],
            'result-error-code': [ int, False ],
            'result': [ PerfArchiveInfo, False ],
            'result-status': [ basestring, False ],
        } )
    def perf_object_get_instances(self, objectname, instance_uuids=None, filter_data=None, privilege_level=None, instances=None, counters=None):
        """
        Get a list of current counter values of instances of an object. This will
        return the values of all specified counters and instances of the specified
        object with one call.<p>
        In Data ONTAP 7-Mode, if the object is expected to have a large number of
        instances and/or counters, the iterator version of this API should be
        used.
        :param objectname: Name of the object to get counter values for.
        :param instance_uuids: List of instance UUIDs for which to get counter values. This element
        can be used to limit data collection to a specified subset of the
        instances of the object. Either instances or instance-uuids input
        must be provided. The API errors out if both of these inputs are
        provided or neither of these inputs is provided, or if more than 500
        instances are requested.
        :param filter_data: Filter to apply to the performance data.
        :param privilege_level: Name of the privilege level. Possible values: "basic", "admin", "advanced", "diag"
        If the element is absent, the default privilege of the object will be used.
        :param instances: List of instance names for which to get counter values. This element
        can be used to limit data collection to a specified subset of the
        instances of the object.<p>
        In Data ONTAP 7-Mode, counter values of all instances of the object
        will be retrieved when this element is absent.<p>
        In Data ONTAP Cluster-Mode, either instances or instance-uuids input
        must be provided. The API errors out if both of these inputs are
        provided or neither of these inputs is provided, or if more than 500
        instances are requested.
        :param counters: List of counters whose values will be retrieved. This element can
        be used to limit data collection to a specified subset of the counters
        of instances of the object. If this element is absent, values of all
        counters will be retrieved.
        """
        # Issues the "perf-object-get-instances" ZAPI call; instance-uuids,
        # instances and counters are list elements (last spec flag True).
        # NOTE(review): the instances/instance-uuids mutual-exclusion rule in
        # the docstring is enforced server-side, not by this wrapper.
        return self.request( "perf-object-get-instances", {
            'instance_uuids': [ instance_uuids, 'instance-uuids', [ basestring, 'instance-uuid' ], True ],
            'filter_data': [ filter_data, 'filter-data', [ basestring, 'filter-data' ], False ],
            'objectname': [ objectname, 'objectname', [ basestring, 'None' ], False ],
            'privilege_level': [ privilege_level, 'privilege-level', [ basestring, 'None' ], False ],
            'instances': [ instances, 'instances', [ basestring, 'instance' ], True ],
            'counters': [ counters, 'counters', [ basestring, 'counter' ], True ],
        }, {
            'timestamp': [ basestring, False ],
            'instances': [ InstanceData, True ],
        } )
    def perf_preset_delete(self, preset):
        """
        Delete a Performance Preset and all of its Preset Details
        :param preset: Preset Name
        """
        # Issues the "perf-preset-delete" ZAPI call; no structured output.
        return self.request( "perf-preset-delete", {
            'preset': [ preset, 'preset', [ basestring, 'None' ], False ],
        }, {
        } )
    def perf_archive_get_headers(self, duration, timestamp):
        """
        Retrieve all headers available of archive datafiles.
        This call should be preceded by a call to
        perf-archive-get-oldest-timestamp, otherwise an error may
        be returned if the input timestamp is too old.
        :param duration: Duration in milliseconds to retrieve headers
        A zero value duration signifies to go until the most recent timestamp.
        Range: [0..2^31-1]
        :param timestamp: Timestamp (in seconds, Unix Epoch Time) to start searching for headers.
        Range: [0..2^31-1]
        """
        # Issues the "perf-archive-get-headers" ZAPI call. Note the timestamp
        # element is declared as a string on the wire despite being numeric.
        return self.request( "perf-archive-get-headers", {
            'duration': [ duration, 'duration', [ int, 'None' ], False ],
            'timestamp': [ timestamp, 'timestamp', [ basestring, 'None' ], False ],
        }, {
            'archive-headers': [ ArchiveHeader, True ],
        } )
    def perf_archive_get_oldest_timestamp(self):
        """
        Retrieve the oldest timestamp available.
        """
        # Issues the "perf-archive-get-oldest-timestamp" ZAPI call; takes no
        # input elements and returns a single integer timestamp.
        return self.request( "perf-archive-get-oldest-timestamp", {
        }, {
            'timestamp': [ int, False ],
        } )
def perf_object_instance_list_info_iter_next(self, tag, maximum):
    """
    Continue retrieving the names of instances of an object. This call
    will return a partial list of instance names, continued from the previous
    call with the same name. When the 'records' output element is 0, all
    instance names have been retrieved and the
    perf-object-instance-list-info-iter-end API should be called.

    :param tag: Tag from earlier call to perf-object-instance-list-info-iter-start
    :param maximum: Maximum number of instance names to retrieve with this call.
        Range: [0..2^31-1]
    """
    return self.request( "perf-object-instance-list-info-iter-next", {
        # 'tag' is passed through untyped: it is an opaque cursor produced
        # by the matching iter-start call.
        'tag': tag,
        'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],
    }, {
        'records': [ int, False ],
        'instances': [ InstanceInfo, True ],
    } )
def perf_object_get_instances_iter_start(self, objectname, instances=None, counters=None):
    """
    Begin retrieving the counter values of instances of an object.
    This call should be followed with one or more calls to
    perf-object-get-instances-iter-next.

    :param objectname: Name of the object to get counter values for.
    :param instances: List of instances to get counter values for. This element can be
        used to limit data collection to a specified subset of the instances of
        the object. If this element is absent, counter values of all instances
        of the object will be retrieved.
    :param counters: List of counters whose values will be retrieved. This element can
        be used to limit data collection to a specified subset of the counters
        of instances of the object. If this element is absent, values of all
        counters will be retrieved.
    """
    # Arg spec format: [value, zapi-element-name, [value-type, child-name], is-list].
    return self.request( "perf-object-get-instances-iter-start", {
        'instances': [ instances, 'instances', [ basestring, 'instance' ], True ],
        'objectname': [ objectname, 'objectname', [ basestring, 'None' ], False ],
        'counters': [ counters, 'counters', [ basestring, 'counter' ], True ],
    }, {
        'timestamp': [ basestring, False ],
        'tag': [ basestring, False ],
        'records': [ int, False ],
    } )
def perf_object_counter_list_info(self, objectname, filter_data=None):
    """
    Get information about the counters of an object. This information
    is static and independent of any particular instance of the object.
    It includes counter names and descriptions, as well as properties
    which are necessary to interpret counter values.

    :param objectname: Name of the object to get information for.
    :param filter_data: Allows selecting counters from a particular node. An example
        filter is "node_name=hostname".
        This element is deprecated in Data ONTAP 8.2 and later, as filtering by node is
        no longer necessary. Each node contains the same set of counter definitions,
        so filtering by node yields the same results as not filtering at all.
    """
    # Arg spec format: [value, zapi-element-name, [value-type, child-name], is-list].
    return self.request( "perf-object-counter-list-info", {
        'filter_data': [ filter_data, 'filter-data', [ basestring, 'filter-data' ], False ],
        'objectname': [ objectname, 'objectname', [ basestring, 'None' ], False ],
    }, {
        'counters': [ CounterInfo, True ],
    } )
def perf_archive_get_instances_iter_start(self, timestamp):
    """
    Begin retrieving the instances of an archive.
    This call should be followed with one or more calls to
    perf-archive-get-instances-iter-next.
    This call should be preceded by a call to
    perf-archive-get-oldest-timestamp, otherwise an error may
    be returned if the input timestamp is too old.

    :param timestamp: Timestamp (in seconds, Unix Epoch Time) to start.
        Range: [0..2^31-1]
    """
    # Returns an opaque 'tag' cursor for the subsequent iter-next calls.
    return self.request( "perf-archive-get-instances-iter-start", {
        'timestamp': [ timestamp, 'timestamp', [ basestring, 'None' ], False ],
    }, {
        'tag': [ basestring, False ],
    } )
def perf_object_list_info(self, filter_data=None):
    """
    Get list of performance objects in the system.

    :param filter_data: Allows selecting objects from a particular node. An example
        filter is "node_name=hostname".
        This element is deprecated in Data ONTAP 8.2 and later as filtering by node is
        no longer necessary. Each node contains the same set of object definitions,
        so filtering by node yields the same results as not filtering at all.
    """
    # Arg spec format: [value, zapi-element-name, [value-type, child-name], is-list].
    return self.request( "perf-object-list-info", {
        'filter_data': [ filter_data, 'filter-data', [ basestring, 'filter-data' ], False ],
    }, {
        'objects': [ ObjectInfo, True ],
    } )
def perf_archive_modify(self, archive, comment=None, case_number=None):
    """
    Modify properties of a Performance Archive.

    :param archive: Performance Archive Name
    :param comment: Performance Archive Comment
    :param case_number: Support Case Number Associated with Archive
    """
    # Arg spec format: [value, zapi-element-name, [value-type, child-name], is-list].
    # Empty result dict: this ZAPI call returns no payload on success.
    return self.request( "perf-archive-modify", {
        'comment': [ comment, 'comment', [ basestring, 'None' ], False ],
        'archive': [ archive, 'archive', [ basestring, 'None' ], False ],
        'case_number': [ case_number, 'case-number', [ basestring, 'None' ], False ],
    }, {
    } )
def perf_object_instance_list_info_iter(self, objectname, filter_data=None, desired_attributes=None, max_records=None, tag=None, query=None):
    """
    Gets the list of names and UUIDs of current instances of an object.
    This iterative API will return names and UUIDs of the current instances
    of the specified object; the number of records returned is specified by
    the max-records input element.

    To retrieve all the instances of the specified object, the value of the
    next-tag output element should be passed in as the value of the tag
    input element on the subsequent API call.

    :param objectname: Name of the object for which to get instance information.
    :param filter_data: Filter to apply to instance list; see the 'filter-data'
        type for details.
    :param desired_attributes: Specify the attributes that should be returned.
        If not present, all attributes for which information is available
        will be returned.
        If present, only the desired attributes for which information is
        available will be returned.
    :param max_records: The maximum number of records to return in this call.
        Default: 1000
    :param tag: Specify the tag from the last call.
        It is usually not specified for the first call. For subsequent
        calls, copy values from the 'next-tag' obtained from the previous
        call.
    :param query: A query that specifies which objects to return.
        A query could be specified on any number of attributes in the
        perf object.
        All perf objects matching this query up to 'max-records' will be
        returned.
    """
    # Arg spec format: [value, zapi-element-name, [value-type, child-name], is-list].
    # 'max_records' and 'tag' are passed through untyped (opaque paging state).
    return self.request( "perf-object-instance-list-info-iter", {
        'filter_data': [ filter_data, 'filter-data', [ basestring, 'None' ], False ],
        'objectname': [ objectname, 'objectname', [ basestring, 'None' ], False ],
        'desired_attributes': [ desired_attributes, 'desired-attributes', [ InstanceInfo, 'None' ], False ],
        'max_records': max_records,
        'tag': tag,
        'query': [ query, 'query', [ InstanceInfo, 'None' ], False ],
    }, {
        'attributes-list': [ InstanceInfo, True ],
    } )
| StarcoderdataPython |
1767030 | from twitter.twitter import Twitter # noqa
| StarcoderdataPython |
177427 | <gh_stars>1-10
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import unittest
import json
from libs import time_util
from model import swarming_task_queue_request
class SwarmingTaskQueueRequestTest(unittest.TestCase):
    """Unit tests for SwarmingTaskQueueRequest.Create()."""

    def testCreateDefault(self):
        """Create() with no arguments should fall back to the model defaults."""
        request = swarming_task_queue_request.SwarmingTaskQueueRequest.Create()
        self.assertEqual(
            request.taskqueue_state,
            swarming_task_queue_request.SwarmingTaskQueueState.SCHEDULED)
        self.assertEqual(
            request.taskqueue_priority,
            swarming_task_queue_request.SwarmingTaskQueuePriority.FORCE)
        # The default request time is "now", so only its type can be asserted.
        self.assertTrue(
            isinstance(request.taskqueue_request_time, datetime.datetime))
        self.assertEqual(request.taskqueue_dimensions, None)
        self.assertEqual(request.swarming_task_request, None)

    def testCreate(self):
        """Explicitly supplied fields should be stored unchanged."""
        request = swarming_task_queue_request.SwarmingTaskQueueRequest.Create(
            taskqueue_state=swarming_task_queue_request.SwarmingTaskQueueState.
            READY,
            taskqueue_priority=swarming_task_queue_request.
            SwarmingTaskQueuePriority.FLAKE,
            taskqueue_request_time=datetime.datetime(2017, 1, 2),
            taskqueue_dimensions='dim',
            swarming_task_request='request')
        self.assertEqual(request.taskqueue_state,
                         swarming_task_queue_request.SwarmingTaskQueueState.READY)
        self.assertEqual(
            request.taskqueue_priority,
            swarming_task_queue_request.SwarmingTaskQueuePriority.FLAKE)
        self.assertEqual(request.taskqueue_request_time,
                         datetime.datetime(2017, 1, 2))
        self.assertEqual(request.taskqueue_dimensions, 'dim')
        self.assertEqual(request.swarming_task_request, 'request')
| StarcoderdataPython |
3594404 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool to help developers rebase branches across the Blink rename."""
import argparse
import json
import os
import subprocess
import shutil
import sys
import tempfile
class _DepotToolsNotFoundException(Exception):
pass
def _whereis(name):
"""Find and return the first entry in $PATH containing a file named |name|.
Returns the path if found; otherwise returns nothing.
"""
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, name)):
return path
def _find_depot_tools():
  """Attempts to configure and return a wrapper for invoking depot tools.

  Returns:
    A helper object for invoking depot tools.

  Raises:
    _DepotToolsNotFoundException: An error occurred trying to find depot tools.
  """

  class DepotToolsWrapper(object):
    """Thin wrapper exposing the two depot_tools scripts this tool uses."""

    def __init__(self, path):
      self.__download_from_google_storage = os.path.join(
          path, 'download_from_google_storage.py')
      self.__gsutil = os.path.join(path, 'gsutil.py')

    def call_download_from_google_storage(self, *args):
      """Runs download_from_google_storage with the given args."""
      subprocess.check_call(['python', self.__download_from_google_storage] +
                            list(args))

    def call_gsutil(self, *args):
      """Runs gsutil with the given args."""
      subprocess.check_call(['python', self.__gsutil] + list(args))

  # Attempt to find download_from_google_storage.py from depot_tools.
  path = _whereis('download_from_google_storage.py')
  if not path:
    raise _DepotToolsNotFoundException(
        'download_from_google_storage.py not found. Make sure depot_tools is '
        'in $PATH.')

  # Make sure gsutil.py is in the same location.
  # BUG FIX: the original searched for 'download_from_google_storage.py' a
  # second time here, so path2 always equaled path and neither the
  # "gsutil.py not found" error nor the mismatch check below could ever fire.
  path2 = _whereis('gsutil.py')
  if not path2:
    raise _DepotToolsNotFoundException(
        'gsutil.py not found. Make sure depot_tools is in $PATH.')

  if path != path2:
    raise _DepotToolsNotFoundException(
        'download_from_google_storage.py found in %s but gsutil.py found in %s.'
        % (path, path2))

  return DepotToolsWrapper(path)
class Bootstrapper(object):
  """Helper class for bootstrapping startup of the rebase helper.

  Performs update checks and stages any required binaries."""

  def __init__(self, depot_tools, components_manifest_name):
    """Bootstrapper constructor.

    Args:
      depot_tools: a wrapper for invoking depot_tools.
      components_manifest_name: The name of the components manifest.
    """
    self.__depot_tools = depot_tools
    self.__components_manifest_name = components_manifest_name
    self.__tmpdir = None

  def __enter__(self):
    # Context-manager protocol: downloads are staged in a throwaway
    # directory that __exit__ removes unconditionally.
    self.__tmpdir = tempfile.mkdtemp()
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    shutil.rmtree(self.__tmpdir, ignore_errors=True)

  def update(self):
    """Performs an update check for various components."""
    components = self._get_latest_components()
    for name, sha1_hash in components.iteritems():
      args = [
          '--no_auth', '--no_resume', '-b', 'chromium-blink-rename',
          '--extract', sha1_hash
      ]
      # Component names may carry a platform suffix after the first '-',
      # e.g. "tool-linux64"; pass it through with -p and strip it from name.
      if '-' in name:
        name, platform = name.split('-', 1)
        args.append('-p')
        args.append(platform)
      args.append('-o')
      args.append(os.path.join('staging', '%s.tar.gz' % name))
      self.__depot_tools.call_download_from_google_storage(*args)

  def _get_latest_components(self):
    """Fetches info about the latest components from google storage.

    The return value should be a dict of component names to SHA1 hashes."""
    components_path = os.path.join(self.__tmpdir, 'COMPONENTS')
    self.__depot_tools.call_gsutil(
        'cp', 'gs://chromium-blink-rename/%s' % self.__components_manifest_name,
        components_path)
    with open(components_path) as f:
      return json.loads(f.read())
def main():
  """Entry point: parse flags, stage components, then hand off to stage 2."""
  # Intentionally suppress help. These are internal testing flags.
  parser = argparse.ArgumentParser(add_help=False)
  parser.add_argument('--components-manifest-name', default='COMPONENTS')
  parser.add_argument('--pylib-path')
  args, remaining_argv = parser.parse_known_args()

  # Run from the directory holding this script so relative paths
  # (e.g. 'staging/...') resolve consistently.
  script_dir = os.path.dirname(os.path.realpath(__file__))
  os.chdir(script_dir)

  try:
    depot_tools = _find_depot_tools()
  except _DepotToolsNotFoundException as e:
    print e.message
    return 1

  print 'Checking for updates...'
  with Bootstrapper(depot_tools, args.components_manifest_name) as bootstrapper:
    bootstrapper.update()

  # Import stage 2 and launch it. The staged pylib is prepended to sys.path
  # so the freshly-downloaded helper package wins over any stale install.
  tool_pylib = args.pylib_path
  if not tool_pylib:
    tool_pylib = os.path.abspath(os.path.join(script_dir, 'staging/pylib'))
  sys.path.insert(0, tool_pylib)

  from blink_rename_merge_helper import driver

  # Note: for compatibility with older versions of run.py, set sys.argv to the
  # unconsumed args.
  sys.argv = sys.argv[:1] + remaining_argv
  driver.run()
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
6401361 | from numpy import int64
from randomForest_classifier.predict import make_prediction
from randomForest_classifier.processing.data_management import load_dataset
def test_make_single_prediction():
    """make_prediction on a single CSV row should yield one class label (0)."""
    # Given
    test_data = load_dataset(file_name='test.csv')
    single_test_input = test_data[0:1]

    # When
    subject = make_prediction(input_data=single_test_input)

    # Then
    # Predictions come back as numpy int64 values under the 'predictions' key.
    assert subject is not None
    assert isinstance(subject.get('predictions')[0], int64)
    assert subject.get('predictions')[0] == 0
def test_make_multiple_predictions():
    """make_prediction on the full test set should filter some rows out.

    The expected counts (1649 predictions summing to 1213) are snapshot
    values tied to the committed test.csv and trained model artifacts.
    """
    # Given
    test_data = load_dataset(file_name='test.csv')
    original_data_length = len(test_data)
    multiple_test_input = test_data

    # When
    subject = make_prediction(input_data=multiple_test_input)

    # Then
    assert subject is not None
    assert len(subject.get('predictions')) == 1649
    assert subject.get('predictions').sum() == 1213

    # We expect some rows to be filtered out
    assert len(subject.get('predictions')) != original_data_length
| StarcoderdataPython |
1862 | <reponame>jgrigera/indico<filename>indico/web/forms/fields/protection.py<gh_stars>1-10
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import absolute_import, unicode_literals
from flask import render_template
from markupsafe import Markup
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.util.i18n import _
from indico.web.forms.fields import IndicoEnumRadioField
from indico.web.forms.widgets import JinjaWidget
class IndicoProtectionField(IndicoEnumRadioField):
    """Radio-button field for choosing an object's ProtectionMode.

    The 'inheriting' option is only offered when the protected object has a
    protection parent to inherit from.
    """

    widget = JinjaWidget('forms/protection_widget.html', single_kwargs=True)
    radio_widget = JinjaWidget('forms/radio_buttons_widget.html', orientation='horizontal', single_kwargs=True)

    def __init__(self, *args, **kwargs):
        # 'protected_object' and 'acl_message_url' are passed as callables
        # taking the form, so they can be resolved lazily per-form instance.
        self.protected_object = kwargs.pop('protected_object')(kwargs['_form'])
        get_acl_message_url = kwargs.pop('acl_message_url', None)
        self.acl_message_url = get_acl_message_url(kwargs['_form']) if get_acl_message_url else None
        self.can_inherit_protection = self.protected_object.protection_parent is not None
        if not self.can_inherit_protection:
            # No parent to inherit from: hide the 'inheriting' choice.
            kwargs['skip'] = {ProtectionMode.inheriting}
        super(IndicoProtectionField, self).__init__(*args, enum=ProtectionMode, **kwargs)

    def render_protection_message(self):
        """Render the informational HTML blurb shown next to the field."""
        protected_object = self.get_form().protected_object
        if hasattr(protected_object, 'get_non_inheriting_objects'):
            non_inheriting_objects = protected_object.get_non_inheriting_objects()
        else:
            non_inheriting_objects = []
        # Human-readable label for the protection parent's type.
        if isinstance(protected_object.protection_parent, db.m.Event):
            parent_type = _('Event')
        elif isinstance(protected_object.protection_parent, db.m.Category):
            parent_type = _('Category')
        else:
            parent_type = _('Session')
        rv = render_template('_protection_info.html', field=self, protected_object=protected_object,
                             parent_type=parent_type, non_inheriting_objects=non_inheriting_objects)
        return Markup(rv)
| StarcoderdataPython |
5060252 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/schools8_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="W_0ED20uQKha"
# In this notebook, we fit a hierarchical Bayesian model to the "8 schools" dataset.
# See also https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# + id="HXRokZL1QPvB"
# %matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# + id="C5EHDB-rQSIa" colab={"base_uri": "https://localhost:8080/"} outputId="d6d8b024-96ba-4014-97d9-ddef6d88349e"
# !pip install -U pymc3>=3.8
import pymc3 as pm
print(pm.__version__)
import theano.tensor as tt
import theano
# #!pip install arviz
import arviz as az
# + id="sKlvHNY6RUaP"
# !mkdir ../figures
# + [markdown] id="-jby_J17HqBT"
# # Data
# + id="8pNC3UANQjeO" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="8f91ec2e-e81b-452b-dcf7-8c9f6ddda82a"
# https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# Data of the Eight Schools Model:
# y holds the observed treatment effects for the J=8 schools,
# sigma the corresponding standard errors.
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
print(np.mean(y))
print(np.median(y))
names=[];
for t in range(8):
    names.append('{}'.format(t));

# Plot raw data: one error bar per school (effect +/- standard error).
fig, ax = plt.subplots()
y_pos = np.arange(8)
ax.errorbar(y,y_pos, xerr=sigma, fmt='o')
ax.set_yticks(y_pos)
ax.set_yticklabels(names)
ax.invert_yaxis()  # labels read top-to-bottom
plt.title('8 schools')
plt.savefig('../figures/schools8_data.png')
plt.show()
# + [markdown] id="vcAdKbnXHsKE"
# # Centered model
# + id="-Lxa_JgfQmAI" colab={"base_uri": "https://localhost:8080/", "height": 723} outputId="573cdde1-a178-4949-de75-af036d02f6dd"
# Centered model
with pm.Model() as Centered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with Centered_eight:
trace_centered = pm.sample(1000, chains=4, return_inferencedata=False)
pm.summary(trace_centered).round(2)
# PyMC3 gives multiple warnings about divergences
# Also, see r_hat ~ 1.01, ESS << nchains*1000, especially for sigma_alpha
# We can solve these problems below by using a non-centered parameterization.
# In practice, for this model, the results are very similar.
# + id="pOrDPo_lQob_" colab={"base_uri": "https://localhost:8080/"} outputId="0cbd7421-2754-43c2-a468-7250ae30b8d1"
# Display the total number and percentage of divergent chains
diverging = trace_centered['diverging']
print('Number of Divergent Chains: {}'.format(diverging.nonzero()[0].size))
diverging_pct = diverging.nonzero()[0].size / len(trace_centered) * 100
print('Percentage of Divergent Chains: {:.1f}'.format(diverging_pct))
# + id="bYbhbC-kT8GV" outputId="77b27048-57ad-456c-f6ea-7bbeee7d1d94" colab={"base_uri": "https://localhost:8080/"}
dir(trace_centered)
# + id="9ODVo7cLUKs8" outputId="505c9b7c-6b7f-4b12-be22-c67809d19641" colab={"base_uri": "https://localhost:8080/"}
trace_centered.varnames
# + id="gClLFgqHVuW1" outputId="7447a76c-0e85-4d11-ca0a-fd24babe57dd" colab={"base_uri": "https://localhost:8080/", "height": 356}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_centered_acf_combined.png', dpi=300)
# + id="uWPD88BxTkMj" outputId="ed94b053-2ebc-41f1-91c3-12f0d7eec423" colab={"base_uri": "https://localhost:8080/", "height": 452}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha']);
plt.savefig('schools8_centered_acf.png', dpi=300)
# + id="Uv1QEiQOQtGc" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="7ce96252-9002-4f18-a64c-c55046f5415d"
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=True);
plt.savefig('schools8_centered_forest_combined.png', dpi=300)
# + id="cgzmwxVGZxub" outputId="8979ca4c-d9df-43bb-847e-bad33b2258bb" colab={"base_uri": "https://localhost:8080/", "height": 542}
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=False);
plt.savefig('schools8_centered_forest.png', dpi=300)
# + [markdown] id="BkphbYr_HxOj"
# # Non-centered
# + id="jLFiQS0ZQvR4" colab={"base_uri": "https://localhost:8080/", "height": 905} outputId="8c0caa4b-4aa4-4685-f8ef-ef23ba60b82c"
# Non-centered parameterization
with pm.Model() as NonCentered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha_offset = pm.Normal('alpha_offset', mu=0, sigma=1, shape=J)
alpha = pm.Deterministic('alpha', mu_alpha + sigma_alpha * alpha_offset)
#alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with NonCentered_eight:
trace_noncentered = pm.sample(1000, chains=4)
pm.summary(trace_noncentered).round(2)
# Samples look good: r_hat = 1, ESS ~= nchains*1000
# + id="RyB5Qu-MQxuM" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="4a21b628-5b80-4ae4-a148-a208f33d6d43"
with NonCentered_eight:
az.plot_autocorr(trace_noncentered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_noncentered_acf_combined.png', dpi=300)
# + id="JHmvYgsAQzuK" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="5ed95cc6-49b8-4bc6-acca-59f7c5f5c06b"
with NonCentered_eight:
az.plot_forest(trace_noncentered, var_names="alpha",
combined=True, hdi_prob=0.95);
plt.savefig('schools8_noncentered_forest_combined.png', dpi=300)
# + id="vb8tzwUhXlW0" colab={"base_uri": "https://localhost:8080/", "height": 568} outputId="efad1751-55c1-4d1d-97b8-198f67af8935"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha",
combined=True, hdi_prob=0.95);
plt.axvline(np.mean(y), color='k', linestyle='--')
# + id="JETMmNSuZUV7" colab={"base_uri": "https://localhost:8080/", "height": 647} outputId="835e3d2c-7874-41b5-d22e-d64e18fae9ab"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha", kind='ridgeplot',
combined=True, hdi_prob=0.95);
# + [markdown] id="Q_SYYgL0H13G"
# # Funnel of hell
# + id="E3CtP2kcT4s5" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="17af872c-3d56-48e6-be05-a5aab0b4aa39"
# Plot the "funnel of hell"
# Based on
# https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(trace_centered['mu_alpha'], name='mu_alpha')
y = pd.Series(trace_centered['log_sigma_alpha'], name='log_sigma_alpha')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='log(sigma)');
#axs[0].axhline(0.01)
x = pd.Series(trace_noncentered['mu_alpha'], name='mu')
y = pd.Series(trace_noncentered['log_sigma_alpha'], name='log_sigma_alpha')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel='µ', ylabel='log(sigma)');
#axs[1].axhline(0.01)
plt.savefig('schools8_funnel.png', dpi=300)
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
# + id="EMOdWlU-Q13N" colab={"base_uri": "https://localhost:8080/", "height": 953} outputId="0125ea26-646a-4b29-8a69-7fc508ac5d66"
x = pd.Series(trace_centered['mu_alpha'], name='mu')
y = pd.Series(trace_centered['log_sigma_alpha'], name='log sigma_alpha')
sns.jointplot(x, y, xlim=xlim, ylim=ylim);
plt.suptitle('centered')
plt.savefig('schools8_centered_joint.png', dpi=300)
x = pd.Series(trace_noncentered['mu_alpha'], name='mu')
y = pd.Series(trace_noncentered['log_sigma_alpha'], name='log sigma_alpha')
sns.jointplot(x, y, xlim=xlim, ylim=ylim);
plt.suptitle('noncentered')
plt.savefig('schools8_noncentered_joint.png', dpi=300)
# + id="qAfA7fIWWN9B" colab={"base_uri": "https://localhost:8080/", "height": 351} outputId="9a307f3d-bee9-4ce9-e219-c7b847dc5f78"
group = 0
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(10,5))
x = pd.Series(trace_centered['alpha'][:, group], name=f'alpha {group}')
y = pd.Series(trace_centered['log_sigma_alpha'], name='log_sigma_alpha')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel=r'$\alpha_0$', ylabel=r'$\log(\sigma_\alpha)$');
x = pd.Series(trace_noncentered['alpha'][:,group], name=f'alpha {group}')
y = pd.Series(trace_noncentered['log_sigma_alpha'], name='log_sigma_alpha')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel=r'$\alpha_0$', ylabel=r'$\log(\sigma_\alpha)$');
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
plt.savefig('schools8_funnel_group0.png', dpi=300)
# + id="4AOjRfRijXeA"
| StarcoderdataPython |
6540926 | <reponame>power-edge/mlb_statsapi_etl
"""
created by nikos at 4/29/21
"""
import unittest
from .base_test_mixin import ModelTestMixin, sleep_after_get
class TestSeasonModel(unittest.TestCase, ModelTestMixin):
    """Integration tests for the Stats API season endpoints (sportId=1 is MLB)."""
    from mlb_statsapi.model.api.season import SeasonModel as Mod

    def setUp(self) -> None:
        # noinspection PyTypeChecker
        # doSetUp comes from ModelTestMixin; presumably it populates
        # self.api_doc used below — confirm in base_test_mixin.
        self.doSetUp(self)

    # def test_get_seasons(self):
    #     self.dump(self.api_doc.seasons(
    #         path_params={'seasonId': '2012'},
    #         query_params={'sportId': 1}
    #     ).get().obj)

    @sleep_after_get()
    def test_get_allSeasons(self):
        """Fetch every season on record and dump the response."""
        self.dump(self.api_doc.allSeasons(
            query_params={'sportId': 1}
        ).get().obj)

    @sleep_after_get()
    def test_get_season(self):
        """Fetch a single season (2017) and dump the response."""
        self.dump(self.api_doc.season(
            path_params={'seasonId': 2017},
            query_params={'sportId': 1}
        ).get().obj)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
6407052 | <filename>Python/DeepSSMUtilsPackage/DeepSSMUtils/TorchLoaders.py<gh_stars>0
import os
import numpy as np
import itk
import csv
import random
import subprocess
import torch
from torch import nn
from torch.utils.data import DataLoader
######################## Data loading functions ####################################
'''
Reads csv and makes train and validation data loaders
'''
def getTrainValLoaders(loader_dir, data_csv, batch_size=1, down_sample=False):
    """Build train/validation DataLoaders from a data CSV and save them.

    Reads all samples, shuffles, splits 80/20, and serializes the two
    loaders to loader_dir as 'train' and 'validation' via torch.save.
    NOTE(review): paths are built by string concatenation, so loader_dir
    is assumed to end with a path separator — confirm at call sites.

    :param loader_dir: output directory for loaders and normalization stats
    :param data_csv: CSV of [image_path, model_path, pca_score...] rows
    :param batch_size: training batch size (validation always uses 1)
    :param down_sample: if True, images are downsampled to 3/4 size
    :return: (path_to_train_loader, path_to_validation_loader)
    """
    if not os.path.exists(loader_dir):
        os.makedirs(loader_dir)
    images, scores, models, prefixes = getAllTrainData(loader_dir, data_csv, down_sample)
    images, scores, models, prefixes = shuffleData(images, scores, models, prefixes)
    # split into train (80%) validation(20%)
    cut = int(len(images)*.80)
    print("\nTurning to tensors...")
    train_data = DeepSSMdataset(images[:cut], scores[:cut], models[:cut])
    print(str(len(train_data)) + ' in training set')
    val_data = DeepSSMdataset(images[cut:], scores[cut:], models[cut:])
    print(str(len(val_data)) + ' in validation set')

    print("\nCreating and saving dataloaders...")
    trainloader = DataLoader(
        train_data,
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
        pin_memory=torch.cuda.is_available()
    )
    train_path = loader_dir + 'train'
    torch.save(trainloader, train_path)
    print("Train loader done.")

    validationloader = DataLoader(
        val_data,
        batch_size=1,
        shuffle=True,
        num_workers=8,
        pin_memory=torch.cuda.is_available()
    )
    val_path = loader_dir + 'validation'
    torch.save(validationloader, val_path)
    print("Val loader done.")
    return train_path, val_path
'''
Makes test data loader
'''
def getTestLoader(loader_dir, test_img_list, down_sample):
    """Build and save a DataLoader for unlabeled test images.

    Score/model targets are empty placeholders since test data has no labels.
    Sample prefixes are also written to loader_dir/test_names.txt for the record.

    :param loader_dir: output directory (assumed to end with a separator)
    :param test_img_list: list of test image file paths
    :param down_sample: if True, images are downsampled to 3/4 size
    :return: (path_to_saved_loader, list_of_sample_prefixes)
    """
    # get data
    image_paths = []
    scores = []
    models = []
    test_names = []
    for index in range(len(test_img_list)):
        image_path = test_img_list[index]
        # add name
        prefix = getPrefix(image_path)
        test_names.append(prefix)
        image_paths.append(image_path)
        # add label placeholders
        scores.append([])
        models.append([])
    images = getImages(loader_dir, image_paths, down_sample)
    test_data = DeepSSMdataset(images, scores, models)

    # Write test names to file so they are saved somewhere
    name_file = open(loader_dir + 'test_names.txt', 'w+')
    name_file.write(str(test_names))
    name_file.close()
    print("Test names saved to: " + loader_dir + "test_names.txt")

    # Make loader (batch_size=1, unshuffled so outputs align with test_names)
    print("Creating and saving test dataloader...")
    testloader = DataLoader(
        test_data,
        batch_size=1,
        shuffle=False,
        num_workers=8,
        pin_memory=torch.cuda.is_available()
    )
    test_path = loader_dir + 'test'
    torch.save(testloader, test_path)
    print("Done.")
    return test_path, test_names
'''
returns images, scores, models, prefixes from CSV
'''
def getAllTrainData(loader_dir, data_csv, down_sample):
    """Read the training CSV and return (images, scores, models, prefixes).

    CSV row layout: [image_path, particle_model_path, pca_score_0, ...].
    Images are whitened via getImages and PCA scores via whitenPCAscores;
    particle models are loaded raw.
    """
    # get all data and targets
    print("Reading all data...")
    image_paths = []
    scores = []
    models = []
    prefixes = []
    with open(data_csv, newline='') as csvfile:
        datareader = csv.reader(csvfile)
        index = 0
        for row in datareader:
            image_path = row[0]
            model_path = row[1]
            pca_scores = row[2:]
            # add name
            prefix = getPrefix(image_path)
            # data error check: the image and model of a row must belong to
            # the same sample, otherwise targets would be misaligned.
            if prefix not in getPrefix(model_path):
                print("Error: Images and models mismatched in csv.")
                print(index)
                print(prefix)
                print(getPrefix(model_path))
                exit()  # hard abort on corrupt input
            prefixes.append(prefix)
            # add image path
            image_paths.append(image_path)
            # add score (un-normalized)
            pca_scores = [float(i) for i in pca_scores]
            scores.append(pca_scores)
            # add model
            mdl = getParticles(model_path)
            models.append(mdl)
            index += 1
    images = getImages(loader_dir, image_paths, down_sample)
    scores = whitenPCAscores(scores, loader_dir)
    return images, scores, models, prefixes
'''
Shuffle all data
'''
def shuffleData(images, scores, models, prefixes):
    """Randomly permute the four parallel sequences with one shared ordering.

    Row i of every returned tuple still corresponds to row i of the others.
    """
    print("Shuffling.")
    order = list(range(len(images)))
    random.shuffle(order)
    images = tuple(images[i] for i in order)
    scores = tuple(scores[i] for i in order)
    models = tuple(models[i] for i in order)
    prefixes = tuple(prefixes[i] for i in order)
    return images, scores, models, prefixes
'''
Class for DeepSSM datasets that works with Pytorch DataLoader
'''
class DeepSSMdataset():
    """Dataset pairing input images with PCA-score and particle-model targets.

    Compatible with torch.utils.data.DataLoader via __getitem__/__len__.
    """

    def __init__(self, img, pca_target, mdl_target):
        # Everything is stored as float32 tensors (same as torch.FloatTensor).
        self.img = torch.tensor(img, dtype=torch.float32)
        self.pca_target = torch.tensor(pca_target, dtype=torch.float32)
        self.mdl_target = torch.tensor(mdl_target, dtype=torch.float32)

    def __getitem__(self, index):
        """Return (image, pca_scores, particle_model) for one sample."""
        return self.img[index], self.pca_target[index], self.mdl_target[index]

    def __len__(self):
        return len(self.img)
'''
getTorchDataLoaderHelper
returns sample prefix from path string
'''
def getPrefix(path):
    """Return the sample prefix of a file path.

    The prefix is the first two underscore-separated pieces of the basename,
    with any file extension stripped (e.g. '/d/subj_01_img.nrrd' -> 'subj_01').
    """
    stem = "_".join(os.path.basename(path).split("_")[:2])
    return stem.partition(".")[0]
'''
getTorchDataLoaderHelper
get list from .particles format
'''
def getParticles(model_path):
    """Read a .particles file into a list of [x, y, z] float lists.

    Each line is expected to hold space-separated coordinates, optionally
    followed by a trailing " \\n".

    :param model_path: path to the .particles file
    :return: list with one list of floats per line
    """
    data = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(model_path, "r") as f:
        for line in f.readlines():
            points = line.replace(' \n', '').split(" ")
            points = [float(i) for i in points]
            data.append(points)
    return data
'''
getTorchDataLoaderHelper
reads .nrrd files and returns whitened data
'''
def getImages(loader_dir, image_list, down_sample):
    """Load images, compute (or reuse cached) dataset mean/std, and whiten.

    Returns a list with one entry per image: each entry is a single-element
    list [normalized_array], giving downstream tensors a channel dimension.
    NOTE(review): mean/std are cached in loader_dir as .npy files, so a stale
    cache from a previous run would silently be reused — confirm intended.
    """
    # get all images
    all_images = []
    for image_path in image_list:
        if down_sample:
            img = downSample(image_path)
        else:
            image = itk.imread(image_path, itk.F)
            img = itk.GetArrayFromImage(image)
        all_images.append(img)
    all_images = np.array(all_images)

    # get mean and std (computed once over the whole dataset, then cached)
    mean_path = loader_dir + 'mean_img.npy'
    std_path = loader_dir + 'std_img.npy'
    if not os.path.exists(mean_path) or not os.path.exists(std_path):
        mean_image = np.mean(all_images)
        std_image = np.std(all_images)
        np.save(mean_path, mean_image)
        np.save(std_path, std_image)
    else:
        mean_image = np.load(mean_path)
        std_image = np.load(std_path)

    # normalize
    norm_images = []
    for image in all_images:
        norm_images.append([(image-mean_image)/std_image])
    return norm_images
'''
Decreases the size of the image to 3/4 it's original size
'''
def downSample(image_path):
    """Resize the image at ``image_path`` to 3/4 of its size in each axis.

    Shells out to the ``shapeworks`` CLI twice — once to query the image
    size and once to write a resized copy — then reads the copy back with
    ITK and deletes it.

    NOTE(review): the temp file uses a fixed name in the source directory,
    so concurrent calls on images in the same directory would collide —
    confirm single-threaded use.

    :param image_path: path of the image to resize
    :return: resized image as a numpy array
    """
    path = os.path.dirname(image_path)
    temp_path = path + "/temp.nrrd"
    cmd = ["shapeworks",
        "read-image", "--name", image_path,
        "image-info", "--size", str(True)]
    # Assumes the first stdout line is "size: [x, y, z]" — the bracketed
    # vector after the colon is what makeVector() parses.
    output = subprocess.run(cmd, capture_output=True, text=True).stdout.splitlines()
    size = makeVector(output[0].split(":")[1])
    sizex = int(3*size[0]/4)
    sizey = int(3*size[1]/4)
    sizez = int(3*size[2]/4)
    cmd = ["shapeworks",
        "read-image", "--name", image_path,
        "resize", "--sizex", str(sizex),
        "--sizey", str(sizey),
        "--sizez", str(sizez),
        "write-image", "--name", temp_path]
    subprocess.check_call(cmd)
    image = itk.imread(temp_path, itk.F)
    img = itk.GetArrayFromImage(image)
    os.remove(temp_path)
    return img
def makeVector(text):
    """Parse a bracketed, comma-separated vector string into a float64 array.

    e.g. ``"[10, 20, 30]"`` -> ``array([10., 20., 30.])``.

    Fix: the parameter was named ``str``, shadowing the builtin; renamed to
    ``text``. All call sites in this module pass the argument positionally.

    :param text: vector text such as produced by ``shapeworks image-info``
    :return: 1-D numpy float64 array
    """
    tokens = np.array(text.replace("[", "").replace("]", "").split(","))
    return np.asarray(tokens, np.float64)
'''
getTorchDataLoaderHelper
normalizes PCA scores, returns mean and std for reconstruction
'''
def whitenPCAscores(scores, loader_dir):
    """Z-score the PCA scores per component.

    The per-component mean and std are saved to ``loader_dir`` so whitened
    network outputs can later be mapped back to real PCA coordinates.

    :param scores: iterable of per-sample PCA score vectors
    :param loader_dir: directory (ending in a separator) for the .npy cache
    :return: list of whitened score arrays, one per sample
    """
    score_matrix = np.array(scores)
    component_mean = np.mean(score_matrix, axis=0)
    component_std = np.std(score_matrix, axis=0)
    # Persist the statistics needed for reconstruction.
    np.save(loader_dir + 'mean_PCA.npy', component_mean)
    np.save(loader_dir + 'std_PCA.npy', component_std)
    return [(row - component_mean) / component_std for row in score_matrix]
28936 | #!/usr/bin/env python3
import argparse
import gc
import numpy as np
import os
import pandas as pd
import pysam
# Number of SVs to process before resetting pysam (close and re-open file). Avoids a memory leak in pysam.
PYSAM_RESET_INTERVAL = 1000
def get_read_depth(df_subset, bam_file_name, mapq, ref_filename=None):
    """
    Get read depths over one or more breakpoints.

    :param df_subset: Subset dataframe with a column for contigs (first column) and one or more columns for the
        location of breakpoints to quantify.
    :param bam_file_name: Name of alignment file to query.
    :param mapq: Minimum mapping quality.
    :param ref_filename: Optional reference FASTA passed through to pysam (needed for CRAM input).

    :return: A Series with with one element for each row of `df_subset` containing the average of read depths over
        the breakpoints for each variant.
    """

    # Init pysam query count (for memory leak prevention)
    pysam_count = 0

    bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)

    # Work on a copy with normalized column names: CONTIG, LOC_0..LOC_{k-1}.
    df_subset = df_subset.copy()
    n_loc_cols = df_subset.shape[1] - 1  # Number of location columns; depth is averaged for each
    df_subset.columns = ['CONTIG'] + ['LOC_{}'.format(col) for col in range(n_loc_cols)]

    # Init count accumulator column
    df_subset['N'] = np.zeros(df_subset.shape[0], np.float64)
    n_index = df_subset.shape[1] - 1

    # Count reads for every (row, breakpoint) pair, accumulating into N.
    for subset_index in range(n_loc_cols):

        # Use numeric index, skip chromosome column
        subset_index += 1

        for row_index in range(df_subset.shape[0]):
            n_reads = 0

            # Get position
            contig = df_subset.iloc[row_index, 0]
            pos = df_subset.iloc[row_index, subset_index]

            # Reset pysam periodically (avoids memory leak): close, collect,
            # and re-open the alignment file every PYSAM_RESET_INTERVAL queries.
            pysam_count += 1

            if pysam_count >= PYSAM_RESET_INTERVAL:
                if bam_file is not None:
                    bam_file.close()

                gc.collect()

                bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)

                pysam_count = 0

            # Count only properly-paired reads meeting the MAPQ cutoff at this base.
            for segment in bam_file.fetch(str(contig), pos, pos + 1):
                if segment.mapping_quality >= mapq and segment.is_proper_pair:
                    n_reads += 1

            df_subset.iloc[row_index, n_index] += n_reads

    # Return mean of depths (divide by the number of locations)
    return df_subset['N'] / n_loc_cols
def get_ref_contig_sizes(altref_file):
    """
    Get a Series of contig lengths. Includes primary and alt contigs.

    :param altref_file: BED file of contig information where each record spans the whole contig. Must contain
        columns "#CHROM" and "END".

    :return: Series of contig lengths indexed by the contig name.
    """
    contig_table = pd.read_table(altref_file, header=0)
    # Each record spans its entire contig, so END doubles as the length.
    return contig_table.set_index('#CHROM')['END']
def annotate_variant_info(variant_table, ref_len_series, flank):
    """
    Annotate variant info with locations reads will be extracted from.

    Adds flank coordinates on both reference and contig (clamped to the
    [0, contig length] range) plus the midpoint of the variant sequence.
    The table is modified in place and also returned.

    :param variant_table: Variant info table.
    :param ref_len_series: Series of contig sizes.
    :param flank: Number of bases from variant breakpoints.
    :return: `variant_table` with additional fields.
    """
    # Left flanks: step back from the breakpoint, never before position 0.
    variant_table['FLANK_L_REF'] = (variant_table['POS'] - flank).clip(lower=0)
    # Right flanks: step forward, never past the end of the contig.
    variant_table['FLANK_R_REF'] = variant_table.apply(
        lambda row: min(row['END'] + flank, ref_len_series[row['#CHROM']]), axis=1)
    variant_table['FLANK_L_CTG'] = (variant_table['CONTIG_START'] - flank).clip(lower=0)
    variant_table['FLANK_R_CTG'] = variant_table.apply(
        lambda row: min(row['CONTIG_END'] + flank, ref_len_series[row['CONTIG']]), axis=1)

    # Deletions are quantified on the reference; everything else on the contig.
    is_deletion = variant_table['SVTYPE'] == 'DEL'
    variant_table['VAR_CONTIG'] = \
        variant_table['#CHROM'].where(is_deletion, variant_table['CONTIG'])

    ref_midpoint = (variant_table['POS'] + variant_table['END']) / 2
    ctg_midpoint = (variant_table['CONTIG_START'] + variant_table['CONTIG_END']) / 2
    variant_table['VAR_MIDPOINT'] = \
        ref_midpoint.where(is_deletion, ctg_midpoint).astype(np.int64)

    return variant_table
# Main
if __name__ == '__main__':
# Get arguments
arg_parser = argparse.ArgumentParser(description='Get insert size deltas on the reference over the SV breakpoints.')
arg_parser.add_argument('bam', help='BAM file of short read alignments.')
arg_parser.add_argument('bed', help='SV info BED file with columns "#CHROM", "POS", "END", "SVTYPE", "CONTIG", '
'"CONTIG_START", and "CONTIG_END", including a header line.')
arg_parser.add_argument('alt_info', help='BED file of contigs in the reference.')
arg_parser.add_argument('out', help='Output file.')
arg_parser.add_argument('--out_stats',
help='Output depth distribution statistics.')
arg_parser.add_argument('--mapq', type=int, default=20,
help='Minimum mapping quality of aligned reads.')
arg_parser.add_argument('--flank', type=int, default=100,
help='Number of reference bases on each side of the SV for flanking regions.')
arg_parser.add_argument('--ref', nargs='?',
default=None, help='Reference for records are aligned against.')
args = arg_parser.parse_args()
# Check arguments
if not os.path.isfile(args.bam):
raise RuntimeError('Input BAM file does not exist or is not a regular file: {}'.format(args.bam))
if args.mapq < 0:
raise RuntimeError('Mapping quality is negative: {}'.format(args.mapq))
if args.flank < 0:
raise RuntimeError('Flank is negative: {}'.format(args.flank))
args.out = args.out.strip()
if not args.out:
raise RuntimeError('Output file name is empty.')
# Get variant info
df_bed = pd.read_table(args.bed, header=0)
# Get reference chromosome sizes
ref_len = get_ref_contig_sizes(args.alt_info)
# Annotate variant info with locations reads are extracted from
df_bed = annotate_variant_info(df_bed, ref_len, args.flank)
# Count reads over variant midpoint
df_bed['DP_N_VAR'] =\
get_read_depth(df_bed.loc[:, ['VAR_CONTIG', 'VAR_MIDPOINT']], args.bam, args.mapq, ref_filename=args.ref)
# Count reads over reference flank
df_bed['DP_N_PROX_REF'] =\
get_read_depth(df_bed.loc[:, ['#CHROM', 'FLANK_L_REF', 'FLANK_R_REF']], args.bam, args.mapq, ref_filename=args.ref)
# Count reads over contig flank
df_bed['DP_N_PROX_CTG'] =\
get_read_depth(df_bed.loc[:, ['CONTIG', 'FLANK_L_CTG', 'FLANK_R_CTG']], args.bam, args.mapq, ref_filename=args.ref)
# Get global stats
ref_mean = np.mean(df_bed['DP_N_PROX_REF'])
ref_sd = np.std(df_bed['DP_N_PROX_REF'])
if ref_mean == 0:
raise RuntimeError('Cannot compute global depth stats: Global mean of proximal reference breakpoint depths is 0')
# Combine total depths
df_bed['DP_N_VAR_PROX_REF'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_REF']
df_bed['DP_N_VAR_PROX_CTG'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_CTG']
# Set relative ratios
df_bed['DP_VAR_REF'] = df_bed.apply(
lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_REF'] if row['DP_N_VAR_PROX_REF'] > 0 else 0,
axis=1
)
df_bed['DP_VAR_CTG'] = df_bed.apply(
lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_CTG'] if row['DP_N_VAR_PROX_CTG'] > 0 else 0,
axis=1
)
df_bed['DP_VAR_GLOBAL'] = df_bed['DP_N_VAR'] / ref_mean
# Write
df_features = df_bed.loc[
:, ('INDEX', 'DP_VAR_REF', 'DP_VAR_CTG', 'DP_VAR_GLOBAL', 'DP_N_VAR', 'DP_N_PROX_REF', 'DP_N_PROX_CTG')
]
df_features.to_csv(
args.out, sep='\t', index=False
)
# Write stats
if args.out_stats:
with open(args.out_stats, 'w') as stats_out:
stats_out.write('ref_mean\t{:.6f}\n'.format(ref_mean))
stats_out.write('ref_sd\t{:.6f}\n'.format(ref_sd))
| StarcoderdataPython |
79696 | <reponame>Benjamin-Fouquet/Processing-scripts
#!/bin/env python
"""
Simple VTK example in Python to load an STL mesh and display with a manipulator.
<NAME>, 2014-01-28, (c) 2014
"""
import vtk
def render():
    """Load ``magnolia.stl`` and display it in an interactive VTK window."""
    renderer = vtk.vtkRenderer()
    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)

    # Interactor with trackball-style camera manipulation.
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)
    interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())

    # Build the scene: one actor for the hard-coded STL mesh.
    renderer.AddActor(polyDataToActor(loadStl("magnolia.stl")))
    renderer.SetBackground(0.1, 0.1, 0.1)

    # Hand control to the interactor's event loop.
    interactor.Initialize()
    window.Render()
    interactor.Start()
def loadStl(fname):
    """Load the given STL file, and return a vtkPolyData object for it."""
    stl_reader = vtk.vtkSTLReader()
    stl_reader.SetFileName(fname)
    stl_reader.Update()
    return stl_reader.GetOutput()
def polyDataToActor(polydata):
    """Wrap the provided vtkPolyData object in a mapper and an actor, returning
    the actor.

    Fix: for VTK >= 6 the original called ``polydata.GetProducerPort()``,
    which was removed in the VTK 6 pipeline rework; ``SetInputData`` is the
    supported way to feed a standalone vtkPolyData to a mapper.
    """
    mapper = vtk.vtkPolyDataMapper()
    if vtk.VTK_MAJOR_VERSION <= 5:
        mapper.SetInput(polydata)
    else:
        # VTK 6+ connects data objects directly rather than via producer ports.
        mapper.SetInputData(polydata)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    #actor.GetProperty().SetRepresentationToWireframe()
    actor.GetProperty().SetColor(0.5, 0.5, 1.0)
    return actor
# Script entry point: the viewer launches as soon as this file is executed.
render()
| StarcoderdataPython |
1789555 | <reponame>Guya-LTD/user
# -*- coding: utf-8 -*-
"""Copyright Header Details
Copyright
---------
Copyright (C) Guya , PLC - All Rights Reserved (As Of Pending...)
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
LICENSE
-------
This file is subject to the terms and conditions defined in
file 'LICENSE.txt', which is part of this source code package.
Authors
-------
* [<NAME>](https://github.com/Simonbelete)
Project
-------
* Name:
- Guya E-commerce & Guya Express
* Sub Project Name:
- User Service
* Description
- User Service for Guya
"""
import pytest
from faker import Faker
from .factory.user_factory import UserFactory, Session
from .factory.credential_factory import CredentialFactory
class TestUserPersistence():
    """Validation and persistence tests for the User model factories."""

    def setup_class(self):
        # Prepare a new, clean session shared by all tests in this class.
        self.session = Session()
        # Faker instance for generating realistic test data.
        self.faker = Faker()

    def test_creation(self):
        """A factory-built user with a credential gets a primary key."""
        user = UserFactory()
        user.credential = CredentialFactory()
        assert user.id != None

    def test_name_with_empty_string(self):
        with pytest.raises(ValueError):
            UserFactory(name = '')

    def test_name_with_invalid_charset(self):
        with pytest.raises(ValueError):
            UserFactory(name = '-' + self.faker.name())

    def test_email_with_empty_string(self):
        with pytest.raises(ValueError):
            UserFactory(email = '')

    #def test_email_with_invalid_charset(self):
        #with pytest.raises(ValueError):
            #UserFactory(email = '*' + self.faker.email())

    def test_email_with_out_at_sign(self):
        with pytest.raises(ValueError):
            UserFactory(email = self.faker.first_name())

    def test_pnum_with_less_charset_length(self):
        # Fix: the parameter was misspelled 'slef'. pytest still passed the
        # instance positionally, but any use of 'self' would have failed.
        with pytest.raises(ValueError):
            UserFactory(pnum = '1234567')
| StarcoderdataPython |
99681 | import asyncio
import logging
import socket
import websockets
from gabriel_protocol import gabriel_pb2
from collections import namedtuple
# Template for the server's websocket endpoint, filled with host/port.
URI_FORMAT = 'ws://{host}:{port}'

logger = logging.getLogger(__name__)
websockets_logger = logging.getLogger(websockets.__name__)

# The entire payload will be printed if this is allowed to be DEBUG
websockets_logger.setLevel(logging.INFO)

# Pairs an async frame producer with the name of the source it feeds.
ProducerWrapper = namedtuple('ProducerWrapper', ['producer', 'source_name'])
# It isn't necessary to do this as of Python 3.6 because
# "The socket option TCP_NODELAY is set by default for all TCP connections"
# per https://docs.python.org/3/library/asyncio-eventloop.html
# However, this seems worth keeping in case the default behavior changes.
class NoDelayProtocol(websockets.client.WebSocketClientProtocol):
    """Client protocol that disables Nagle's algorithm on the new socket."""

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        super().connection_made(transport)
        raw_socket = transport.get_extra_info('socket')
        raw_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
class WebsocketClient:
    """Gabriel websocket client: runs one producer task per source plus one
    consumer task on a single asyncio event loop until the connection ends."""

    def __init__(self, host, port, producer_wrappers, consumer):
        '''
        producer should take no arguments and return an instance of
        gabriel_pb2.InputFrame.

        consumer should take one gabriel_pb2.ResultWrapper and does not need to
        return anything.
        '''
        # Set once the server's welcome message announces consumed sources.
        self._welcome_event = asyncio.Event()
        self._sources = {}
        self._running = True
        self._uri = URI_FORMAT.format(host=host, port=port)
        self.producer_wrappers = producer_wrappers
        self.consumer = consumer

    def launch(self, message_max_size=None):
        """Connect to the server and run producer/consumer tasks until one exits."""
        event_loop = asyncio.get_event_loop()

        try:
            self._websocket = event_loop.run_until_complete(
                websockets.connect(self._uri, create_protocol=NoDelayProtocol,
                                   max_size=message_max_size))
        except ConnectionRefusedError:
            logger.error('Could not connect to server')
            return

        # We don't waste time checking TCP_NODELAY in production.
        # Note that websocket.transport is an undocumented property.
        # sock = self._websocket.transport.get_extra_info('socket')
        # assert(sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) == 1)

        consumer_task = asyncio.ensure_future(self._consumer_handler())
        tasks = [
            asyncio.ensure_future(self._producer_handler(
                producer_wrapper.producer, producer_wrapper.source_name))
            for producer_wrapper in self.producer_wrappers
        ]
        tasks.append(consumer_task)

        # Run until the first task finishes (e.g. the connection closes),
        # then cancel whatever is still pending.
        _, pending = event_loop.run_until_complete(asyncio.wait(
            tasks, return_when=asyncio.FIRST_COMPLETED))
        for task in pending:
            task.cancel()
        logger.info('Disconnected From Server')

    def get_source_names(self):
        """Names of sources the server consumes (populated after welcome)."""
        return self._sources.keys()

    def stop(self):
        """Ask the handler loops to exit after their current iteration."""
        self._running = False
        logger.info('stopping server')

    async def _consumer_handler(self):
        """Receive ToClient messages and dispatch welcome/response handling."""
        while self._running:
            try:
                raw_input = await self._websocket.recv()
            except websockets.exceptions.ConnectionClosed:
                return  # stop the handler

            logger.debug('Recieved input from server')
            to_client = gabriel_pb2.ToClient()
            to_client.ParseFromString(raw_input)

            if to_client.HasField('welcome'):
                self._process_welcome(to_client.welcome)
            elif to_client.HasField('response'):
                self._process_response(to_client.response)
            else:
                raise Exception('Empty to_client message')

    def _process_welcome(self, welcome):
        # One token bucket per consumed source; unblocks the producers.
        for source_name in welcome.sources_consumed:
            self._sources[source_name] = _Source(welcome.num_tokens_per_source)
        self._welcome_event.set()

    def _process_response(self, response):
        result_wrapper = response.result_wrapper
        if (result_wrapper.status == gabriel_pb2.ResultWrapper.SUCCESS):
            self.consumer(result_wrapper)
        elif (result_wrapper.status ==
              gabriel_pb2.ResultWrapper.NO_ENGINE_FOR_SOURCE):
            raise Exception('No engine for source')
        else:
            status = result_wrapper.Status.Name(result_wrapper.status)
            logger.error('Output status was: %s', status)

        if response.return_token:
            self._sources[response.source_name].return_token()

    async def _producer_handler(self, producer, source_name):
        '''
        Loop waiting until there is a token available. Then calls producer to
        get the gabriel_pb2.InputFrame to send.
        '''
        # Sources are unknown until the server's welcome message arrives.
        await self._welcome_event.wait()
        source = self._sources.get(source_name)
        assert source is not None, (
            "No engines consume frames from source: {}".format(source_name))

        while self._running:
            await source.get_token()

            input_frame = await producer()
            if input_frame is None:
                # Nothing to send; give the token back and try again.
                source.return_token()
                logger.info('Received None from producer')
                continue

            from_client = gabriel_pb2.FromClient()
            from_client.frame_id = source.get_frame_id()
            from_client.source_name = source_name
            from_client.input_frame.CopyFrom(input_frame)

            try:
                await self._send_from_client(from_client)
            except websockets.exceptions.ConnectionClosed:
                return  # stop the handler

            logger.debug('num_tokens for %s is now %d', source_name,
                         source.get_num_tokens())
            source.next_frame()

    async def _send_from_client(self, from_client):
        # Removing this method will break measurement_client
        await self._websocket.send(from_client.SerializeToString())
class _Source:
def __init__(self, num_tokens):
self._num_tokens = num_tokens
self._event = asyncio.Event()
self._frame_id = 0
def return_token(self):
self._num_tokens += 1
self._event.set()
async def get_token(self):
while self._num_tokens < 1:
logger.debug('Waiting for token')
self._event.clear() # Clear because we definitely want to wait
await self._event.wait()
self._num_tokens -= 1
def get_num_tokens(self):
return self._num_tokens
def get_frame_id(self):
return self._frame_id
def next_frame(self):
self._frame_id += 1
| StarcoderdataPython |
3288279 | #!/usr/bin/python
# $1 The readme to be transformed
# $2 brief description
# $pwd: dest dir
import sys
import os.path
def readFirst(line, brief, out):
    """Emit the Doxygen page header for the readme's first line.

    With a brief description, the title line and an @brief tag are written;
    otherwise the title is dropped. A [TOC] marker is always appended.
    """
    if not line.startswith("# "):
        raise ValueError("Expected first line to start with '# '")
    if brief is not None:
        out.write(line + "\n")
        out.write("@brief " + brief + "\n\n")
    out.write("[TOC]\n\n")
# Module-level counter giving every rewritten heading a unique Doxygen label.
readCounter = 0

def readMore(line, label, offset, out):
    """Transform one body line: '##'-headings are promoted one level and
    tagged with a unique Doxygen section label; other lines pass through."""
    global readCounter
    if line.startswith("##"):
        out.write(line[1:] + " {{#qtautoupdater_{}_label_{}}}\n".format(label, readCounter))
        readCounter += 1
    else:
        out.write(line + "\n")
# Read CLI arguments: the readme path is required, a brief description optional.
readme = sys.argv[1]
brief = sys.argv[2] if len(sys.argv) > 2 else None

# Output goes to the readme's basename in the CWD; the page label is its stem.
doxme = os.path.basename(readme)
label, _ = os.path.splitext(doxme)

# Fix: both files were opened without any cleanup path, so an exception while
# transforming left the handles open. ``with`` now guarantees they are closed.
with open(readme, "r") as inFile, open(doxme, "w") as outFile:
    isFirst = True
    for line in inFile:
        # Strip the trailing newline before handing the line off.
        if isFirst:
            readFirst(line[:-1], brief, outFile)
            isFirst = False
        else:
            readMore(line[:-1], label, 1 if brief is None else 0, outFile)
| StarcoderdataPython |
4827684 | <filename>homeassistant/components/zha/core/channels/protocol.py
"""
Protocol channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
import zigpy.zcl.clusters.protocol as protocol
from .. import registries
from ..channels import ZigbeeChannel
_LOGGER = logging.getLogger(__name__)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.AnalogInputExtended.cluster_id)
class AnalogInputExtended(ZigbeeChannel):
    """Channel for the ZCL Analog Input (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.AnalogInputRegular.cluster_id)
class AnalogInputRegular(ZigbeeChannel):
    """Channel for the ZCL Analog Input (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.AnalogOutputExtended.cluster_id)
class AnalogOutputExtended(ZigbeeChannel):
    """Channel for the ZCL Analog Output (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.AnalogOutputRegular.cluster_id)
class AnalogOutputRegular(ZigbeeChannel):
    """Channel for the ZCL Analog Output (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.AnalogValueExtended.cluster_id)
class AnalogValueExtended(ZigbeeChannel):
    """Channel for the ZCL Analog Value (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.AnalogValueRegular.cluster_id)
class AnalogValueRegular(ZigbeeChannel):
    """Channel for the ZCL Analog Value (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.BacnetProtocolTunnel.cluster_id)
class BacnetProtocolTunnel(ZigbeeChannel):
    """Channel for the ZCL BACnet Protocol Tunnel cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.BinaryInputExtended.cluster_id)
class BinaryInputExtended(ZigbeeChannel):
    """Channel for the ZCL Binary Input (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.BinaryInputRegular.cluster_id)
class BinaryInputRegular(ZigbeeChannel):
    """Channel for the ZCL Binary Input (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.BinaryOutputExtended.cluster_id)
class BinaryOutputExtended(ZigbeeChannel):
    """Channel for the ZCL Binary Output (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.BinaryOutputRegular.cluster_id)
class BinaryOutputRegular(ZigbeeChannel):
    """Channel for the ZCL Binary Output (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.BinaryValueExtended.cluster_id)
class BinaryValueExtended(ZigbeeChannel):
    """Channel for the ZCL Binary Value (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.BinaryValueRegular.cluster_id)
class BinaryValueRegular(ZigbeeChannel):
    """Channel for the ZCL Binary Value (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.GenericTunnel.cluster_id)
class GenericTunnel(ZigbeeChannel):
    """Channel for the ZCL Generic Tunnel cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.MultistateInputExtended.cluster_id)
class MultiStateInputExtended(ZigbeeChannel):
    """Channel for the ZCL Multistate Input (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.MultistateInputRegular.cluster_id)
class MultiStateInputRegular(ZigbeeChannel):
    """Channel for the ZCL Multistate Input (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.MultistateOutputExtended.cluster_id)
class MultiStateOutputExtended(ZigbeeChannel):
    """Channel for the ZCL Multistate Output (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.MultistateOutputRegular.cluster_id)
class MultiStateOutputRegular(ZigbeeChannel):
    """Channel for the ZCL Multistate Output (Regular) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.MultistateValueExtended.cluster_id)
class MultiStateValueExtended(ZigbeeChannel):
    """Channel for the ZCL Multistate Value (Extended) cluster."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(protocol.MultistateValueRegular.cluster_id)
class MultiStateValueRegular(ZigbeeChannel):
    """Channel for the ZCL Multistate Value (Regular) cluster."""
| StarcoderdataPython |
11369098 | <filename>model.py
# -*- coding: utf-8 -*-
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# Association table implementing the many-to-many Bookmark <-> Tag relation.
bookmarks_tags = db.Table('bookmarks_tags',
    db.Column('bookmark_id', db.Integer, db.ForeignKey('bookmark.id')),
    db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
class Bookmark(db.Model):
    """A user's bookmark of a reference, with comment, read and star flags."""
    __tablename__ = 'bookmark'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    reference_id = db.Column(db.Integer, db.ForeignKey('reference.id'))
    comment = db.Column(db.Text())            # raw comment text as entered
    # Rendered form of the comment — presumably HTML/markdown output; confirm
    # against the code that populates it.
    rendered_comment = db.Column(db.Text())
    isread = db.Column(db.Boolean)
    star = db.Column(db.Boolean)
    user = db.relationship("User", back_populates="bookmarks")
    reference = db.relationship("Reference", back_populates="bookmarks")
    tags = db.relationship("Tag", secondary=bookmarks_tags, back_populates="bookmarks")
    def __init__(self, comment):
        # Only the raw comment is set here; relations are assigned by callers.
        self.comment = comment
class User(db.Model):
    """Application user; implements the Flask-Login user interface."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True)
    email = db.Column(db.String, unique=True)
    # NOTE(review): stored exactly as given to __init__ — confirm hashing
    # happens before construction.
    password = db.Column(db.String)
    authenticated = db.Column(db.Boolean, default=False)
    bookmarks = db.relationship("Bookmark", back_populates="user")
    def __init__(self, name, email, password):
        self.name = name
        self.email = email
        self.password = password
    def is_active(self):
        """True, as all users are active."""
        return True
    def get_id(self):
        """Return the email address to satisfy Flask-Login's requirements."""
        return self.email
    def is_authenticated(self):
        """Return True if the user is authenticated."""
        return self.authenticated
    def is_anonymous(self):
        """False, as anonymous users aren't supported."""
        return False
class Reference(db.Model):
    """A publication, identifiable by PubMed id, DOI, and/or arXiv id."""
    __tablename__ = 'reference'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(256), unique=True)
    abstract = db.Column(db.Text())
    pubmed_id = db.Column(db.String(10))
    doi = db.Column(db.String(256))
    arxiv_id = db.Column(db.String(256))
    # Presumably the number of stars across users — confirm where it is updated.
    starcount = db.Column(db.Integer, default=0)
    bookmarks = db.relationship("Bookmark", back_populates="reference")
    def __init__(self, **k):
        # Fallback strings keep the display fields populated when metadata
        # lookup fails; identifier fields default to None.
        self.title = k.get("title", "Title not found")
        self.abstract = k.get("abstract", "abstract not found")
        self.pubmed_id = k.get("pubmed_id", None)
        self.doi = k.get("doi", None)
        self.arxiv_id = k.get("arxiv_id", None)
class Tag(db.Model):
    """A label attachable to many bookmarks (many-to-many via bookmarks_tags)."""
    __tablename__ = 'tag'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), unique=True)
    bookmarks = db.relationship("Bookmark", secondary=bookmarks_tags, back_populates="tags")
    def __init__(self, name):
        self.name = name
| StarcoderdataPython |
5028297 | #!/usr/bin/env python
"""
Convert a URL or a path into different formats, e.g., Jupyter URL, GitHub, Git
path.
> url.py https://github.com/.../.../Task229_Exploratory_analysis_of_ST_data.ipynb
file_name=
/Users/saggese/src/.../.../oil/ST/Task229_Exploratory_analysis_of_ST_data.ipynb
github_url=
https://github.com/.../.../Task229_Exploratory_analysis_of_ST_data.ipynb
jupyter_url=
http://localhost:10001/tree/oil/ST/Task229_Exploratory_analysis_of_ST_data.ipynb
"""
import argparse
import logging
import os
import sys
import helpers.dbg as dbg
import helpers.git as git
import helpers.network as hnetwor
import helpers.parser as hparse
import helpers.printing as hprint
_LOG = logging.getLogger(__name__)
def _print(tag: str, val: str, verbose: bool) -> None:
if verbose:
print("\n# %s\n%s" % (hprint.color_highlight(tag, "green"), val))
else:
print("\n" + val)
# #############################################################################
def _parse() -> argparse.ArgumentParser:
    """Build the CLI parser: positional URLs/paths, --short flag, verbosity."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument("positional", nargs="*")
    arg_parser.add_argument(
        "--short", action="store_true", help="Short output form"
    )
    # Project helper installs the standard verbosity option.
    hparse.add_verbosity_arg(arg_parser)
    return arg_parser
def _main(parser: argparse.ArgumentParser) -> None:
    """Resolve the given URL/path into file name, GitHub URL, and Jupyter URL,
    printing each form and verifying that the derived URLs resolve."""
    args = parser.parse_args()
    dbg.init_logger(verbosity=args.log_level, force_print_format=True)
    # Exactly one URL/path must be supplied.
    positional = args.positional
    if len(positional) != 1:
        print("Need to specify one 'url'")
        sys.exit(-1)
    # Verbose (labeled) output unless --short was requested.
    verbosity = not args.short
    github_prefix, jupyter_prefix = hnetwor.get_prefixes()
    _print("github_prefix", github_prefix, verbosity)
    _print("jupyter_prefix", jupyter_prefix, verbosity)
    # Extract the repo-relative file name from the input.
    url = positional[0]
    rel_file_name = hnetwor.get_file_name(url)
    _print("rel_file_name", rel_file_name, verbosity)
    if not rel_file_name:
        msg = "Can't extract the name of a file from '%s'" % url
        raise ValueError(msg)
    #
    _print("file_name", rel_file_name, verbosity)
    # Absolute path inside the (super-module) git client.
    abs_file_name = git.get_client_root(super_module=True) + "/" + rel_file_name
    _print("abs file_name", abs_file_name, verbosity)
    #
    github_url = github_prefix + "/" + rel_file_name
    _print("github_url", github_url, verbosity)
    #
    jupyter_url = jupyter_prefix + "/" + rel_file_name
    _print("jupyter_url", jupyter_url, verbosity)
    # For notebooks, also show the command that opens a published copy.
    if rel_file_name.endswith(".ipynb"):
        cmd = "publish_notebook.py --file %s --action open" % abs_file_name
        _print("read notebook", cmd, verbosity)
    #
    print()
    if not os.path.exists(abs_file_name):
        _LOG.warning("'%s' doesn't exist", abs_file_name)
    # Check that the derived URLs actually resolve.
    hnetwor.check_url(github_url)
    hnetwor.check_url(jupyter_url)
if __name__ == "__main__":
_main(_parse())
| StarcoderdataPython |
12805635 | <gh_stars>10-100
"""
A custom Model Field for tagging.
"""
from django.db import IntegrityError
from django.db.models import signals
from django.db.models.fields import CharField
from django.utils.translation import ugettext_lazy as _
from tagging import settings
from tagging.models import Tag, Synonym
from tagging.utils import edit_string_for_tags, parse_tag_input
class TagField(CharField):
    """
    A "special" character field that actually works as a relationship to tags
    "under the hood". This exposes a space-separated string of tags, but does
    the splitting/reordering/etc. under the hood.
    """
    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 255)
        kwargs['blank'] = kwargs.get('blank', True)
        # Optional callable mapping a tag name to an iterable of synonym
        # names, applied after each save.
        # Fix: the original used ``kwargs.has_key(...)``, which exists only
        # on Python 2; ``dict.pop`` with a default is equivalent and portable.
        self.create_synonyms = kwargs.pop('create_synonyms', None)
        super(TagField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        """Install this field as a descriptor and hook the model's save signals."""
        super(TagField, self).contribute_to_class(cls, name)

        # Make this object the descriptor for field access.
        setattr(cls, self.name, self)

        # Save tags back to the database post-save
        signals.post_save.connect(self._post_save, cls, True)
        signals.pre_save.connect(self._pre_save, cls, True)

    def __get__(self, instance, owner=None):
        """
        Tag getter. Returns an instance's tags if accessed on an instance, and
        all of a model's tags if called on a class. That is, this model::

            class Link(models.Model):
                ...
                tags = TagField()

        Lets you do both of these::

            >>> l = Link.objects.get(...)
            >>> l.tags
            'tag1 tag2 tag3'

            >>> Link.tags
            'tag1 tag2 tag3 tag4'

        """
        # Handle access on the model (i.e. Link.tags)
        if instance is None:
            return edit_string_for_tags(Tag.objects.usage_for_model(owner))

        tags = self._get_instance_tag_cache(instance)
        if tags is None:
            if instance.pk is None:
                self._set_instance_tag_cache(instance, '')
            else:
                # Lazily populate the cache from the Tag table.
                self._set_instance_tag_cache(
                    instance, edit_string_for_tags(Tag.objects.get_for_object(instance)))
        return self._get_instance_tag_cache(instance)

    def __set__(self, instance, value):
        """
        Set an object's tags.
        """
        if instance is None:
            raise AttributeError(_('%s can only be set on instances.') % self.name)
        if settings.FORCE_LOWERCASE_TAGS and value is not None:
            value = value.lower()
        self._set_instance_tag_cache(instance, value)

    def _pre_save(self, **kwargs):
        """
        Normalize the cached tag string (parse then re-serialize) right
        before the instance is saved.
        """
        tags = self._get_instance_tag_cache(kwargs['instance'])
        tags = parse_tag_input(tags)
        instance = kwargs['instance']
        self._set_instance_tag_cache(
            instance, edit_string_for_tags(tags))

    def _post_save(self, **kwargs):
        """
        Save tags back to the database, creating configured synonyms.
        """
        tags = self._get_instance_tag_cache(kwargs['instance'])
        if tags is not None:
            Tag.objects.update_tags(kwargs['instance'], tags)
            if self.create_synonyms is not None:
                tags = parse_tag_input(tags)
                for tag in tags:
                    synonyms = self.create_synonyms(tag)
                    try:
                        tag = Tag.objects.get(name=tag)
                        for synonym in synonyms:
                            try:
                                synonym = Synonym.objects.create(name=synonym, tag=tag)
                            except IntegrityError:
                                # Synonym already exists; nothing to do.
                                pass
                    except Tag.DoesNotExist:
                        pass

    def __delete__(self, instance):
        """
        Clear all of an object's tags.
        """
        self._set_instance_tag_cache(instance, '')

    def _get_instance_tag_cache(self, instance):
        """
        Helper: get an instance's tag cache.
        """
        return getattr(instance, '_%s_cache' % self.attname, None)

    def _set_instance_tag_cache(self, instance, tags):
        """
        Helper: set an instance's tag cache.
        """
        setattr(instance, '_%s_cache' % self.attname, tags)

    def get_internal_type(self):
        return 'CharField'

    def formfield(self, **kwargs):
        from tagging import forms
        defaults = {'form_class': forms.TagField}
        defaults.update(kwargs)
        return super(TagField, self).formfield(**defaults)
| StarcoderdataPython |
1818858 | /*!
* @file mdm_T1FitterIR.h
* @brief Class for estimating T1 (and M0) in a single voxel using inversion recovery method
* @details
* @author <NAME> (c) Copyright QBI Lab, University of Manchester 2020
*/
#ifndef MDM_T1FITERRIR_HDR
#define MDM_T1FITERRIR_HDR
#include "mdm_api.h"
#include "mdm_T1FitterBase.h"
#include "mdm_ErrorTracker.h"
//! Class for estimating T1 (and M0) in a single voxel using inversion recovery method
class mdm_T1FitterIR : public mdm_T1FitterBase {

public:

  //! Constructor from set of inversion times and repetition time
  /*!
  \param TIs vector of inversion recovery times in ms
  \param TR repetition time in ms
  */
  MDM_API mdm_T1FitterIR(const std::vector<double> &TIs, const double TR);

  //! Default destructor
  /*!
  */
  MDM_API ~mdm_T1FitterIR();

  //! Set inversion recovery times
  /*!
  \param TIs vector of inversion recovery times in msecs
  */
  MDM_API void setTIs(const std::vector<double> &TIs);

  //! Set repetition time
  /*!
  \param TR repetition time
  */
  MDM_API void setTR(const double TR);

  //! Set inputs that vary on per voxel basis from which T1 will be estimated
  /*!
  If using B1 correction, inputs should be an nIR + 1 element vector, with signals in the first
  nIR elements and the B1 correction at the end. Otherwise an nIR element vector of signals.
  \param inputs vector of signals (and B1 correction) from which T1 will be estimated
  */
  MDM_API void setInputs(const std::vector<double> &inputs);

  //! Perform T1 fit using inversion recovery method
  /*!
  \param T1value reference to hold computed T1
  \param M0value reference to hold computed M0
  */
  MDM_API mdm_ErrorTracker::ErrorCode fitT1(double &T1value, double &M0value);

  //! Set inputs for computing T1 from a single line of an input data stream buffer
  /*!
  All sub-classes must implement this method.
  \param ifs input data stream
  \param nSignals number of signals in sample
  \return false if streams EOF flag is reached, true otherwise
  */
  MDM_API virtual bool setInputsFromStream(std::istream& ifs,
    const int nSignals);

  //! Return minimum inputs required, must be implemented by derived subclass
  /*
  \return minimum number of input signals required for T1 fitting method
  */
  MDM_API int minimumInputs() const;

  //! Return maximum inputs allowed, must be implemented by derived subclass
  /*
  \return maximum number of input signals allowed in T1 fitting method
  */
  MDM_API int maximumInputs() const;

  //! Compute signal given T1, M0, IR and TR
  /*!
  NOTE(review): the original comment said "SPGR equation", but this is the
  inversion-recovery fitter -- confirm which signal equation is implemented.
  \param T1 in ms
  \param M0 magnetisation constant
  \param IR inversion recovery time in ms
  \param TR repetition time in ms
  \return signal
  */
  MDM_API static double T1toSignal(
    const double T1, const double M0, const double IR, const double TR);

private:

  //! Compute the model signal and its partial derivatives w.r.t. T1 and M0.
  void computeSignalGradient(const double &T1, const double &M0,
    const double &TI,
    double &signal, double &signal_dT1, double &signal_dM0);

  //! Sum-of-squared-errors objective and gradient over all TIs.
  void computeSSEGradient(
    const alglib::real_1d_array &x, double &func, alglib::real_1d_array &grad);

  //! Static trampoline for ALGLIB: forwards to computeSSEGradient via *context.
  static void computeSSEGradientAlglib(
    const alglib::real_1d_array &x, double &func, alglib::real_1d_array &grad,
    void *context) {
    static_cast<mdm_T1FitterIR*>(context)->computeSSEGradient(
      x, func, grad);
  }

  std::vector<double> TIs_; //!< Inversion recovery times (ms)
  double TR_;               //!< Repetition time (ms)
};
#endif /* MDM_T1FITERRIR_HDR */
| StarcoderdataPython |
121486 | <filename>rha/core/admin.py
# coding: utf-8
from django.contrib import admin
from models import (Enterprise, Contact, Partner,
Step, Gallery, Course, Objective,
Public, Team, Cost, Graduation,
Institute, Subscribe)
from forms import (CourseModelForm, ObjectiveModelForm, PublicModelForm,
ContactModelForm)
class EnterpriseAdmin(admin.ModelAdmin):
    """Admin list/search configuration for Enterprise records."""
    list_display = ('name', 'phone1', 'phone2', 'phone3', 'email')
    search_fields = ['name', 'description', 'address', 'cep',
                     'district', 'city', 'state',
                     'phone1', 'phone2', 'phone3', 'email']
class ContactAdmin(admin.ModelAdmin):
    """Admin configuration for Contact; uses a custom form for editing."""
    list_display = ['content']
    search_fields = ['content']
    form = ContactModelForm
class CourseAdmin(admin.ModelAdmin):
    """Admin configuration for Course; uses a custom form for editing."""
    list_display = ['description']
    search_fields = ['description']
    form = CourseModelForm
class ObjectiveAdmin(admin.ModelAdmin):
    """Admin configuration for Objective; uses a custom form for editing."""
    list_display = ['description']
    search_fields = ['description']
    form = ObjectiveModelForm
class PublicAdmin(admin.ModelAdmin):
    """Admin configuration for Public (target audience); custom edit form."""
    list_display = ['description']
    search_fields = ['description']
    form = PublicModelForm
class SubscribeAdmin(admin.ModelAdmin):
    """Admin configuration for course subscriptions (filter/list/search)."""
    list_filter = ['module', 'state', 'city', 'institute__name',
                   'graduation__name', 'special']
    list_display = ['subscribe_date', 'name', 'module',
                    'email', 'mobile', 'phone', 'state',
                    'city']
    search_fields = ['subscribe_date', 'module', 'name', 'cpf',
                     'email', 'mobile', 'phone', 'cep', 'address',
                     'number', 'complement', 'district', 'city',
                     'state', 'graduation__name', 'year_conclusion',
                     'institute__name', 'special', 'which']
class TeamAdmin(admin.ModelAdmin):
    """Admin configuration for Team members."""
    list_display = ['name', 'link', 'description', 'image']
    search_fields = ['name', 'link', 'description', 'image']
class InstituteAdmin(admin.ModelAdmin):
    """Admin configuration for Institute; paginated since the list is long."""
    search_fields = ['name',]
    list_per_page = 30
# Register all models with the Django admin site, attaching the custom
# ModelAdmin classes defined above where one exists.
admin.site.register(Enterprise, EnterpriseAdmin)
admin.site.register(Contact, ContactAdmin)
admin.site.register(Partner)
admin.site.register(Step)
admin.site.register(Gallery)
admin.site.register(Course, CourseAdmin)
admin.site.register(Objective, ObjectiveAdmin)
admin.site.register(Public, PublicAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(Cost)
admin.site.register(Graduation)
admin.site.register(Institute, InstituteAdmin)
admin.site.register(Subscribe, SubscribeAdmin)
| StarcoderdataPython |
4924221 | <reponame>open-data-toronto/ckan-customization-open-data-toronto
# File-extension -> MIME type overrides for formats not known natively.
CUSTOM_MIMETYPES = {"gpkg": "application/geopackage+vnd.sqlite3"}

# Formats that are packaged/served as ZIP archives.
ZIPPED_FORMATS = ["SHP"]

# Default parameters for catalogue (package) searches: page size, ordering,
# starting offset.
CATALOGUE_SEARCH = {"rows": 10, "sort": "score desc", "start": 0}

# Output formats offered for geospatial vs. plain tabular resources.
GEOSPATIAL_FORMATS = {"CSV", "GEOJSON", "GPKG", "SHP"}
TABULAR_FORMATS = {"CSV", "JSON", "XML"}

# Download defaults: format, EPSG projection code, and paging.
# NOTE(review): limit "0" presumably means "no limit" -- confirm against the
# download endpoint that consumes these values.
DOWNLOAD_FORMAT = "csv"
DOWNLOAD_PROJECTION = "4326"
DOWNLOAD_OFFSET = "0"
DOWNLOAD_LIMIT = "0"

# Maximum allowed length for a metadata field value.
MAX_FIELD_LENGTH = 350

# Package fields removed from the schema.
REMOVED_FIELDS = ["author", "maintainer", "version"]
# Fields treated as tag lists.
TAG_LIST_FIELDS = ["civic_issues", "formats", "topics"]

# Location of the catalogue data-quality ("DQ") scorecard package/resource.
DQ = {"package": "catalogue-quality-scores", "resource": "catalogue-scorecard"}
| StarcoderdataPython |
5123294 | <reponame>groovetch/edx-figures
"""
# Background
Figures originally calculated completions as the certificates generated
As of mid 2020, we are reworking metrics so that course completsions are based
off of gradable sections likely followed by using or adapting the completion
aggregator
We need to rename our current "completion" to indicate certificates and not
completsions
However, Figures UI and API still have the "completions" label for certificates
# This Test Module
This test module tests certificate metrics. If you read "completions" in this
test module, think "certificates" until we do our relabelling
"""
import pytest
from datetime import date
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from figures.helpers import days_in_month
from figures.metrics import get_total_course_completions_for_time_period
from figures.models import CourseDailyMetrics
from tests.factories import (
CourseDailyMetricsFactory,
CourseOverviewFactory,
OrganizationFactory,
OrganizationCourseFactory,
SiteFactory
)
from tests.helpers import organizations_support_sites
@pytest.fixture
@pytest.mark.django_db
def cdm_test_data(db, settings):
    """Build CourseDailyMetrics data to test certificate counts.

    Creates one site with two courses, a single previous-month record (which
    must NOT be counted) and four current-month records whose completions sum
    to ``expected_cert_count``.
    """
    our_site = SiteFactory()
    mock_today = date(year=2020, month=6, day=7)
    last_month = mock_today - relativedelta(months=1)
    courses = [CourseOverviewFactory() for i in range(2)]

    # Create data for previous month. Just need one record
    # purpose is to make sure it is not included in our production code request
    # NOTE(review): these instances are constructed but never .save()d --
    # confirm whether they were meant to be persisted.
    prev_month_cdm = [CourseDailyMetrics(site=our_site,
                                         course_id=str(courses[0].id),
                                         date_for=last_month)]

    # Create data for our current month
    curr_month_cdm = []
    cdm_data = [
        dict(day=1, course_id=str(courses[0].id), num_learners_completed=1),
        dict(day=6, course_id=str(courses[0].id), num_learners_completed=10),
        dict(day=1, course_id=str(courses[1].id), num_learners_completed=2),
        dict(day=6, course_id=str(courses[1].id), num_learners_completed=20),
    ]
    # Sum of num_learners_completed above.
    expected_cert_count = 30
    for rec in cdm_data:
        date_for = date(year=mock_today.year, month=mock_today.month, day=rec['day'])
        cdm = CourseDailyMetricsFactory(
            site=our_site,
            course_id=rec['course_id'],
            date_for=date_for,
            num_learners_completed=rec['num_learners_completed'])
        curr_month_cdm.append(cdm)

    if organizations_support_sites():
        # Multisite mode: link the courses to an org bound to our site.
        settings.FEATURES['FIGURES_IS_MULTISITE'] = True
        our_org = OrganizationFactory(sites=[our_site])
        for course in courses:
            OrganizationCourseFactory(organization=our_org,
                                      course_id=str(course.id))

    return dict(
        mock_today=mock_today,
        our_site=our_site,
        courses=courses,
        prev_month_cdm=prev_month_cdm,
        curr_month_cdm=curr_month_cdm,
        expected_cert_count=expected_cert_count,
    )
def test_certificate_metrics(cdm_test_data):
    """Current-month completion count equals the fixture's expected total.

    Time is frozen to the fixture's mock "today" so the queried month is
    deterministic.
    """
    date_for = cdm_test_data['mock_today']
    site = cdm_test_data['our_site']
    expected_cert_count = cdm_test_data['expected_cert_count']
    freezer = freeze_time(date_for)
    freezer.start()
    # Query the full calendar month containing date_for.
    start_date = date(year=date_for.year, month=date_for.month, day=1)
    end_date = date(year=date_for.year,
                    month=date_for.month,
                    day=days_in_month(date_for))
    count = get_total_course_completions_for_time_period(site=site,
                                                         start_date=start_date,
                                                         end_date=end_date)
    freezer.stop()
    assert count == expected_cert_count
| StarcoderdataPython |
1627638 |
def test_get_version(self):
    """Tests the get_version function."""
    # ${python_module_name} is a template placeholder substituted with the
    # generated module's name when this file is rendered.
    version = ${python_module_name}.get_version()
    self.assertIsNotNone(version)
| StarcoderdataPython |
9749824 | <reponame>cdagnino/LearningModels
import src
import numpy as np
from scipy.stats import entropy
from scipy.special import expit
from numba import njit
def my_entropy(p):
    """Return the Shannon entropy (natural log) of the distribution *p*."""
    distribution_entropy = entropy(p)
    return distribution_entropy
@njit()
def force_sum_to_1(orig_lambdas):
    """
    Forces lambdas to sum to 1
    (although last element might be negative)

    Appends one element so the returned vector sums to 1. If the input
    already sums to more than 1 it is rescaled first and the appended
    element is exactly 0.
    NOTE(review): the rescaling divides *orig_lambdas* in place, mutating
    the caller's array -- confirm that is intended.
    """
    sum_lambdas = orig_lambdas.sum()
    if sum_lambdas > 1.:
        orig_lambdas /= sum_lambdas
        # TODO: think if this is what I want: might make third lambda 0 too much
        return np.concatenate((orig_lambdas, np.array([0.])))
    else:
        return np.concatenate((orig_lambdas, 1 - np.array([sum_lambdas])))
def logit(p):
    """Map a probability p in (0, 1) onto the real line: log(p / (1 - p))."""
    odds = p / (1 - p)
    return np.log(odds)
def reparam_lambdas(x):
    """Inverse logit (sigmoid): squashes any real x into the interval (0, 1)."""
    squashed = expit(x)
    return squashed
#@njit()
def h_and_exp_betas_eqns(orig_lambdas, βs, Eβ, H, w=None):
    """
    Weighted squared deviation of the implied (entropy, E[beta]) moments
    from their targets.

    orig_lambdas: original lambda tries (not summing to 1, not within [0, 1])
    βs: fixed constants of the model
    Eβ, H: the target expected beta and entropy
    w: 2x2 weighting matrix for the two moment conditions; defaults to
       diag(1, 1/4). (Uses a None sentinel instead of a mutable ndarray
       default so one shared array is not reused across calls.)
    """
    if w is None:
        w = np.array([[1., 0.], [0., 1. / 4.]])
    # Map the unconstrained parameters into a proper probability vector.
    lambdas = force_sum_to_1(src.reparam_lambdas(orig_lambdas))
    # Moment deviations: entropy gap and expected-beta gap.
    g = np.array([entropy(lambdas) - H, np.dot(βs, lambdas) - Eβ])
    return g.T @ w @ g
#Not relevant anymore (minimize is using a derivative free method)
def jac(x, βs):
    """
    Jacobian for reparametrization of lambdas.
    Code for only three lambdas.

    Returns a 2x2 array: rows are derivatives of [H, E[beta]] with respect
    to the two free (reparametrized) lambda parameters.
    NOTE(review): currently unused -- the optimizer runs derivative-free
    (see comment above). Kept for reference.
    """
    # Derivatives wrt to H
    block = np.log((1 - np.e ** (x[0] + x[1])) / (np.e ** (x[0]) + np.e ** (x[1]) + np.e ** (x[0] + x[1]) + 1))
    num0 = (-np.log(np.e ** x[0] / (np.e ** x[0] + 1)) + block) * np.e ** x[0]
    den0 = np.e ** (2 * x[0]) + 2 * np.e ** (x[0]) + 1

    num1 = (-np.log(np.e ** x[1] / (np.e ** x[1] + 1)) + block) * np.e ** x[1]
    den1 = np.e ** (2 * x[1]) + 2 * np.e ** (x[1]) + 1

    dh_dx = np.array([num0 / den0, num1 / den1])

    # Derivatives wrt E[B]
    deb_0 = ((βs[0] - βs[2]) * np.e ** (-x[0])) / (1 + np.e ** (-x[0])) ** 2
    deb_1 = ((βs[1] - βs[2]) * np.e ** (-x[1])) / (1 + np.e ** (-x[1])) ** 2
    deb_dx = np.array([deb_0, deb_1])

    return np.array([dh_dx, deb_dx])
def relative_error(true, solution):
    """
    Average relative error to discriminate solutions.

    Uses the symmetric form 2*|a - b| / (a + b), averaged elementwise.
    """
    pointwise = 2 * np.abs((true - solution) / (true + solution))
    return np.mean(pointwise)
class Solution:
    def shiftingLetters(self, S, shifts):
        """
        :type S: str
        :type shifts: List[int]
        :rtype: str

        Letter i is rotated by the suffix sum shifts[i] + ... + shifts[-1]
        (mod 26). Walk the shifts from the end, keeping a running total,
        and assemble the answer with a list + join.
        """
        base = ord("a")
        running = 0
        rotated = []
        for idx in range(len(shifts) - 1, -1, -1):
            running = (running + shifts[idx]) % 26
            rotated.append(chr((ord(S[idx]) - base + running) % 26 + base))
        rotated.reverse()
        return "".join(rotated)
if __name__ == "__main__":
print(Solution().shiftingLetters("abc", [3, 5, 9]))
| StarcoderdataPython |
6573760 | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects for holding onto the results produced by Apache Beam jobs."""
from __future__ import absolute_import
from __future__ import annotations
from __future__ import unicode_literals
import heapq
import python_utils
import utils
from typing import Any, List, Tuple # isort: skip
MAX_OUTPUT_BYTES = 1500
class JobRunResult(python_utils.OBJECT):
    """Encapsulates the result of a job run.

    The stdout and stderr are string values analogous to a program's stdout and
    stderr pipes (reserved for standard output and errors, respectively).
    """

    # No per-instance __dict__: instances are numerous and fixed-shape.
    __slots__ = ('stdout', 'stderr')

    def __init__(self, stdout: str = '', stderr: str = ''):
        """Initializes a new JobRunResult instance.

        Args:
            stdout: str. The standard output from a job run.
            stderr: str. The error output from a job run.

        Raises:
            ValueError. Both values are empty, or the combined size exceeds
                MAX_OUTPUT_BYTES.
        """
        if not stdout and not stderr:
            raise ValueError('JobRunResult instances must not be empty')

        self.stdout, self.stderr = stdout, stderr

        if self.len_in_bytes() > MAX_OUTPUT_BYTES:
            raise ValueError(
                'JobRunResult must not exceed %d bytes' % MAX_OUTPUT_BYTES)

    @classmethod
    def as_stdout(cls, value: Any, use_repr: bool = False) -> JobRunResult:
        """Returns a new JobRunResult with a stdout value.

        Args:
            value: *. The input value to convert into a stdout result. Types are
                always casted to string using '%s' formatting.
            use_repr: bool. Whether to use the `repr` of the value.

        Returns:
            JobRunResult. A JobRunResult with the given value as its stdout.
        """
        str_value = ('%r' if use_repr else '%s') % (value,)
        return JobRunResult(stdout=str_value)

    @classmethod
    def as_stderr(cls, value: Any, use_repr: bool = False) -> JobRunResult:
        """Returns a new JobRunResult with a stderr value.

        Args:
            value: *. The input value to convert into a stderr result. Types are
                always casted to string using '%s' formatting.
            use_repr: bool. Whether to use the `repr` of the value.

        Returns:
            JobRunResult. A JobRunResult with the given value as its stderr.
        """
        str_value = ('%r' if use_repr else '%s') % (value,)
        return JobRunResult(stderr=str_value)

    @classmethod
    def accumulate(cls, results: List[JobRunResult]) -> List[JobRunResult]:
        """Accumulates results into bigger ones that maintain the size limit.

        The len_in_bytes() of each result is always less than MAX_OUTPUT_BYTES.

        Greedy smallest-first packing: results are drained from a min-heap
        (keyed on byte size) and appended to the current batch until adding
        the next one would break the limit, at which point a new batch starts.

        Args:
            results: list(JobRunResult). The results to concatenate.

        Returns:
            list(JobRunResult). JobRunResult instances with stdout and stderr
            values concatenated together with newline delimiters. Each
            individual item maintains the size limit.
        """
        if not results:
            return []

        results_heap: List[Tuple[int, int, JobRunResult]] = []
        for i, result in enumerate(results):
            # Use i as a tie-breaker so that results, which don't implement the
            # comparison operators, don't get compared with one another.
            heapq.heappush(results_heap, (result.len_in_bytes(), i, result))

        batches = []
        latest_batch_size, _, smallest = heapq.heappop(results_heap)
        batches.append([smallest])

        while results_heap:
            result_size, _, next_smallest = heapq.heappop(results_heap)
            # Compensates for the '\n' delimiter used to concatenate results.
            # Results are never empty, so we either need two '\n' bytes (when
            # stdout and stderr are both non-empty), or 1 (since at most one of
            # them is empty).
            padding = 2 if next_smallest.stdout and next_smallest.stderr else 1
            if latest_batch_size + padding + result_size < MAX_OUTPUT_BYTES:
                latest_batch_size += padding + result_size
                batches[-1].append(next_smallest)
            else:
                latest_batch_size = result_size
                batches.append([next_smallest])

        batched_results = []
        for batch in batches:
            stdout = '\n'.join(r.stdout for r in batch if r.stdout)
            stderr = '\n'.join(r.stderr for r in batch if r.stderr)
            batched_results.append(JobRunResult(stdout=stdout, stderr=stderr))
        return batched_results

    def len_in_bytes(self) -> int:
        """Returns the number of bytes encoded by the JobRunResult instance.

        Returns:
            int. The number of bytes encoded by the JobRunResult instance.
        """
        output_bytes = (
            python_utils.convert_to_bytes(s) for s in (self.stdout, self.stderr)
        )
        return sum(len(output) for output in output_bytes)

    def __repr__(self) -> str:
        return '%s(stdout=%s, stderr=%s)' % (
            self.__class__.__name__,
            utils.quoted(self.stdout), utils.quoted(self.stderr))

    def __hash__(self) -> int:
        return hash((self.stdout, self.stderr))

    # NOTE: Needs to return Any because of:
    # https://github.com/python/mypy/issues/363#issue-39383094
    def __eq__(self, other: Any) -> Any:
        return (
            (self.stdout, self.stderr) == (other.stdout, other.stderr)  # pylint: disable=protected-access
            if self.__class__ is other.__class__ else NotImplemented)

    # NOTE: Needs to return Any because of:
    # https://github.com/python/mypy/issues/363#issue-39383094
    def __ne__(self, other: Any) -> Any:
        return (
            not (self == other)
            if self.__class__ is other.__class__ else NotImplemented)

    def __getstate__(self) -> Tuple[str, str]:
        """Called by pickle to get the value that uniquely defines self."""
        return self.stdout, self.stderr

    def __setstate__(self, state: Tuple[str, str]) -> None:
        """Called by pickle to build an instance from __getstate__'s value."""
        self.stdout, self.stderr = state
9797919 | import webbrowser
from cactus.utils import run_subprocess
import os
import platform
from threading import Thread
s1 = """
tell application "Google Chrome"
set windowsList to windows as list
repeat with currWindow in windowsList
set tabsList to currWindow's tabs as list
repeat with currTab in tabsList
if "%s" is in currTab's URL then execute currTab javascript "%s"
end repeat
end repeat
end tell
"""
s2 = """
tell application "Safari"
if (count of windows) is greater than 0 then
set windowsList to windows as list
repeat with currWindow in windowsList
set tabsList to currWindow's tabs as list
repeat with currTab in tabsList
if "%s" is in currTab's URL then
tell currTab to do JavaScript "%s"
end if
end repeat
end repeat
end if
end tell
"""
def applescript(input):
    """Run *input* as an AppleScript via osascript; no-op off macOS."""
    # Bail if we're not on mac os for now
    if platform.system() != "Darwin":
        return
    # Feed the script to osascript through a shell here-document.
    command = "osascript<<END%sEND" % input
    return run_subprocess(command)
def _insertJavascript(urlMatch, js):
    """Inject *js* into open Chrome/Safari tabs whose URL contains *urlMatch*.

    Best-effort: scripting failures in either browser are silently ignored.
    """
    apps = appsRunning(['Safari', 'Google Chrome'])

    if apps['Google Chrome']:
        try:
            applescript(s1 % (urlMatch, js))
        except Exception:
            pass
    if apps['Safari']:
        try:
            applescript(s2 % (urlMatch, js))
        except Exception:
            pass
def browserReload(url, site):
    """Reload the page at *url* in the user's browser."""
    if platform.system() != "Darwin":
        # Off-mac: drive the Selenium-controlled browser instead of AppleScript.
        if site.browser is None:
            openurl(url, site)
        else:
            site.browser.refresh()
    else:
        _insertJavascript(url, "window.location.reload()")
def browserReloadCSS(url, site):
    """Refresh only the stylesheets of the page at *url* (cache-busting the
    href of every <link rel="stylesheet">); falls back to a full reload
    off macOS."""
    if platform.system() != "Darwin":
        browserReload(url, site)
    else:
        _insertJavascript(url, "var links = document.getElementsByTagName('link'); for (var i = 0; i < links.length;i++) { var link = links[i]; if (link.rel === 'stylesheet') {link.href += '?'; }}")
def appsRunning(l):
    """Return {app_name: bool} for which of the given apps appear in the
    OS process listing (substring match against raw ps/wmic output)."""
    if os.name == "nt":
        psdata = run_subprocess(
            ['wmic', 'process', 'get', 'description']
        )
    else:
        psdata = run_subprocess(['ps aux'])

    retval = {}
    for app in l:
        retval[app] = app in psdata
    return retval
def openurl(url, site):
    """Open *url*: Selenium on non-mac platforms, default browser on macOS."""
    if platform.system() != "Darwin":
        if site.browser is None:
            # Start the Selenium driver on a background thread so the
            # caller is not blocked by browser startup.
            t = Thread(target=init_selenium, args=(site, url,))
            t.start()
        else:
            site.browser.get(url)
    else:
        webbrowser.open(url)
def init_selenium(site, url):
    """Create the Selenium webdriver named in the site config, store it on
    *site*, and navigate it to *url*."""
    # Imported lazily so selenium is only required when this path is used.
    from selenium import webdriver

    b = site.config.get("common").get("browser", "chrome")

    if b == "firefox":
        site.browser = webdriver.Firefox()
    elif b == "opera":
        site.browser = webdriver.Opera()
    elif b == "ie":
        site.browser = webdriver.Ie()
    else:
        # Any unrecognized value falls back to Chrome.
        site.browser = webdriver.Chrome()

    site.browser.get(url)
| StarcoderdataPython |
98895 | <reponame>gnmerritt/dailyrippl<filename>rippl/bills/migrations/0002_auto_20170109_2142.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 21:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10.4): alters several Bill fields to add
    # defaults and, for `chamber`, allow NULL.

    dependencies = [
        ('bills', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bill',
            name='chamber',
            field=models.CharField(choices=[('S', 'Senate'), ('H', 'House')], max_length=3, null=True),
        ),
        migrations.AlterField(
            model_name='bill',
            name='official_title',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='bill',
            name='popular_title',
            field=models.CharField(default='', max_length=127),
        ),
        migrations.AlterField(
            model_name='bill',
            name='summary',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='bill',
            name='sunlight_id',
            field=models.CharField(default='', max_length=63),
        ),
        migrations.AlterField(
            model_name='bill',
            name='url',
            field=models.CharField(default='', help_text='Permalink with more info', max_length=127),
        ),
    ]
| StarcoderdataPython |
6555175 | <gh_stars>1-10
#!/usr/bin/python
import argparse
import collections
import json
import math
import os
import struct
import sys
# Server angle units (40000 per full circle) -> radians.
ANGLE_FACTOR = 2 * math.pi / 40000.0
# Server speed units (thousandths) -> pixels per tick.
SPEED_FACTOR = 1 / 1000.0
def parseConfig(settingsFile):
    """Parse an INI-style server.cfg into {section: {key: value}}.

    All keys and values are kept as raw strings; lines that are neither a
    [section] header nor a key=value pair are ignored.
    """
    parsed = {}
    section = None
    for raw in settingsFile.readlines():
        entry = raw.strip()
        if entry.startswith('['):
            section = entry[1:-1]
            parsed[section] = {}
        elif '=' in entry:
            name, val = entry.split('=')
            parsed[section][name] = val
    return parsed
def convertPrizeWeights(prizeSettings):
    """Build the prize-weight table, indexed by prize type.

    Index 0 (PrizeType.NONE) is always 0; the rest come from the cfg's
    [PrizeWeight] section in a fixed order.
    """
    ordered_keys = ['Gun', 'Bomb', 'QuickCharge', 'BouncingBullets', 'MultiFire']
    return [0] + [int(prizeSettings[key]) for key in ordered_keys]
def convertShip(name, settings):
    """Convert one ship's server.cfg section into the JSON settings layout.

    name: the cfg section name (e.g. 'Warbird').
    settings: nested dict from parseConfig; every value is a raw string.
    """
    radius = int(settings[name]['Radius'])
    if radius == 0:
        # A radius of 0 means "unset"; fall back to the default ship radius.
        radius = 14

    jsonSettings = collections.OrderedDict()
    jsonSettings['name'] = name
    jsonSettings['radius'] = radius
    jsonSettings['bounceFactor'] = 16.0 / int(settings['Misc']['BounceFactor'])
    jsonSettings['rotationRadiansPerTick'] = int(settings[name]['InitialRotation']) * ANGLE_FACTOR
    jsonSettings['speedPixelsPerTick'] = int(settings[name]['InitialSpeed']) * SPEED_FACTOR
    jsonSettings['maxEnergy'] = int(settings[name]['InitialEnergy'])
    jsonSettings['accelerationPerTick'] = int(settings[name]['InitialThrust']) / 1000.0
    jsonSettings['afterburnerMaxSpeed'] = int(settings[name]['MaximumSpeed']) * SPEED_FACTOR
    jsonSettings['afterburnerAcceleration'] = int(settings[name]['MaximumThrust']) / 1000.0
    jsonSettings['afterburnerEnergy'] = int(settings[name]['AfterburnerEnergy']) / 1000.0
    jsonSettings['rechargeRate'] = int(settings[name]['InitialRecharge']) / 1000.0
    jsonSettings['respawnDelay'] = int(settings['Kill']['EnterDelay'])

    # Gun/bullet settings. Levels are 0-based in JSON, 1-based in the cfg.
    bullet = collections.OrderedDict()
    bullet['fireEnergy'] = int(settings[name]['BulletFireEnergy'])
    bullet['speed'] = int(settings[name]['BulletSpeed']) * SPEED_FACTOR
    bullet['fireDelay'] = int(settings[name]['BulletFireDelay'])
    bullet['lifetime'] = int(settings['Bullet']['BulletAliveTime'])
    bullet['damage'] = int(settings['Bullet']['BulletDamageLevel'])
    bullet['damageUpgrade'] = int(settings['Bullet']['BulletDamageUpgrade'])
    bullet['initialLevel'] = int(settings[name]['InitialGuns']) - 1
    bullet['maxLevel'] = int(settings[name]['MaxGuns']) - 1
    bullet['bounces'] = True
    bullet['doubleBarrel'] = int(settings[name]['DoubleBarrel']) != 0
    if int(settings[name]['MultiFireAngle']) != 0:
        # Multifire is only present when the ship has a non-zero spread angle.
        bullet['multifire'] = collections.OrderedDict()
        bullet['multifire']['fireEnergy'] = int(settings[name]['MultiFireEnergy'])
        bullet['multifire']['fireDelay'] = int(settings[name]['MultiFireDelay'])
        bullet['multifire']['angle'] = int(settings[name]['MultiFireAngle']) * ANGLE_FACTOR

    # Bomb settings.
    bomb = collections.OrderedDict()
    bomb['fireEnergy'] = int(settings[name]['BombFireEnergy'])
    bomb['fireEnergyUpgrade'] = int(settings[name]['BombFireEnergyUpgrade'])
    bomb['speed'] = int(settings[name]['BombSpeed']) * SPEED_FACTOR
    bomb['fireDelay'] = int(settings[name]['BombFireDelay'])
    bomb['lifetime'] = int(settings['Bomb']['BombAliveTime'])
    bomb['damage'] = int(settings['Bomb']['BombDamageLevel'])
    bomb['damageUpgrade'] = int(settings['Bomb']['BombDamageLevel'])
    bomb['initialLevel'] = int(settings[name]['InitialBombs']) - 1
    bomb['maxLevel'] = int(settings[name]['MaxBombs']) - 1
    bomb['blastRadius'] = int(settings['Bomb']['BombExplodePixels'])
    bomb['blastRadiusUpgrade'] = int(settings['Bomb']['BombExplodePixels'])
    bomb['proxRadius'] = int(settings['Bomb']['ProximityDistance'])
    bomb['proxRadiusUpgrade'] = int(settings['Bomb']['ProximityDistance'])
    bomb['bounceCount'] = int(settings[name]['BombBounceCount'])
    bomb['recoilAcceleration'] = int(settings[name]['BombThrust']) / 1000.0

    # Burst settings.
    burst = collections.OrderedDict()
    burst['fireDelay'] = int(settings[name]['BulletFireDelay'])  # Assume burst fire delay is the same as the bullet fire delay
    burst['lifetime'] = int(settings['Bullet']['BulletAliveTime'])  # Assume burst lifetime is the same as a regular bullet
    burst['damage'] = int(settings['Bullet']['BulletDamageLevel']) + 4 * int(settings['Bullet']['BulletDamageUpgrade'])
    burst['speed'] = int(settings[name]['BurstSpeed']) * SPEED_FACTOR
    burst['shrapnelCount'] = int(settings[name]['BurstShrapnel'])
    burst['initialCount'] = int(settings[name]['InitialBurst'])
    burst['maxCount'] = int(settings[name]['BurstMax'])

    # Decoy settings.
    decoy = collections.OrderedDict()
    decoy['fireDelay'] = bullet['fireDelay']  # Assume decoy fire delay is the same as the bullet fire delay
    decoy['lifetime'] = int(settings['Misc']['DecoyAliveTime'])
    decoy['initialCount'] = int(settings[name]['InitialDecoy'])
    decoy['maxCount'] = int(settings[name]['DecoyMax'])

    # Repel settings.
    repel = collections.OrderedDict()
    repel['fireDelay'] = 50  # TODO: figure out what this should be
    repel['initialCount'] = int(settings[name]['InitialRepel'])
    repel['maxCount'] = int(settings[name]['RepelMax'])
    repel['lifetime'] = int(settings['Repel']['RepelTime'])
    repel['distance'] = int(settings['Repel']['RepelDistance'])
    repel['speed'] = int(settings['Repel']['RepelSpeed']) * SPEED_FACTOR

    jsonSettings['bullet'] = bullet
    jsonSettings['bomb'] = bomb
    jsonSettings['burst'] = burst
    jsonSettings['decoy'] = decoy
    jsonSettings['repel'] = repel

    return jsonSettings
def convertToJson(settings):
    """Assemble the full dotproduct settings dict from the parsed cfg.

    Values not derived from the cfg (game/map constants) are hard-coded here.
    """
    jsonSettings = collections.OrderedDict()
    jsonSettings['game'] = collections.OrderedDict({
        'killPoints': 20,
        'maxTeams': 2
    })
    jsonSettings['network'] = collections.OrderedDict({
        'sendPositionDelay': int(settings['Misc']['SendPositionDelay']),
        # Faster position updates at a quarter of the normal delay, minimum 1.
        'fastSendPositionDelay': max(1, int(settings['Misc']['SendPositionDelay']) / 4)
    })
    jsonSettings['map'] = collections.OrderedDict({
        'width': 1024,
        'height': 1024,
        'tileWidth': 16,
        'tileHeight': 16,
        'spawnRadius': 500
    })
    jsonSettings['prize'] = collections.OrderedDict({
        'decayTime': 18000,
        'count': 50,
        'radius': 128,
        'weights': convertPrizeWeights(settings['PrizeWeight'])
    })
    # One entry per ship type, in the canonical SubSpace ship order.
    jsonSettings['ships'] = [
        convertShip('Warbird', settings),
        convertShip('Javelin', settings),
        convertShip('Spider', settings),
        convertShip('Leviathan', settings),
        convertShip('Terrier', settings),
        convertShip('Weasel', settings),
        convertShip('Lancaster', settings),
        convertShip('Shark', settings)
    ]
    return jsonSettings
def main():
    """Command-line entry point: parse a server.cfg path argument and print
    the converted settings as pretty-printed JSON to stdout."""
    parser = argparse.ArgumentParser(description = 'Converts a SubSpace server.cfg file to a dotproduct settings file.')
    parser.add_argument('settingsFile', type=argparse.FileType('rb'))
    args = parser.parse_args()

    settings = parseConfig(args.settingsFile)
    jsonSettings = convertToJson(settings)
    # Use print as a function so this also parses under Python 3 (the old
    # `print x` statement form is a SyntaxError there).
    print(json.dumps(jsonSettings, indent = 2))
if __name__ == '__main__':
    # Allow use both as a script and as an importable module.
    main()
| StarcoderdataPython |
3400161 | <reponame>archibongn1/Project
from setuptools import setup, find_packages
# Minimal packaging configuration; metadata fields are intentionally left blank.
setup(
    name='ArithSmcho',
    version='0.1',
    packages=find_packages(exclude=['test*']),
    url='',
    license='',
    author='smcho',
    author_email='',
    description=''
)
| StarcoderdataPython |
6649755 | <gh_stars>0
"""
Combination Sum II
Given a collection of candidate numbers (candidates) and a target number (target),
find all unique combinations in candidates where the candidate numbers sum to target.
Each number in candidates may only be used once in the combination.
Note: The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8
Output:
[
[1,1,6],
[1,2,5],
[1,7],
[2,6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5
Output:
[
[1,2,2],
[5]
]
Constraints:
1 <= candidates.length <= 100
1 <= candidates[i] <= 50
1 <= target <= 30
"""
class Solution:
    def combinationSum2(self, candidates, target):
        """Return every unique combination of candidates summing to target.

        Each candidate may be used at most once; duplicates in the input
        are de-duplicated by sorting and skipping repeated values at the
        same recursion depth.
        """
        combos = []
        self.backtrack(sorted(candidates), target, 0, [], combos)
        return combos

    def backtrack(self, nums, target, idx, path, ret):
        """DFS over sorted nums starting at idx, extending path while the
        remaining target stays non-negative."""
        if target == 0:
            ret.append(path)
            return
        if target < 0:
            return
        for pos in range(idx, len(nums)):
            # Skip a value equal to its predecessor at this depth to avoid
            # emitting the same combination twice.
            if pos > idx and nums[pos] == nums[pos - 1]:
                continue
            remaining = target - nums[pos]
            self.backtrack(nums, remaining, pos + 1, path + [nums[pos]], ret)
| StarcoderdataPython |
105162 | from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import override_settings
from rest_framework.exceptions import ValidationError
from rest_framework.test import APITestCase
from documents.models import DocumentType
from documents.tests.literals import (
TEST_DOCUMENT_TYPE, TEST_SMALL_DOCUMENT_PATH
)
from user_management.tests.literals import (
TEST_ADMIN_EMAIL, TEST_ADMIN_PASSWORD, TEST_ADMIN_USERNAME
)
from ..models import SmartLink, SmartLinkCondition
from .literals import (
TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
TEST_SMART_LINK_CONDITION_EXPRESSION,
TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED,
TEST_SMART_LINK_CONDITION_OPERATOR, TEST_SMART_LINK_DYNAMIC_LABEL,
TEST_SMART_LINK_LABEL_EDITED, TEST_SMART_LINK_LABEL
)
@override_settings(OCR_AUTO_OCR=False)
class SmartLinkAPITestCase(APITestCase):
def setUp(self):
self.admin_user = get_user_model().objects.create_superuser(
username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
password=<PASSWORD>
)
self.client.login(
username=TEST_ADMIN_USERNAME, password=<PASSWORD>
)
def tearDown(self):
if hasattr(self, 'document_type'):
self.document_type.delete()
def _create_document_type(self):
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE
)
def _create_document(self):
with open(TEST_SMALL_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
def _create_smart_link(self):
return SmartLink.objects.create(
label=TEST_SMART_LINK_LABEL,
dynamic_label=TEST_SMART_LINK_DYNAMIC_LABEL
)
def test_smart_link_create_view(self):
response = self.client.post(
reverse('rest_api:smartlink-list'), {
'label': TEST_SMART_LINK_LABEL
}
)
smart_link = SmartLink.objects.first()
self.assertEqual(response.data['id'], smart_link.pk)
self.assertEqual(response.data['label'], TEST_SMART_LINK_LABEL)
self.assertEqual(SmartLink.objects.count(), 1)
self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL)
def test_smart_link_create_with_document_types_view(self):
self._create_document_type()
response = self.client.post(
reverse('rest_api:smartlink-list'), data={
'label': TEST_SMART_LINK_LABEL,
'document_types_pk_list': self.document_type.pk
},
)
smart_link = SmartLink.objects.first()
self.assertEqual(response.data['id'], smart_link.pk)
self.assertEqual(response.data['label'], TEST_SMART_LINK_LABEL)
self.assertEqual(SmartLink.objects.count(), 1)
self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL)
self.assertQuerysetEqual(
smart_link.document_types.all(), (repr(self.document_type),)
)
def test_smart_link_delete_view(self):
smart_link = self._create_smart_link()
self.client.delete(
reverse('rest_api:smartlink-detail', args=(smart_link.pk,))
)
self.assertEqual(SmartLink.objects.count(), 0)
def test_smart_link_detail_view(self):
smart_link = self._create_smart_link()
response = self.client.get(
reverse('rest_api:smartlink-detail', args=(smart_link.pk,))
)
self.assertEqual(
response.data['label'], TEST_SMART_LINK_LABEL
)
def test_smart_link_patch_view(self):
self._create_document_type()
smart_link = self._create_smart_link()
self.client.patch(
reverse('rest_api:smartlink-detail', args=(smart_link.pk,)),
data={
'label': TEST_SMART_LINK_LABEL_EDITED,
'document_types_pk_list': self.document_type.pk
}
)
smart_link.refresh_from_db()
self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL_EDITED)
self.assertQuerysetEqual(
smart_link.document_types.all(), (repr(self.document_type),)
)
def test_smart_link_put_view(self):
    """PUT replaces the smart link, updating its label."""
    smart_link = self._create_smart_link()
    url = reverse('rest_api:smartlink-detail', args=(smart_link.pk,))
    self.client.put(url, data={'label': TEST_SMART_LINK_LABEL_EDITED})

    smart_link.refresh_from_db()
    self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL_EDITED)
@override_settings(OCR_AUTO_OCR=False)
class SmartLinkConditionAPITestCase(APITestCase):
    """REST API tests for smart link conditions and resolved smart links.

    BUG FIX: the superuser password arguments contained a broken redaction
    placeholder (``<PASSWORD>``), which is not valid Python.  They are
    restored to ``TEST_ADMIN_PASSWORD`` to match the other ``TEST_ADMIN_*``
    constants used here -- assumes that constant is exported by the same
    test-constants module; confirm against the imports.
    """

    def setUp(self):
        self.admin_user = get_user_model().objects.create_superuser(
            username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
            password=TEST_ADMIN_PASSWORD
        )
        self.client.login(
            username=TEST_ADMIN_USERNAME, password=TEST_ADMIN_PASSWORD
        )

    def tearDown(self):
        if hasattr(self, 'document_type'):
            self.document_type.delete()

    def _create_document_type(self):
        self.document_type = DocumentType.objects.create(
            label=TEST_DOCUMENT_TYPE
        )

    def _create_document(self):
        # NOTE(review): the fixture is opened in text mode; binary mode
        # ('rb') may be required for non-text documents -- confirm.
        with open(TEST_SMALL_DOCUMENT_PATH) as file_object:
            self.document = self.document_type.new_document(
                file_object=file_object
            )

    def _create_smart_link(self):
        self.smart_link = SmartLink.objects.create(
            label=TEST_SMART_LINK_LABEL,
            dynamic_label=TEST_SMART_LINK_DYNAMIC_LABEL
        )
        self.smart_link.document_types.add(self.document_type)

    def _create_smart_link_condition(self):
        self.smart_link_condition = SmartLinkCondition.objects.create(
            smart_link=self.smart_link,
            foreign_document_data=TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
            expression=TEST_SMART_LINK_CONDITION_EXPRESSION,
            operator=TEST_SMART_LINK_CONDITION_OPERATOR
        )

    def test_resolved_smart_link_detail_view(self):
        self._create_document_type()
        self._create_smart_link()
        self._create_smart_link_condition()
        self._create_document()
        response = self.client.get(
            reverse(
                'rest_api:resolvedsmartlink-detail',
                args=(self.document.pk, self.smart_link.pk)
            )
        )
        self.assertEqual(
            response.data['label'], TEST_SMART_LINK_LABEL
        )

    def test_resolved_smart_link_list_view(self):
        self._create_document_type()
        self._create_smart_link()
        self._create_smart_link_condition()
        self._create_document()
        response = self.client.get(
            reverse(
                'rest_api:resolvedsmartlink-list', args=(self.document.pk,)
            )
        )
        self.assertEqual(
            response.data['results'][0]['label'], TEST_SMART_LINK_LABEL
        )

    def test_resolved_smart_link_document_list_view(self):
        self._create_document_type()
        self._create_smart_link()
        self._create_smart_link_condition()
        self._create_document()
        response = self.client.get(
            reverse(
                'rest_api:resolvedsmartlinkdocument-list',
                args=(self.document.pk, self.smart_link.pk)
            )
        )
        self.assertEqual(
            response.data['results'][0]['label'], self.document.label
        )

    def test_smart_link_condition_create_view(self):
        self._create_document_type()
        self._create_smart_link()
        response = self.client.post(
            reverse(
                'rest_api:smartlinkcondition-list', args=(self.smart_link.pk,)
            ), {
                'foreign_document_data': TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
                'expression': TEST_SMART_LINK_CONDITION_EXPRESSION,
                'operator': TEST_SMART_LINK_CONDITION_OPERATOR
            }
        )
        smart_link_condition = SmartLinkCondition.objects.first()
        self.assertEqual(response.data['id'], smart_link_condition.pk)
        self.assertEqual(
            response.data['operator'], TEST_SMART_LINK_CONDITION_OPERATOR
        )
        self.assertEqual(SmartLinkCondition.objects.count(), 1)
        self.assertEqual(
            smart_link_condition.operator, TEST_SMART_LINK_CONDITION_OPERATOR
        )

    def test_smart_link_condition_delete_view(self):
        self._create_document_type()
        self._create_smart_link()
        self._create_smart_link_condition()
        self.client.delete(
            reverse(
                'rest_api:smartlinkcondition-detail',
                args=(self.smart_link.pk, self.smart_link_condition.pk)
            )
        )
        self.assertEqual(SmartLinkCondition.objects.count(), 0)

    def test_smart_link_condition_detail_view(self):
        self._create_document_type()
        self._create_smart_link()
        self._create_smart_link_condition()
        response = self.client.get(
            reverse(
                'rest_api:smartlinkcondition-detail',
                args=(self.smart_link.pk, self.smart_link_condition.pk)
            )
        )
        self.assertEqual(
            response.data['operator'], TEST_SMART_LINK_CONDITION_OPERATOR
        )

    def test_smart_link_condition_patch_view(self):
        self._create_document_type()
        self._create_smart_link()
        self._create_smart_link_condition()
        self.client.patch(
            reverse(
                'rest_api:smartlinkcondition-detail',
                args=(self.smart_link.pk, self.smart_link_condition.pk)
            ),
            data={
                'expression': TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED,
            }
        )
        self.smart_link_condition.refresh_from_db()
        self.assertEqual(
            self.smart_link_condition.expression,
            TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED
        )

    def test_smart_link_condition_put_view(self):
        self._create_document_type()
        self._create_smart_link()
        self._create_smart_link_condition()
        self.client.put(
            reverse(
                'rest_api:smartlinkcondition-detail',
                args=(self.smart_link.pk, self.smart_link_condition.pk)
            ),
            data={
                'expression': TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED,
                'foreign_document_data': TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
                'operator': TEST_SMART_LINK_CONDITION_OPERATOR,
            }
        )
        self.smart_link_condition.refresh_from_db()
        self.assertEqual(
            self.smart_link_condition.expression,
            TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED
        )
| StarcoderdataPython |
11286415 | <filename>tests/logfile/test_logfile.py<gh_stars>100-1000
# pylint: disable=protected-access
import logging
import pytest
from pyctuator.logfile.logfile import PyctuatorLogfile # type: ignore
from pyctuator.pyctuator import default_logfile_format
test_buffer_size = 1000
@pytest.mark.mark_logfile_test_empty_response
def test_empty_response() -> None:
    """A fresh logfile buffer yields an empty log and zero byte offsets."""
    logfile = PyctuatorLogfile(test_buffer_size, default_logfile_format)
    range_header = f"bytes=-{2 * test_buffer_size}"
    log, start, end = logfile.get_logfile(range_header)
    assert (log, start, end) == ("", 0, 0)
@pytest.mark.mark_logfile_test_buffer_not_full
def test_buffer_not_full() -> None:
    """With one record that fits, the range starts at 0 and covers it all."""
    logfile = PyctuatorLogfile(test_buffer_size, "%(message)s")
    message = "0123456789" * 50
    logfile.log_messages.emit(
        logging.LogRecord("test record", logging.WARNING, "", 0, message, (), None)
    )
    log, start, end = logfile.get_logfile(f"bytes=-{2 * test_buffer_size}")
    assert start == 0
    # The formatter appends a trailing newline to the message.
    assert end == len(log) == len(message) + 1
@pytest.mark.mark_logfile_buffer_overflow
def test_buffer_overflow() -> None:
    """When a second record overflows the buffer, the oldest bytes are
    evicted and the reported range starts at the buffer offset."""
    logfile = PyctuatorLogfile(test_buffer_size, "%(message)s")

    def emit(message: str) -> None:
        logfile.log_messages.emit(
            logging.LogRecord("test record", logging.WARNING, "", 0, message, (), None)
        )

    emit("0123456789" * 10)
    emit("ABCDEFGHIJ" * 95)

    log, start, end = logfile.get_logfile(f"bytes=-{2 * test_buffer_size}")
    # Implicitly added newlines "break" a single string appearance.
    assert log.count("0123456789") == 4
    assert start == logfile.get_log_buffer_offset()
    assert end == start + len(log)
@pytest.mark.mark_logfile_forgotten_records
def test_forgotten_records() -> None:
    """A record that fills the whole buffer evicts earlier records entirely."""
    logfile = PyctuatorLogfile(test_buffer_size, "%(message)s")

    def emit(message: str) -> None:
        logfile.log_messages.emit(
            logging.LogRecord("test record", logging.WARNING, "", 0, message, (), None)
        )

    emit("ABCDEFGHIJ")
    emit("0123456789" * 100)  # exactly test_buffer_size characters

    log, start, end = logfile.get_logfile(f"bytes=-{2 * test_buffer_size}")
    assert log.count("ABCDEFGHIJ") == 0
    assert start == logfile.get_log_buffer_offset()
    assert end == start + len(log)
| StarcoderdataPython |
3440292 | """
Dans_Diffraction Examples
Read values from a Crystallographic Information File (.cif or .mcif), edit the structure, write a different file
"""
import sys, os
import numpy as np
import matplotlib.pyplot as plt # Plotting
cf = os.path.dirname(__file__)
sys.path.insert(0,os.path.join(cf,'..'))
import Dans_Diffraction as dif
#f = cf+'/../Dans_Diffraction/Structures/Ca2RuO4.cif'
xtl = dif.structure_list.Ca2RuO4()
xtl.Atoms.changeatom(1, mxmymz=[0, 3, 0.3])
xtl.generate_structure()
# write cif file
xtl.write_cif('../Test/test.mcif', comments='This is a test!') | StarcoderdataPython |
398511 | # Copyright 2018-2021 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interact with files in the Faculty workspace.
"""
from enum import Enum
from collections import namedtuple
from marshmallow import fields, post_load, validates_schema, ValidationError
from marshmallow_enum import EnumField
from faculty.clients.base import BaseSchema, BaseClient
File = namedtuple("File", ["path", "name", "last_modified", "size"])
Directory = namedtuple(
"Directory",
["path", "name", "last_modified", "size", "truncated", "content"],
)
class WorkspaceClient(BaseClient):
    """Client for the Faculty workspace service.

    Either build this client with a session directly, or use the
    :func:`faculty.client` helper function:

    >>> client = faculty.client("workspace")

    Parameters
    ----------
    url : str
        The URL of the workspace service.
    session : faculty.session.Session
        The session to use to make requests.
    """

    SERVICE_NAME = "workspace"

    def list(self, project_id, prefix, depth):
        """List files in a project workspace.

        Parameters
        ----------
        project_id : uuid.UUID
            The ID of the project to list files in.
        prefix : str
            A file prefix to list files under.
        depth : int
            How deep down the file tree to list.

        Returns
        -------
        Union[File, Directory]
            The listed file tree.
        """
        response = self._get(
            "/project/{}/file".format(project_id),
            _ListResponseSchema(),
            params={"prefix": prefix, "depth": depth},
        )
        return response.content
class _FileNodeType(Enum):
    # Discriminator values the API uses to tag nodes in a listing.
    FILE = "file"
    DIRECTORY = "directory"


# Internal envelope for a deserialized listing response.
_ListResponse = namedtuple("_ListResponse", ["project_id", "path", "content"])
class _FileNodeSchema(BaseSchema):
    """Schema for a single node (file or directory) in a workspace listing."""

    path = fields.String(required=True)
    name = fields.String(required=True)
    type = EnumField(_FileNodeType, by_value=True, required=True)
    last_modified = fields.DateTime(required=True)
    size = fields.Integer(required=True)
    # Only present on directory nodes:
    truncated = fields.Boolean()
    content = fields.Nested("self", many=True)

    @validates_schema
    def validate_type(self, data, **kwargs):
        """Check that exactly the fields for the declared node type are present."""
        if data["type"] == _FileNodeType.DIRECTORY:
            required_fields = Directory._fields
        elif data["type"] == _FileNodeType.FILE:
            required_fields = File._fields
        else:
            # BUG FIX: previously this fell through with `required_fields`
            # unbound, so an unexpected type raised UnboundLocalError below
            # instead of a marshmallow validation error.
            raise ValidationError("Invalid file node type: {}.".format(data["type"]))
        if set(data.keys()) != set(required_fields).union({"type"}):
            raise ValidationError("Wrong fields for {}.".format(data["type"]))

    @post_load
    def make_file_node(self, data, **kwargs):
        """Build a File or Directory namedtuple from the validated payload."""
        if data["type"] == _FileNodeType.DIRECTORY:
            return Directory(**{key: data[key] for key in Directory._fields})
        elif data["type"] == _FileNodeType.FILE:
            return File(**{key: data[key] for key in File._fields})
        else:
            return None  # unreachable: validate_type already rejected it
class _ListResponseSchema(BaseSchema):
    """Schema for the top-level workspace listing response envelope."""

    # data_key matches the attribute name; kept explicit for clarity.
    project_id = fields.UUID(data_key="project_id", required=True)
    path = fields.String(required=True)
    content = fields.List(fields.Nested(_FileNodeSchema), required=True)

    @post_load
    def make_list_response(self, data, **kwargs):
        # Convert the validated payload into the internal namedtuple.
        return _ListResponse(**data)
| StarcoderdataPython |
8058830 | from unittest import TestCase
from maintain_frontend.llc1.validation.search_extent_validator import SearchExtentValidator
# Fixture inputs for SearchExtentValidator: a missing extent and a minimal
# valid GeoJSON FeatureCollection containing one closed polygon ring.
NO_GEOMETRY = None
FEATURE_COLLECTION = {
    "type": "FeatureCollection",
    "features": [
        {
            "type": "Feature",
            "geometry": {
                "type": "Polygon",
                "coordinates": [[[1, 2], [3, 4], [5, 6], [1, 2]]]
            }
        }
    ]
}
class TestSearchExtentValidator(TestCase):
    """Unit tests for SearchExtentValidator.validate()."""

    def test_no_geometry_fails(self):
        """A missing extent produces a single error keyed on 'map'."""
        errors = SearchExtentValidator.validate(NO_GEOMETRY).errors
        self.assertEqual(len(errors), 1)
        map_error = errors['map']
        self.assertEqual(map_error.inline_message, 'Extent is required')
        self.assertEqual(map_error.summary_message, 'Draw the extent')

    def test_feature_collection_passes(self):
        """A valid GeoJSON FeatureCollection yields no errors."""
        errors = SearchExtentValidator.validate(FEATURE_COLLECTION).errors
        self.assertEqual(len(errors), 0)
| StarcoderdataPython |
55582 | # Author: StevenChaoo
# -*- coding:UTF-8 -*-
import json
import logging
import time
import random
import sys
from sklearn_crfsuite import CRF
from sklearn.metrics import classification_report
from util import tools
from tqdm import tqdm
# Configure root logging once at import time so the script's progress
# messages are timestamped and levelled.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO
)
logger = logging.getLogger("root")
def sentence2feature(sentences):
    """Build per-token context features for each sentence.

    For every token, five string features are produced: the word itself,
    its neighbours (with '<start>'/'<end>' sentinels at sentence edges),
    and the two adjacent bigrams.
    """
    features = []
    for sentence in tqdm(sentences):
        sent_feats = []
        last = len(sentence) - 1
        for idx, word in enumerate(sentence):
            prev_word = sentence[idx - 1] if idx > 0 else '<start>'
            next_word = sentence[idx + 1] if idx < last else '<end>'
            sent_feats.append({
                "w": word,
                "w-1": prev_word,
                "w+1": next_word,
                "w-1:w": prev_word + word,
                "w:w+1": word + next_word,
            })
        features.append(sent_feats)
    return features
def normalizationLabel(label_lists):
    """Flatten nested BIO label lists, stripping the "B-"/"I-" prefixes.

    Single-character labels (e.g. "O") are kept as-is; any longer label is
    assumed to be "<prefix>-<entity>" and reduced to the part after the
    first two characters.
    """
    return [
        label[2:] if len(label) > 1 else label
        for label_list in label_lists
        for label in label_list
    ]
class CRFModel(object):
    """Thin wrapper around sklearn_crfsuite.CRF for NER training/evaluation."""

    def __init__(self):
        # l2sgd: SGD with L2 regularization; c2 is the regularization weight.
        self.model = CRF(algorithm='l2sgd', c2=0.1, max_iterations=100)

    def train(self, features, tag_lists):
        """Fit the CRF on per-sentence feature dicts and tag sequences."""
        self.model.fit(features, tag_lists)

    def evaluate(self, features, tag_lists):
        """Print a token-level classification report against the gold tags."""
        predicted = self.model.predict(features)
        gold_flat = normalizationLabel(tag_lists)
        pred_flat = normalizationLabel(predicted)
        print(classification_report(gold_flat, pred_flat))
def dataProcess(path):
    """Read a CoNLL-style file ("token label" per line, blank line between
    sentences) and return ``(word_lists, label_lists)`` with one inner list
    per sentence.

    Fixes over the original:
    * the file handle is now closed via a context manager;
    * the per-sentence accumulators are reset after each sentence -- the
      original never reset them, so every entry of word_lists/label_lists
      aliased the same ever-growing list;
    * a final sentence not followed by a blank line is no longer dropped.
    """
    word_lists = []
    label_lists = []
    word_list = []
    label_list = []
    with open(path) as f:
        for line in f:
            line_list = line.strip().split(" ")
            if len(line_list) > 1:
                word_list.append(line_list[0])
                label_list.append(line_list[1])
            elif word_list:
                word_lists.append(word_list)
                label_lists.append(label_list)
                word_list = []
                label_list = []
    if word_list:
        # Flush a trailing sentence with no terminating blank line.
        word_lists.append(word_list)
        label_lists.append(label_list)
    return word_lists, label_lists
def main():
    """Train and evaluate a CRF NER model on the ./data/dis train/test splits."""
    # Prepare dataset (paths are relative to the working directory).
    train_word_lists, train_label_lists = dataProcess("./data/dis/train.txt")
    test_word_lists, test_label_lists = dataProcess("./data/dis/test.txt")
    # Extract features
    logger.info("Prepare train data")
    train_features = sentence2feature(train_word_lists)
    logger.info("Prepare test data")
    test_features = sentence2feature(test_word_lists)
    # Build CRF model
    logger.info("Build CRF model")
    crf = CRFModel()
    logger.info("Success!")
    # Train model
    logger.info("Begin training")
    crf.train(train_features, train_label_lists)
    logger.info("Finish training")
    # Evaluate model (prints a classification report to stdout)
    logger.info("Begin evaluating")
    crf.evaluate(test_features, test_label_lists)
    logger.info("Finish evaluating")
if __name__ == "__main__":
# Main function
main()
| StarcoderdataPython |
11250080 | <gh_stars>10-100
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import copy
import unittest
from unittest.mock import patch, MagicMock
from cfn_policy_validator.tests import offline_only
from cfn_policy_validator.tests.boto_mocks import BotoClientError
from cfn_policy_validator.tests.parsers_tests import mock_identity_parser_setup
from cfn_policy_validator.tests.utils import required_property_error, load, account_config, expected_type_error, \
load_resources
from cfn_policy_validator.application_error import ApplicationError
from cfn_policy_validator.parsers.identity import IdentityParser
from cfn_policy_validator.parsers import identity
from cfn_policy_validator.tests.parsers_tests.test_identity import has_policy, \
sample_policy_a, sample_policy_b, IdentityParserTest, aws_lambda_basic_execution_response, \
aws_lambda_basic_execution_version_response, aws_lambda_execute_response, aws_lambda_execute_version_response
class WhenParsingAnIAMUserAndValidatingSchema(unittest.TestCase):
    """Schema validation for AWS::IAM::User resources.

    Each malformed property must raise an ApplicationError whose message
    matches the shared required_property_error / expected_type_error
    helpers; properties the parser does not evaluate may contain arbitrary
    functions or refs without raising.
    """

    @mock_identity_parser_setup()
    def test_with_invalid_path_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'Path': {'abc': 'def'}
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(expected_type_error('ResourceA.Properties.Path', 'string', "{'abc': 'def'}"),
                         str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_invalid_user_name_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'UserName': ['Invalid']
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(expected_type_error('ResourceA.Properties.UserName', 'string', "['Invalid']"),
                         str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_invalid_policies_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'Policies': 'PolicyA'
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(expected_type_error('ResourceA.Properties.Policies', 'array', "'PolicyA'"),
                         str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_invalid_policies_item_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'Policies': ['PolicyA']
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(expected_type_error('ResourceA.Properties.Policies.0', 'object', "'PolicyA'"), str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_no_policy_name(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'Policies': [
                        {
                            'PolicyDocument': copy.deepcopy(sample_policy_b)
                        }
                    ]
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(required_property_error('PolicyName', 'ResourceA.Properties.Policies.0'),
                         str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_invalid_policy_name_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'Policies': [
                        {
                            'PolicyName': ['Invalid'],
                            'PolicyDocument': copy.deepcopy(sample_policy_a)
                        }
                    ]
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(
            expected_type_error('ResourceA.Properties.Policies.0.PolicyName', 'string', "['Invalid']"),
            str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_no_policy_document(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'Policies': [
                        {
                            'PolicyName': 'root'
                        }
                    ]
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(required_property_error('PolicyDocument', 'ResourceA.Properties.Policies.0'),
                         str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_invalid_policy_document_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'Policies': [
                        {
                            'PolicyName': 'PolicyA',
                            'PolicyDocument': 'Invalid'
                        }
                    ]
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(
            expected_type_error('ResourceA.Properties.Policies.0.PolicyDocument', 'object', "'Invalid'"),
            str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_invalid_managed_policy_arns_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'ManagedPolicyArns': 'Invalid'
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(expected_type_error('ResourceA.Properties.ManagedPolicyArns', 'array', "'Invalid'"),
                         str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_invalid_managed_policy_arns_item_type(self):
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'ManagedPolicyArns': [['Invalid']]
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual(expected_type_error('ResourceA.Properties.ManagedPolicyArns.0', 'string', "['Invalid']"), str(cm.exception))

    @mock_identity_parser_setup()
    def test_with_unsupported_function_in_unused_property(self):
        # Unevaluated properties may contain unsupported intrinsic functions.
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'LoginProfile': {"Fn::GetAZs": {"Ref": "AWS::Region"}}
                }
            }
        })

        IdentityParser.parse(template, account_config)

        self.assertTrue(True, 'Should not raise error.')

    @mock_identity_parser_setup()
    def test_with_ref_to_parameter_in_unused_property(self):
        # Unevaluated properties may reference undeclared parameters.
        template = load_resources({
            'ResourceA': {
                'Type': 'AWS::IAM::User',
                'Properties': {
                    'LoginProfile': {'Ref': 'SomeProperty'}
                }
            }
        })

        IdentityParser.parse(template, account_config)

        self.assertTrue(True, 'Should not raise error.')
class WhenParsingAnIAMUserWithAName(IdentityParserTest):
    """An explicit UserName/Path must be carried through to the parsed user."""

    @mock_identity_parser_setup()
    def test_returns_a_user(self):
        template = load({
            'Resources': {
                'ResourceA': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'Path': '/custom/user/path',
                        'UserName': 'MyUserName'
                    }
                }
            }
        })

        self.parse(template, account_config)
        self.assertResults(number_of_users=1)

        user = self.users[0]
        self.assertEqual("MyUserName", user.UserName)
        self.assertEqual("/custom/user/path", user.UserPath)
        self.assertEqual(0, len(user.Policies))
class WhenParsingAnIAMUserWithNoName(IdentityParserTest):
    """Without a UserName, the logical resource ID is used as the name."""

    @mock_identity_parser_setup()
    def test_returns_a_user(self):
        template = load({
            'Resources': {
                'ResourceA': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'Path': '/custom/user/path'
                    }
                }
            }
        })

        self.parse(template, account_config)
        self.assertResults(number_of_users=1)

        user = self.users[0]
        # Falls back to the logical resource ID.
        self.assertEqual("ResourceA", user.UserName)
        self.assertEqual("/custom/user/path", user.UserPath)
        self.assertEqual(0, len(user.Policies))
class WhenParsingAnIAMPolicyAttachedToAUser(IdentityParserTest):
    """Inline policies listed under Properties.Policies are parsed with the user."""

    @mock_identity_parser_setup()
    def test_returns_a_user_and_policy(self):
        template = load({
            'Resources': {
                'User': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'Policies': [
                            {
                                'PolicyName': 'PolicyA',
                                'PolicyDocument': copy.deepcopy(sample_policy_a)
                            },
                            {
                                'PolicyName': 'PolicyB',
                                'PolicyDocument': copy.deepcopy(sample_policy_b)
                            }
                        ]
                    }
                }
            }
        })

        self.parse(template, account_config)
        self.assertResults(number_of_users=1)

        user = self.users[0]
        self.assertEqual("User", user.UserName)
        # Default path when none is specified.
        self.assertEqual("/", user.UserPath)
        self.assertEqual(2, len(user.Policies))
        self.assertTrue(has_policy(user, 'PolicyA', sample_policy_a))
        self.assertTrue(has_policy(user, 'PolicyB', sample_policy_b))
class WhenParsingAnIAMUserWithReferencesInEachField(IdentityParserTest):
    # this is a test to ensure that each field is being evaluated for references in a user
    @mock_identity_parser_setup()
    def test_returns_a_user_with_references_resolved(self):
        """Every Ref (path, name, inline policy resource, managed policy ARN)
        must be substituted with the supplied parameter values."""
        inline_policy = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Effect': 'Allow',
                    'Action': 'ec2:RunInstance',
                    'Resources': {'Ref': 'Resource'}
                }
            ]
        }

        template = load({
            'Parameters': {
                'UserPath': {},
                'UserName': {},
                'Resource': {},
                'ManagedPolicyArn': {}
            },
            'Resources': {
                'ManagedPolicy': {
                    'Type': 'AWS::IAM::ManagedPolicy',
                    'Properties': {
                        'PolicyDocument': copy.deepcopy(sample_policy_a)
                    }
                },
                'ResourceA': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'Path': {'Ref': 'UserPath'},
                        'UserName': {'Ref': 'UserName'},
                        'Policies': [{
                            'PolicyName': 'Policy1',
                            'PolicyDocument': inline_policy
                        }],
                        'ManagedPolicyArns': [
                            {'Ref': 'ManagedPolicy'}
                        ]
                    }
                }
            }
        },
        {
            # Parameter values to substitute for the Refs above.
            'UserPath': '/custom/user/path',
            'UserName': 'CustomUserName',
            'Resource': 'my_resource/*'
        })

        self.parse(template, account_config)
        self.assertResults(number_of_users=1)

        user = self.users[0]
        self.assertEqual("CustomUserName", user.UserName)
        self.assertEqual("/custom/user/path", user.UserPath)

        # The inline policy's Ref must be resolved in the parsed output.
        expected_inline_policy = inline_policy.copy()
        expected_inline_policy['Statement'][0]['Resources'] = 'my_resource/*'

        self.assertEqual(2, len(user.Policies))
        self.assertTrue(has_policy(user, 'Policy1', expected_inline_policy))
        self.assertTrue(has_policy(user, "ManagedPolicy", sample_policy_a))
class WhenParsingManagedPoliciesAttachedToAUserFromTheUser(IdentityParserTest):
    """Managed policies referenced via Ref in ManagedPolicyArns are resolved
    from the template and attached to the user."""

    @mock_identity_parser_setup()
    def test_returns_a_user_with_attached_policies(self):
        template = load({
            'Resources': {
                'ManagedPolicyA': {
                    'Type': 'AWS::IAM::ManagedPolicy',
                    'Properties': {
                        'PolicyDocument': copy.deepcopy(sample_policy_a)
                    }
                },
                'ManagedPolicyB': {
                    'Type': 'AWS::IAM::ManagedPolicy',
                    'Properties': {
                        'PolicyDocument': copy.deepcopy(sample_policy_b)
                    }
                },
                'User': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'ManagedPolicyArns': [
                            {'Ref': 'ManagedPolicyA'},
                            {'Ref': 'ManagedPolicyB'}
                        ]
                    }
                }
            }
        })

        self.parse(template, account_config)
        self.assertResults(number_of_users=1)

        user = self.users[0]
        self.assertEqual(2, len(user.Policies))
        self.assertTrue(has_policy(user, "ManagedPolicyA", sample_policy_a))
        self.assertTrue(has_policy(user, "ManagedPolicyB", sample_policy_b))
# note that the DependsOn is required here, otherwise the managed policy would not exist when the user attempts to find it
class WhenParsingManagedPoliciesAttachedToAUserFromTheUserAndArnIsNotRef(IdentityParserTest):
    """A literal managed-policy ARN (not a Ref) is matched against policies
    declared elsewhere in the template by name."""

    @mock_identity_parser_setup()
    def test_returns_users_with_attached_policies(self):
        template = load({
            'Resources': {
                'User': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'ManagedPolicyArns': [
                            f"arn:aws:iam::{account_config.account_id}:policy/MyManagedPolicy"
                        ]
                    },
                    'DependsOn': 'ManagedPolicyA'
                },
                'ManagedPolicyA': {
                    'Type': 'AWS::IAM::ManagedPolicy',
                    'Properties': {
                        "ManagedPolicyName": "MyManagedPolicy",
                        'PolicyDocument': copy.deepcopy(sample_policy_a)
                    }
                }
            }
        })

        self.parse(template, account_config)
        self.assertResults(number_of_users=1)

        user = self.users[0]
        self.assertEqual(1, len(user.Policies))
        self.assertTrue(has_policy(user, "MyManagedPolicy", sample_policy_a))
class WhenParsingManagedPolicyAttachedToAUserAndThePolicyIsAWSManaged(IdentityParserTest):
    """AWS-managed policy ARNs are fetched from IAM (mocked here) and
    attached with their real path."""

    @mock_identity_parser_setup(
        iam=[
            # Mocked GetPolicy/GetPolicyVersion responses for both policies.
            aws_lambda_basic_execution_response(),
            aws_lambda_basic_execution_version_response(),
            aws_lambda_execute_response(),
            aws_lambda_execute_version_response()
        ]
    )
    @offline_only
    def test_returns_user_with_attached_policies(self):
        template = load({
            'Resources': {
                'User': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'ManagedPolicyArns': [
                            'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',
                            'arn:aws:iam::aws:policy/AWSLambdaExecute'
                        ]
                    }
                }
            }
        })

        self.parse(template, account_config)
        self.assertResults(number_of_users=1)

        user = self.users[0]
        self.assertEqual(2, len(user.Policies))
        self.assertTrue(has_policy(user, "AWSLambdaBasicExecutionRole", sample_policy_a, '/service-role/'))
        self.assertTrue(has_policy(user, "AWSLambdaExecute", sample_policy_b, '/'))
class WhenParsingManagedPolicyAttachedToAUserAndThePolicyDoesNotExistInTemplateOrAWS(unittest.TestCase):
    """A managed-policy ARN found neither in the template nor in IAM
    (mocked NoSuchEntity) must raise an ApplicationError."""

    @mock_identity_parser_setup(
        iam=[
            BotoClientError(
                method='get_policy',
                service_error_code='NoSuchEntity',
                expected_params={
                    'PolicyArn': 'arn:aws:iam::aws:policy/DoesNotExist'
                }
            )
        ]
    )
    def test_throws_exception(self):
        template = load({
            'Resources': {
                'User': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'ManagedPolicyArns': [
                            'arn:aws:iam::aws:policy/DoesNotExist'
                        ]
                    }
                }
            }
        })

        with self.assertRaises(ApplicationError) as cm:
            IdentityParser.parse(template, account_config)

        self.assertEqual('Could not find managed policy with arn:aws:iam::aws:policy/DoesNotExist in template '
                         'or in environment.', str(cm.exception))
| StarcoderdataPython |
3219149 | from libs import browser_init, insta_login
import pandas as pd
import numpy as np
import os
import sys
from datetime import datetime
from pathlib import Path
# initialising the connection
browser = browser_init()
insta_login(browser)

# Setting up a dict with the name and insta accounts of profiles to scrape
dct = {}
# NOTE(review): Path(__name__) resolves the literal module name against the
# CWD -- Path(__file__) is probably intended here; confirm.
dir = Path(__name__).resolve().parents[1]
# `dir` shadows the builtin and ends up holding the spreadsheet path (str).
dir = str(dir)+'/data_repo/Comp_insta.xlsx'
def load_data(dir):
    """Read the spreadsheet at *dir* (an .xlsx path) into a DataFrame."""
    # The openpyxl engine is required for .xlsx files with modern pandas.
    return pd.read_excel(dir, engine='openpyxl')
sheet = load_data(dir)
# Populate the name -> profile-URL mapping from the first two columns.
for index in range(sheet.shape[0]):
    name = sheet.iloc[index, 0]
    page = sheet.iloc[index, 1]
    dct[name] = page
    print(sheet.iloc[index, 0], sheet.iloc[index, 1])

# Requesting the data
data = {}
for name, page in dct.items():
    print(f'Getting page: {page}')
    try:
        browser.get(page)
        data[name] = {}
        data[name]['date'] = datetime.now()
        # Each "Y8-fY" <li> holds "<count> <metric>" text; presumably the
        # posts/followers/following counters -- depends on Instagram markup.
        for elem in browser.find_elements_by_xpath('//li[@class = "Y8-fY "]'):
            print(f'elem.text: {elem.text}')
            try:
                elem = elem.text.split(' ')
                data[name][elem[1]] = elem[0]
                print(f'appended: {data[name][elem[1]]}')
            except Exception as e:
                # Skip list items that do not match the "<count> <metric>" shape.
                print(e)
    except Exception as e:
        # Keep scraping the remaining profiles if one page fails to load.
        print(e)
        pass

# Dataframe to structure this run's data pull
df1 = pd.DataFrame.from_dict(data, orient='index', columns=['date', 'posts', 'followers', 'following'])

# Writing to csv
# NOTE(review): Path(__name__) again -- presumably Path(__file__); confirm.
path = Path(__name__).resolve().parents[1]
if 'demo.csv' not in os.listdir(str(path)+'/data_repo'):
    df1.to_csv(str(path)+'/data_repo/demo.csv', header=True)
else:
    # Append without repeating the header on subsequent runs.
    df1.to_csv(str(path)+'/data_repo/demo.csv', header=False, mode='a')
302500 | from flask_login.utils import logout_user
from app import app, db, bcrypt
from flask import render_template, redirect, flash, url_for, request
from forms import StuRegistration, StuLogin, StuUpdate
from model import Student, Organization, Scholarship, scholarship_application
from flask_login import login_user, current_user, login_required
@app.route('/about')
def about():
    """Render the static About page."""
    return render_template('about.html')
@app.route('/', methods=['POST', 'GET'])
def home():
    """Landing page: student registration form.

    Already-authenticated users are redirected to the dashboard.  On a valid
    POST the password is bcrypt-hashed and a new Student row is persisted.
    """
    if current_user.is_authenticated:
        flash(f'Already loged in', 'success')
        return redirect(url_for('dashboard'))
    form = StuRegistration()
    if form.validate_on_submit():
        # Never store the plain-text password; hash it first.
        hashed_password = bcrypt.generate_password_hash(
            form.password.data).decode('Utf-8')
        # BUG FIX: the password argument was a broken redaction placeholder
        # ("<PASSWORD>"); persist the bcrypt hash computed above.
        user = Student(username=form.name.data, email=form.email.data,
                       phone=form.phone.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash(
            f'Created New Account.Check Your Registered mail for more details!!', 'success')
        return redirect(url_for('login'))
    return render_template("home.html", title="Home", form=form)
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Student login.

    After a successful credential check, users with an incomplete profile
    (missing address/income/class-X data) are sent to /update before they
    can reach the dashboard.
    """
    if current_user.is_authenticated:
        flash(f'Already loged in', 'success')
        return redirect(url_for('dashboard'))
    form = StuLogin()
    if form.validate_on_submit():
        user = Student.query.filter_by(email=form.email.data).first()
        # Verify the submitted password against the stored bcrypt hash.
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remember.data)
            flash(f'Logged In', 'success')
            # Force profile completion before showing the dashboard.
            if user.address == None or user.income == None or user.clx == None or user.clxmarks == None:
                flash(f'Please Complete your Profile First!!!', 'warning')
                return redirect(url_for('update'))
            return redirect(url_for('dashboard'))
        flash(f'Login Unsuccessfull, Please Check Email and Password!!!', 'danger')
        return redirect(url_for('login'))
    return render_template("login.html", title="Login", form=form)
@app.route('/logout')
@login_required
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    # BUG FIX: corrected the typo in the user-facing message
    # ("Loggeout" -> "Logged Out").
    flash(f'Successfully Logged Out User', 'info')
    return redirect(url_for('home'))
@app.route('/dashboard')
@login_required
def dashboard():
    """Show all live scholarships the current student is eligible for.

    Eligibility: the student's class X/XII/UG percentages meet each
    scholarship's minimums and the scholarship is live (life == 1).
    """
    scholarship_list = []
    for i in Scholarship.query.all():
        # NOTE(review): if the profile is incomplete (marks are None) these
        # comparisons raise TypeError.  login() redirects such users to
        # /update, but a direct visit here would 500 -- confirm intended.
        if(i.cls_x_min_per <= current_user.clxmarks and i.cls_xii_min_per <= current_user.clxiimarks and i.cls_ug_min_per <= current_user.ugmarks and i.life == 1):
            scholarship_list.append(i)
    return render_template("studentDashboard.html", title="Welcome "+current_user.username, scholarshipList=scholarship_list)
@app.route('/search', methods=["GET", "POST"])
@login_required
def search():
    """Search the student's eligible scholarships.

    GET renders the full eligible list; POST filters it by the submitted
    ``searchField`` substring across name, organization, description and
    amount.
    """
    # Same eligibility rule as dashboard(): marks meet minimums, scheme live.
    scholarship_list = []
    for i in Scholarship.query.all():
        if(i.cls_x_min_per <= current_user.clxmarks and i.cls_xii_min_per <= current_user.clxiimarks and i.cls_ug_min_per <= current_user.ugmarks and i.life == 1):
            scholarship_list.append(i)
    if request.method == "POST":
        # Default to "" so a missing form field matches everything instead of
        # raising TypeError inside str.find(None).
        tag = request.form.get("searchField", "")
        filtered_list = []
        for i in scholarship_list:
            if i.name.find(tag) != -1 or i.organization.username.find(tag) != -1 or i.description.find(tag) != -1 or str(i.amount).find(tag) != -1:
                filtered_list.append(i)
        return render_template("studentDashboard.html", title="Welcome "+current_user.username, scholarshipList=filtered_list)
    # BUG FIX: a plain GET previously fell off the end of the function and
    # returned None (an HTTP 500 in Flask); render the unfiltered list.
    return render_template("studentDashboard.html", title="Welcome "+current_user.username, scholarshipList=scholarship_list)
@app.route('/update', methods=['POST', 'GET'])
@login_required
def update():
    """Edit the logged-in student's profile.

    On a valid POST the form fields are copied onto the user record; on GET
    the form is pre-filled from the current record.
    """
    form = StuUpdate()
    # (form field name, Student attribute name) pairs kept in one place so
    # the save and pre-fill paths cannot drift apart.
    field_map = [
        ('name', 'username'), ('email', 'email'), ('phone', 'phone'),
        ('address', 'address'), ('earning', 'income'),
        ('xinst', 'clx'), ('xmarks', 'clxmarks'),
        ('xiiinst', 'clxii'), ('xiimarks', 'clxiimarks'),
        ('uginst', 'ug'), ('ugmarks', 'ugmarks'),
        ('pginst', 'pg'), ('pgmarks', 'pgmarks'),
    ]
    if form.validate_on_submit():
        for field, attr in field_map:
            setattr(current_user, attr, getattr(form, field).data)
        db.session.commit()
        flash('Account Updated Successfully!!', 'success')
        return redirect(url_for('dashboard'))
    elif request.method == 'GET':
        for field, attr in field_map:
            getattr(form, field).data = getattr(current_user, attr)
    return render_template("update.html", title="Update", form=form)
@app.route("/scheme/<int:sch_id>")
def scheme(sch_id):
scholarship = Scholarship.query.filter_by(id=sch_id).first()
return render_template("schemes.html", title=scholarship.name, scholarship=scholarship)
@app.route("/apply/<int:sch_id>")
def apply(sch_id):
scholarship = Scholarship.query.filter_by(id=sch_id).first()
already_applied = scholarship_application.query.filter_by(
stu_id=current_user.id, sch_id=sch_id).first()
if already_applied:
flash(f'Already applied to '+scholarship.name, 'warning')
return redirect(url_for('dashboard'))
application = scholarship_application(
sch_id=scholarship.id, stu_id=current_user.id, org_id=scholarship.organization.id, status=1)
db.session.add(application)
db.session.commit()
flash(f'Applied Successfully to '+scholarship.name, 'success')
return redirect(url_for('dashboard'))
@app.route("/trackApplications/<int:stu_id>")
def track_applications(stu_id):
scholarship_applications = scholarship_application.query.filter_by(stu_id = stu_id)
return render_template("applications.html", scholarship_applications = scholarship_applications)
| StarcoderdataPython |
80070 | <reponame>tiagoeckhardt/trac
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import unittest
from trac.tests.functional import FunctionalTwillTestCaseSetup, tc
class TestSearchFilterSelection(FunctionalTwillTestCaseSetup):
    # Functional (in-browser, twill-driven) test: selected search filters
    # must be remembered in the user session across page loads.
    def runTest(self):
        """Check persistence of search filters in session (#11292)."""
        # The four filter checkboxes present on the search page.
        filters = ['milestone', 'changeset', 'ticket', 'wiki']

        def setfilters(checked):
            # Tick/untick each filter checkbox according to ``checked``.
            for i, f in enumerate(filters):
                tc.formvalue('fullsearch', f, checked[i])

        def checkfilters(checked):
            # Assert that each checkbox's rendered checked-state matches
            # ``checked`` by scanning the page HTML.
            for i, f in enumerate(filters):
                is_checked = r'id="%s"[^>]* checked="checked"' % f
                if checked[i]:
                    tc.find(is_checked)
                else:
                    tc.notfind(is_checked)

        self._tester.go_to_front()
        # First sequence:
        tc.follow('^Search')
        seq_a = [True, False, True, False]
        setfilters(seq_a)
        tc.formvalue('fullsearch', 'q', 'anything...')
        tc.submit()
        # In the result page, the filters checkboxes reflect what's
        # been selected
        checkfilters(seq_a)
        # Now, this selection also persists after resetting the search page
        tc.follow('^Search')
        checkfilters(seq_a)
        # Second sequence:
        seq_b = [False, True, False, True]
        setfilters(seq_b)
        tc.formvalue('fullsearch', 'q', 'anything...')
        tc.submit()
        checkfilters(seq_b)
        tc.follow('^Search')
        checkfilters(seq_b)
def functionalSuite(suite=None):
    """Add this module's functional tests to *suite*.

    When no suite (or an empty, falsy one) is given, the default Trac
    functional suite is created first.
    """
    if suite:
        target = suite
    else:
        import trac.tests.functional
        target = trac.tests.functional.functionalSuite()
    target.addTest(TestSearchFilterSelection())
    return target


# Alias used by test discovery.
test_suite = functionalSuite

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
9785630 | <gh_stars>0
"""
Scripts that querry ADS for the list of dissertations each year as well as scripts that parse the institutions.
"""
import urllib
import json
import yaml
import glob
import numpy as np
import astropy
import astropy.io.ascii
import os
import requests, bs4
#ADS_KEY = os.getenv('ADS_KEY')
ADS_TOKEN = os.getenv('ADS_TOKEN')
def pull_data(year=2003, out_dir='../data/', root='ads', database='astronomy'):
    """
    Query ADS for PhD theses from a given year and write the raw JSON
    response text to ``<out_dir><root>_<year>.dat``.

    Prints a message and writes nothing when the query returns no records.
    """
    # Idiom fix: use os.makedirs instead of shelling out to mkdir.
    os.makedirs(out_dir, exist_ok=True)
    r = requests.get('https://api.adsabs.harvard.edu/v1/search/query',
                     params={'q': 'bibstem:"PhDT"',
                             'fl': 'bibcode,pubdate,database,aff,author,citation_count,pub,year,title,identifier,id',
                             'sort': 'pubdate desc',
                             'rows': '10000',
                             # BUG FIX: honour the ``database`` argument; the
                             # original always hard-coded "astronomy" here.
                             'fq': 'year:{},database:"{}"'.format(year, database)},
                     # NOTE(review): ADS documents this header as
                     # 'Bearer <token>' (space, not colon) — confirm the
                     # 'Bearer:<token>' form actually authenticates.
                     headers={'Authorization': 'Bearer:{}'.format(ADS_TOKEN)})
    if r.json()['response']['numFound'] == 0:
        print('No results found.')
        return
    print('Writing {0}{1}_{2}.dat'.format(out_dir, root, year))
    # Context manager guarantees the file is closed even on write errors.
    with open('{0}{1}_{2}.dat'.format(out_dir, root, year), 'w') as out_file:
        out_file.write(r.text)
def get_data(year_start=1970, year_end=2018):
    """Download dissertation records for every year in
    [year_start, year_end] (both ends inclusive)."""
    for year in range(year_start, year_end + 1):
        pull_data(year=year)
def unique_institutions(dir='../data/', out_file='unique_institutions.dat'):
    """
    Take all dissertation data files in *dir* and write the list of unique
    institutions to ``<dir><out_file>``.

    The user can then parse by hand the US and non-US institutions; since
    there is no uniform format for institution names this step cannot be
    automated. (The hand-parsed US list lives in
    ``../data/unique_institutions_usa.dat``.)
    """
    files = glob.glob(dir + 'ads_[12]*.dat')
    # np.empty(1, object) starts as [None]; the None-filter below drops it.
    full_affils = np.empty(1, dtype=object)
    for file in files:
        print(file)
        # Each data file is a single-line JSON payload; yaml.load parses it.
        # NOTE(review): yaml.load without an explicit Loader is deprecated —
        # json.loads would be the natural parser for these files.
        with open(file, 'r') as data:
            data_j = yaml.load(data.readline())
        affils = np.empty(len(data_j["response"]["docs"]), dtype=object)
        for i in range(len(data_j["response"]["docs"])):
            if data_j["response"]["docs"][i]["aff"] != ['-']:
                # json.dumps produces '["..."]'; [2:-2] strips the brackets
                # and quotes, leaving the bare affiliation string.
                affils[i] = json.dumps(data_j["response"]["docs"][i]["aff"])[2:-2]
        full_affils = np.concatenate((full_affils, affils), axis=0)
    # remove None entries (missing affiliations and the initial placeholder)
    index = full_affils != np.array(None)
    full_affils = full_affils[index]
    unique = np.unique(full_affils)
    print("There are {} unique institutions.".format(len(unique)))
    # BUG FIX: the original ended with ``out_file.close`` (no call), so the
    # file handle was never closed; a context manager fixes that.
    with open(dir + out_file, 'w') as out:
        for uni in unique:
            out.write("{}\n".format(uni))
def read_usa_institutions():
    """Read the hand-curated list of US institutions and return it as a
    numpy object array of stripped lines."""
    # Cleanup: dropped the dead ``us_institutions = []`` pre-assignment and
    # added a context manager so the file handle is always closed.
    with open('../data/unique_institutions_usa.dat') as fh:
        return np.array([line.strip() for line in fh], dtype=object)
def read_top_10_usa_institutions():
    """Read the list of the top 10 US institutions and return it as a numpy
    object array.

    This is a subset of ``../data/unique_institutions_usa.dat`` consisting of
    all the different possible names of the top 10 US institutions.
    """
    # Cleanup: dropped the dead pre-assignment and close the file properly.
    with open('../data/top10_unique_institutions_usa.dat') as fh:
        return np.array([line.strip() for line in fh], dtype=object)
def get_missing_affil(bibcode=''):
    """
    For a given bibcode, return the publication institution listed on the
    ADS HTML page, formatted as ``"<bibcode> | <affiliation>"``.

    For some dissertations there is no institution in the query result, but
    there is one on the HTML page. This is especially a problem for 1996 for
    some reason.
    """
    # Local import (duplicates the module-level one) keeps this helper
    # self-contained when copied around.
    import requests, bs4
    affil = ''
    response = requests.get('http://adsabs.harvard.edu/abs/{}'.format(bibcode))
    soup = bs4.BeautifulSoup(response.text, features="lxml")
    # Scan the <meta> tags; the 'dc.source' entry carries the institution.
    # If several match, the last one wins.
    for line in soup.find_all('meta'):
        if (line.has_attr('name') and line['name'].startswith('dc.source')):
            affil = line['content']
    if affil:
        return bibcode + ' | ' + affil
    else:
        # No dc.source meta tag found — report the literal string "None".
        return bibcode + ' | None'
def check_missing_affils(files=None, out_file='missing_affil.dat', out_dir='../data/'):
    """
    Go through the PhD entries, find ones without an affiliation, grab the
    ADS HTML page for that bibcode and write the institution found there to
    ``<out_dir><out_file>`` (one "bibcode | affiliation" line per entry).

    By default processes every ``ads_*.dat`` file in the data directory.
    """
    import time
    # BUG FIX: the default was a mutable list (``files=[]``); use None.
    if not files:
        files = glob.glob('../data/ads_*.dat')
    with open(out_dir + out_file, 'w') as out:
        for file in files:
            with open(file, 'r') as data:
                data_j = yaml.load(data.readline())
            for i in range(len(data_j["response"]["docs"])):
                # '-' / '--' are the placeholder values ADS uses for a
                # missing affiliation.
                if data_j["response"]["docs"][i]["aff"] in (['-'], ['--']):
                    affil = get_missing_affil(bibcode=data_j["response"]["docs"][i]['bibcode'])
                    # Randomized pause to avoid hammering the ADS server.
                    time.sleep(1 + np.random.uniform(size=1)[0])
                    # BUG FIX: the original wrote ``affil.encode("utf-8")``,
                    # which in Python 3 writes the "b'...'" repr into the
                    # text file; write the string directly.
                    out.write('{}\n'.format(affil))
                    print(affil)
def read_missing_usa_bib(dir='../data/'):
    """Return the bibcodes from ``<dir>missing_affil_usa.dat`` as a numpy
    object array.

    Each line has the form ``"<bibcode> | <affiliation>"``; only the
    (stripped) bibcode before the first '|' is kept.
    """
    # Cleanup: dropped the dead ``us_bib = []`` pre-assignment and close the
    # file deterministically with a context manager.
    with open(dir + 'missing_affil_usa.dat') as fh:
        return np.array([line.split('|')[0].strip() for line in fh],
                        dtype=object)
def parse_output(year=2002, dir='../data/', verbose=True):
    """
    One way to parse the results: outputs a list of first names and a
    formatted list of dissertations for ease of reading.

    Writes three files under *dir*: a human-readable formatted list, a
    pipe-delimited parse file, and appends first names to a shared
    ``ads_first_names.dat`` file. Only entries whose first affiliation is a
    known US institution are kept.
    """
    # NOTE: ``dir`` shadows the builtin; kept for interface compatibility.
    in_file = glob.glob(dir + 'ads_{}.dat'.format(year))
    out_file = open(dir + 'ads_{}_formatted.dat'.format(year), 'w')
    parse_file = open(dir + 'ads_{}_parse.dat'.format(year), 'w')
    # Append mode: first names accumulate across years.
    # (The ``.format(year)`` here is a no-op — the name has no placeholder.)
    names_file = open(dir + 'ads_first_names.dat'.format(year), 'a')
    us_institutions = read_usa_institutions()
    jj = 0
    print(in_file)
    data = open(in_file[0], 'r')
    # Each data file is a single-line JSON payload parsed via yaml.
    data_j = yaml.load(data.readline())
    for i in range(len(data_j["response"]["docs"])):
        if (data_j["response"]["docs"][i]["aff"][0] in us_institutions):
            try:
                # Authors are "Last, First M." — take the given-name part.
                tmp_name = data_j["response"]["docs"][i]["author"][0].split(',')[1].split()
                # If the first token is an initial ("J."), prefer the next
                # token as the first name.
                if (tmp_name[0][-1] == '.') and (len(tmp_name) > 1):
                    names_file.write("{}\n".format(tmp_name[1]))
                else:
                    names_file.write("{}\n".format(tmp_name[0]))
            except:
                # NOTE(review): bare except — any parsing failure falls back
                # to writing the full author string.
                print(data_j["response"]["docs"][i]["author"][0])
                names_file.write("{}\n".format(data_j["response"]["docs"][i]["author"][0]))
            out_file.write("{}. {}\n{}\n{}\n{}\n\n".format(jj+1, data_j["response"]["docs"][i]["author"][0], data_j["response"]["docs"][i]["aff"][0], data_j["response"]["docs"][i]["title"], data_j["response"]["docs"][i]["bibcode"]))
            parse_file.write("{} | {} | {} | {} |\n".format(jj+1, data_j["response"]["docs"][i]["author"][0], data_j["response"]["docs"][i]["aff"][0], year))
            jj += 1
            if verbose:
                print(jj, ';', data_j["response"]["docs"][i]["author"][0])#, ';', data_j["results"]["docs"][i]["title"]
    out_file.close()
    parse_file.close()
    names_file.close()
    print("Output file is {}".format(out_file.name))
| StarcoderdataPython |
9605532 | import psalg.configdb.configdb as cdb
import json
# json2xtc conversion depends on these being present with ':RO'
# (and the :RO does not appear in the xtc names)
leave_alone = ['detName:RO','detType:RO','detId:RO','doc:RO','alg:RO','version:RO']

def remove_read_only(cfg):
    """Return a copy of *cfg* with the ':RO' suffix stripped from every key
    except those listed in ``leave_alone``, recursing into nested dicts.

    A new dict is built rather than mutating *cfg* in place, since renaming
    keys while iterating a dict is unsafe.
    """
    stripped = {}
    for key, value in cfg.items():
        if isinstance(value, dict):
            value = remove_read_only(value)
        new_key = key if key in leave_alone else key.replace(':RO', '')
        stripped[new_key] = value
    return stripped
def get_config(connect_json, cfgtype, detname):
    """Extract the instrument and config-database coordinates from the
    connection json, then fetch the named detector configuration."""
    connect_info = json.loads(connect_json)
    control_info = connect_info['body']['control']['0']['control_info']
    parts = control_info['cfg_dbase'].split('/')
    return get_config_with_params(parts[0], control_info['instrument'],
                                  parts[1], cfgtype, detname)
def get_config_with_params(db_url, instrument, db_name, cfgtype, detname):
    """Fetch the *cfgtype* configuration for *detname* from the config
    database and strip the ':RO' key decorations before returning it."""
    database = cdb.configdb(db_url, instrument, False, db_name)
    raw = database.get_configuration(cfgtype, detname)
    if raw is None:
        raise ValueError('Config for instrument/detname %s/%s not found. dbase url: %s, db_name: %s, config_style: %s'%(instrument,detname,db_url,db_name,cfgtype))
    return remove_read_only(raw)
def get_config_json(*args):
    """Same as get_config(), but return the configuration serialized as a
    JSON string."""
    return json.dumps(get_config(*args))

def get_config_json_with_params(*args):
    """Same as get_config_with_params(), but return the configuration
    serialized as a JSON string."""
    return json.dumps(get_config_with_params(*args))
| StarcoderdataPython |
3507808 | """In this example we're using GitHub's APIs and we're going to access a private repo.
In this example I will show you how to populate headers for your API call using a
OAuth token (that expires in April).
"""
import requests
url = 'https://api.github.com/repos/robot297/hello-world'
headers = {
'Accept': 'application/vnd.github.v3+json',
'Authorization': 'token <KEY>' # Please note this key is set to expire
}
payload = {} # If you wanted to send data via the body that would go here
data = requests.get(url=url, headers=headers, data=payload).json()
print(data['description'])
| StarcoderdataPython |
3553110 | import csbuilder
from csbuilder.standard import Protocols, Roles, States
@csbuilder.protocols
class ThesisProtocols(Protocols):
    """Protocol identifiers shared by the thesis client and server."""
    AUTHENTICATION = 0
    CHECK = 1
    SEARCH = 2
    MATCH = 3
    REGISTER = 4
# The same two roles are registered for every thesis protocol.
@csbuilder.roles(protocol=ThesisProtocols.AUTHENTICATION)
@csbuilder.roles(protocol=ThesisProtocols.CHECK)
@csbuilder.roles(protocol=ThesisProtocols.SEARCH)
@csbuilder.roles(protocol=ThesisProtocols.MATCH)
@csbuilder.roles(protocol=ThesisProtocols.REGISTER)
class ThesisRoles(Roles):
    """Endpoint roles: the server answers requests from the client."""
    SERVER = 0
    CLIENT = 1
@csbuilder.states(ThesisProtocols.AUTHENTICATION, ThesisRoles.SERVER)
class AuthenticationServerStates(States):
    """Server-side states for the AUTHENTICATION protocol."""
    IGNORE = 0
    SUCCESS = 1
    FAILURE = 2

@csbuilder.states(ThesisProtocols.AUTHENTICATION, ThesisRoles.CLIENT)
class AuthenticationClientStates(States):
    """Client-side states for the AUTHENTICATION protocol."""
    IGNORE = 0
    REQUEST = 1
@csbuilder.states(ThesisProtocols.SEARCH, ThesisRoles.SERVER)
class SearchServerStates(States):
    """Server-side states for the SEARCH protocol."""
    IGNORE = 0
    SUCCESS = 1
    FAILURE = 2

@csbuilder.states(ThesisProtocols.SEARCH, ThesisRoles.CLIENT)
class SearchClientStates(States):
    """Client-side states for the SEARCH protocol."""
    IGNORE = 0
    REQUEST = 1
@csbuilder.states(ThesisProtocols.CHECK, ThesisRoles.SERVER)
class CheckServerStates(States):
    """Server-side states for the CHECK protocol."""
    IGNORE = 0
    SUCCESS = 1
    FAILURE = 2

@csbuilder.states(ThesisProtocols.CHECK, ThesisRoles.CLIENT)
class CheckClientStates(States):
    """Client-side states for the CHECK protocol."""
    IGNORE = 0
    QUERY = 1
@csbuilder.states(ThesisProtocols.MATCH, ThesisRoles.SERVER)
class MatchServerStates(States):
    """Server-side states for the MATCH protocol."""
    IGNORE = 0
    SUCCESS = 1
    FAILURE = 2

@csbuilder.states(ThesisProtocols.MATCH, ThesisRoles.CLIENT)
class MatchClientStates(States):
    """Client-side states for the MATCH protocol."""
    IGNORE = 0
    QUERY = 1
| StarcoderdataPython |
9627859 | <filename>libs/dimension_reduction.py
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import (PCA, IncrementalPCA,
KernelPCA, TruncatedSVD,
FastICA, MiniBatchDictionaryLearning,
SparsePCA)
from sklearn.manifold import (MDS, Isomap,
TSNE, LocallyLinearEmbedding)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import (GaussianRandomProjection,
SparseRandomProjection)
from sklearn.svm import LinearSVC
from sklearn.neighbors import (KNeighborsClassifier,
NeighborhoodComponentsAnalysis)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
class dimension_reduction(object):
    """Apply a collection of reducer objects to the same input and
    horizontally stack their outputs into one feature vector.

    Each element of *features* must expose ``fit(data)`` and
    ``transform(data)`` returning array-like results.
    """

    def __init__(self, features, n_ch):
        """Store the reducer collection.

        ``n_ch`` (channel count) is kept in the signature for interface
        compatibility but is currently unused.
        """
        super(dimension_reduction, self).__init__()
        # BUG FIX: the original assigned the module-level ``data`` class
        # here instead of the ``features`` argument, so the reducers passed
        # by the caller were silently ignored.
        self.data = features

    def process_fit(self, data):
        """Fit every reducer on *data* and stack the results."""
        return np.hstack([f.fit(data) for f in self.data])

    def process_transform(self, data):
        """Transform *data* with every reducer and stack the results."""
        return np.hstack([f.transform(data) for f in self.data])

    def __repr__(self):
        # Render as "module.ClassName([...])" listing the reducers.
        return "%s.%s(%s)" % (
            self.__class__.__module__,
            self.__class__.__name__,
            str([str(f) for f in self.data])
        )
class data(object):
    """Marker base class for feature/reducer objects; only provides a
    debug-friendly repr."""
    def __repr__(self):
        # Render as "module.ClassName()".
        return "%s.%s()" % (
            self.__class__.__module__,
            self.__class__.__name__
        )
#------------------------------------------------------------------------
# n_neighbors = 3
# random_state = 0
# # Load Digits dataset
# digits = datasets.load_digits()
# X, y = digits.data, digits.target
# # Split into train/test
# X_train, X_test, y_train, y_test = \
# train_test_split(X, y, test_size=0.5, stratify=y,
# random_state=random_state)
# dim = len(X[0])
# n_classes = len(np.unique(y))
#------------------------------------------------------------------------
class PCA():
    """Thin wrapper giving sklearn's PCA a (x, y)-style ``fit`` signature.

    NOTE: this class shadows the ``PCA`` imported from
    ``sklearn.decomposition`` at module level, so the wrapped estimator is
    imported locally under an alias to avoid self-recursion.
    """

    def __init__(self, n_components, random_state):
        # BUG FIX: the original had a stray extra ')' here (SyntaxError) and
        # ``PCA(...)`` resolved to this wrapper class itself.
        from sklearn.decomposition import PCA as _SklearnPCA
        self.pca = _SklearnPCA(n_components=n_components,
                               random_state=random_state)

    def fit(self, x, y):
        """Fit the underlying estimator (``y`` is accepted for pipeline
        compatibility; sklearn's PCA ignores it)."""
        return self.pca.fit(x, y)

    def transform(self, x):
        # BUG FIX: the original ``def transform(self, x)`` was missing the
        # trailing colon (SyntaxError).
        """Project *x* onto the fitted components."""
        return self.pca.transform(x)
# NOTE(review): everything below runs at import time and references names
# (``random_state``, ``n_neighbors``, ``X``, ``y``, ``X_train``, ``y_train``,
# ``X_test``, ``y_test``, lowercase ``pca``) that are only defined in the
# commented-out digits-dataset block above. As written, importing this
# module raises NameError — presumably that demo block should be
# re-enabled. Confirm before use.

# Reduce dimension to 2 with Incremental PCA
inc_pca = make_pipeline(StandardScaler(),
                        IncrementalPCA(n_components=2))

# Reduce dimension to 2 with Kernel PCA
kpca = make_pipeline(StandardScaler(),
                     KernelPCA(kernel="rbf",
                               n_components=2,
                               gamma=None,
                               fit_inverse_transform=True,
                               random_state=random_state,
                               n_jobs=1))

# Reduce dimension to 2 with Sparse PCA
sparsepca = make_pipeline(StandardScaler(),
                          SparsePCA(n_components=2,
                                    alpha=0.0001,
                                    random_state=random_state,
                                    n_jobs=-1))

# Reduce dimension to 2 with Singular Value Decomposition [SVD]
SVD = make_pipeline(StandardScaler(),
                    TruncatedSVD(n_components=2,
                                 algorithm='randomized',
                                 random_state=2019,
                                 n_iter=5))

# Reduce dimension to 2 with Gaussian Random Projection [GRP]
GRP = make_pipeline(StandardScaler(),
                    GaussianRandomProjection(n_components=2,
                                             eps = 0.5,
                                             random_state=random_state))

# Reduce dimension to 2 with LinearDiscriminantAnalysis
lda = make_pipeline(StandardScaler(),
                    LinearDiscriminantAnalysis(n_components=2))

# Reduce dimension to 2 with NeighborhoodComponentAnalysis
nca = make_pipeline(StandardScaler(),
                    NeighborhoodComponentsAnalysis(n_components=2,
                                                   random_state=random_state))

# Reduce dimension to 2 with Sparse Random Projection [SRP]
SRP = make_pipeline(StandardScaler(),
                    SparseRandomProjection(n_components=2,
                                           density = 'auto',
                                           eps = 0.5,
                                           random_state=random_state,
                                           dense_output = False))

# Reduce dimension to 2 with MultiDimensional Scaling [MDS]
mds = make_pipeline(StandardScaler(),
                    MDS(n_components=2,
                        n_init=12,
                        max_iter=1200,
                        metric=True,
                        n_jobs=4,
                        random_state=random_state))

# Reduce dimension to 2 with Isomap
isomap = make_pipeline(StandardScaler(),
                       Isomap(n_components=2,
                              n_jobs = 4,
                              n_neighbors = 5))

# Reduce dimension to 2 with MiniBatch Dictionary Learning
miniBatchDictLearning = make_pipeline(StandardScaler(),
                                      MiniBatchDictionaryLearning(n_components=2,
                                                                  batch_size = 200,
                                                                  alpha = 1,
                                                                  n_iter = 25,
                                                                  random_state=random_state))

# Reduce dimension to 2 with Independent Composent Analysis [ICA]
# NOTE(review): this assignment shadows the ``FastICA`` class imported from
# sklearn.decomposition — the estimator class is unreachable afterwards.
FastICA = make_pipeline(StandardScaler(),
                        FastICA(n_components=2,
                                algorithm = 'parallel',
                                whiten = True,
                                max_iter = 100,
                                random_state=random_state))

# Reduce dimension to 2 with T-distributed Stochastic Neighbor Embedding [T-SNE]
tsne = make_pipeline(StandardScaler(),
                     TSNE(n_components=2,
                          learning_rate=300,
                          perplexity = 30,
                          early_exaggeration = 12,
                          init = 'random',
                          random_state=random_state))

# Reduce dimension to 2 with Locally Linear Embedding [LLE]
lle = make_pipeline(StandardScaler(),
                    LocallyLinearEmbedding(n_components=2,
                                           n_neighbors = 10,
                                           method = 'modified',
                                           n_jobs = 4,
                                           random_state=random_state))

# Reduce dimension to 2 with L1-based feature selection
lsvc = make_pipeline(StandardScaler(),
                     LinearSVC(C=0.01,
                               penalty="l1",
                               dual=False))

#------------------------------------------------------------------------
# Use a nearest neighbor classifier to evaluate the methods
knn = KNeighborsClassifier(n_neighbors=n_neighbors)
#------------------------------------------------------------------------
# Make a list of the methods to be compared
# (MDS and TSNE are commented out — they have no ``transform`` method, so
# they cannot be evaluated in the loop below.)
dim_reduction_methods = [('PCA', pca),
                         ('LDA', lda),
                         ('NCA', nca),
                         ('INC PCA', inc_pca),
                         ('KPCA', kpca),
                         ##('Sparced PCA', sparsepca),
                         ('SVD', SVD),
                         ('GRP', GRP),
                         ('SRP', SRP),
                         #('MDS', mds),
                         ('IsoMap', isomap),
                         ('MBD', miniBatchDictLearning),
                         ('ICA', FastICA),
                         #('TSNE', tsne),
                         ('LLE', lle),]

plt.figure(figsize=(24, 36))
for i, (name, model) in enumerate(dim_reduction_methods):
    plt.subplot(3, 4, i + 1, aspect=1)
    # Fit the method's model
    model.fit(X_train, y_train)
    # Fit a nearest neighbor classifier on the embedded training set
    knn.fit(model.transform(X_train), y_train)
    # Compute the nearest neighbor accuracy on the embedded test set
    acc_knn = knn.score(model.transform(X_test), y_test)
    # Embed the data set in 2 dimensions using the fitted model
    X_embedded = model.transform(X)
    df = pd.DataFrame(np.concatenate((X_embedded, np.reshape(y, (-1, 1))), axis=1))
    # Plot the projected points and show the evaluation score
    plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y, s=30, cmap='Set1')
    plt.title("{}, KNN (k={})\nTest accuracy = {:.2f}".format(name,
                                                              n_neighbors,
                                                              acc_knn))
    # NOTE(review): the inner loop reuses ``i``, clobbering the subplot
    # index variable of the outer loop.
    for i, number in enumerate(y_test):
        plt.annotate(number,
                     df.loc[df[2]==number,[0,1]].mean(),
                     horizontalalignment='center',
                     verticalalignment='center',
                     weight='bold',
                     size='20')
plt.show()
| StarcoderdataPython |
1604509 | # import os
# import logging
# import logging.config
# import yaml
# def setup_logging(
# default_path='logging.yaml',
# default_level=logging.INFO,
# env_key='LOG_CFG'
# ):
# """Setup logging configuration
# """
# path = default_path
# value = os.getenv(env_key, None)
# print(path)
# if value:
# path = value
# if os.path.exists(path):
# with open(path, 'rt') as f:
# config = yaml.safe_load(f.read())
# logging.config.dictConfig(config)
# print('loaded yaml. path:{}'.format(path))
# else:
# logging.basicConfig(level=default_level)
# print('not found configFile.')
import os
import yaml
import logging.config
import logging
import coloredlogs
def setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):
    """Configure logging (and coloredlogs) from a YAML config file.

    The path defaults to *default_path* and may be overridden through the
    *env_key* environment variable; when the file is missing or invalid,
    logging falls back to ``basicConfig`` at *default_level*.
    """
    path = os.getenv(env_key, None) or default_path
    if not os.path.exists(path):
        logging.basicConfig(level=default_level)
        coloredlogs.install(level=default_level)
        print('Failed to load configuration file. Using default configs')
        return
    with open(path, 'rt') as fh:
        try:
            logging.config.dictConfig(yaml.safe_load(fh.read()))
            coloredlogs.install()
        except Exception as exc:
            print(exc)
            print('Error in Logging Configuration. Using default configs')
            logging.basicConfig(level=default_level)
            coloredlogs.install(level=default_level)
    print('loaded yaml. path:{}'.format(path))
def setup_logging_root(loggingLv=logging.DEBUG, filePath=None, handlers=None,
                       fmtParam=None, datefmtParam=None):
    """Configure the root logger directly.

    Attaches the given *handlers* (a stderr StreamHandler by default, plus a
    rotating file handler when *filePath* is given), all sharing one
    Formatter built from *fmtParam*/*datefmtParam* and the level *loggingLv*.
    """
    # BUG FIX: ``logging.handlers`` is a submodule that must be imported
    # explicitly before ``logging.handlers.RotatingFileHandler`` is usable.
    import logging.handlers

    if fmtParam is None:
        # BUG FIX: in the original the second string literal was a dead
        # standalone statement, so the '(%(asctime)s; ...)' suffix was
        # silently dropped from the format; parentheses restore the
        # intended implicit concatenation.
        fmtParam = ('%(levelname)s:%(name)s: %(message)s '
                    '(%(asctime)s; %(filename)s:%(lineno)d)')
    if datefmtParam is None:
        datefmtParam = "%Y-%m-%d %H:%M:%S"
    f = logging.Formatter(fmt=fmtParam, datefmt=datefmtParam)
    if handlers is None:
        handlers = [logging.StreamHandler()]
    if filePath is not None:
        handlers.append(
            logging.handlers.RotatingFileHandler(filePath,
                                                 encoding='utf8',
                                                 maxBytes=100000,
                                                 backupCount=1))
    log = logging.getLogger()
    log.setLevel(loggingLv)
    for h in handlers:
        h.setFormatter(f)
        h.setLevel(loggingLv)
        log.addHandler(h)
344617 | #!/usr/bin/python3
# <NAME> @2013
# steinkirch at gmail
from collections import defaultdict
def defaultdict_example():
    """Contrast manual dict-of-lists grouping with collections.defaultdict."""
    pairs = {('a', 1), ('b', 2), ('c', 3)}

    # Manual grouping: create the bucket on first sight of each key.
    manual = {}
    for key, value in pairs:
        manual.setdefault(key, []).append(value)
    print(manual)

    # defaultdict removes the bucket-creation boilerplate entirely.
    grouped = defaultdict(list)
    for key, value in pairs:
        grouped[key].append(value)
    print(grouped)


if __name__ == '__main__':
    defaultdict_example()
| StarcoderdataPython |
280595 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.table import QTable
__all__ = [
'SpectrumButterfly',
]
class SpectrumButterfly(QTable):
    """Spectral model butterfly class.

    Columns:

    - ``energy``
    - ``flux_lo``
    - ``flux``
    - ``flux_hi``
    """

    def plot(self, energy_range=None, ax=None, energy_power=0,
             energy_unit='TeV', flux_unit='cm-2 s-1 TeV-1', **kwargs):
        """Plot the butterfly as a shaded flux_lo..flux_hi band and return *ax*.

        ``kwargs`` are passed to ``matplotlib.pyplot.fill_between``
        (colour, alpha and linewidth get sensible defaults).
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax

        kwargs.setdefault('color', 'black')
        kwargs.setdefault('alpha', 0.2)
        kwargs.setdefault('linewidth', 0)

        energy = self['energy'].to(energy_unit)
        flux_lo = self['flux_lo'].to(flux_unit)
        flux_hi = self['flux_hi'].to(flux_unit)

        # Scale fluxes by E^energy_power for SED-style (E^n * F) plots.
        y_lo = flux_lo * np.power(energy, energy_power)
        y_hi = flux_hi * np.power(energy, energy_power)

        # Pick the energy unit out of the composite flux unit so the scaled
        # values can be converted to a consistent unit.
        eunit = [_ for _ in flux_lo.unit.bases if _.physical_type == 'energy'][0]
        y_lo = y_lo.to(eunit ** energy_power * flux_lo.unit)
        y_hi = y_hi.to(eunit ** energy_power * flux_hi.unit)

        if energy_range is None:
            energy_range = np.min(energy), np.max(energy)

        # Only shade bins with a positive upper flux inside the range.
        where = (y_hi > 0) & (energy >= energy_range[0]) & (energy <= energy_range[1])
        ax.fill_between(energy.value, y_lo.value, y_hi.value, where=where, **kwargs)

        ax.set_xlabel('Energy [{}]'.format(self['energy'].unit))
        if energy_power > 0:
            ax.set_ylabel('E{0} * Flux [{1}]'.format(energy_power, y_lo.unit))
        else:
            ax.set_ylabel('Flux [{}]'.format(y_lo.unit))

        # NOTE(review): nonposx/nonposy were removed in matplotlib 3.3;
        # newer versions expect nonpositive='clip'.
        ax.set_xscale("log", nonposx='clip')
        ax.set_yscale("log", nonposy='clip')

        return ax
| StarcoderdataPython |
12858998 | <filename>tests/timesheet/test_regroup.py
import datetime
from . import create_timesheet
def test_regroup_doesnt_regroup_entries_with_different_alias():
    # Same day and duration but different aliases -> two separate entries.
    contents = """01.04.2013
foo 2 bar
bar 2 bar"""

    t = create_timesheet(contents)
    entries = list(t.entries.filter(regroup=True).values())[0]
    assert len(entries) == 2


def test_regroup_doesnt_regroup_entries_with_different_description():
    # Same alias but different descriptions -> entries stay separate.
    contents = """01.04.2013
foo 2 bar
foo 2 baz"""

    t = create_timesheet(contents)
    entries = list(t.entries.filter(regroup=True).values())[0]
    assert len(entries) == 2


def test_regroup_regroups_entries_with_same_alias_and_description():
    # The two matching 'foo ... bar' entries merge; 'bar 1 barz' stays apart.
    contents = """01.04.2013
foo 2 bar
foo 3 bar
bar 1 barz"""

    t = create_timesheet(contents)
    entries = list(t.entries.filter(regroup=True).values())[0]
    assert len(entries) == 2
def test_regroup_adds_time():
    # Regrouped durations are summed: 2 + 3 -> 5 hours.
    contents = """01.04.2013
foo 2 bar
foo 3 bar"""

    t = create_timesheet(contents)
    entries = list(t.entries.filter(regroup=True).values())[0]
    assert entries[0].hours == 5


def test_regroup_adds_time_with_start_and_end_time():
    # A 0900-1000 range counts as 1 hour, added to the plain 2 hours.
    contents = """01.04.2013
foo 2 bar
foo 0900-1000 bar"""

    t = create_timesheet(contents)
    entries = list(t.entries.filter(regroup=True).values())[0]
    assert entries[0].hours == 3


def test_regroup_doesnt_regroup_ignored_entries_with_non_ignored_entries():
    # Entries flagged as ignored ('?') must not be merged with normal ones.
    contents = """01.04.2013
foo 2 bar
? foo 3 test"""

    t = create_timesheet(contents)
    entries = list(t.entries.filter(regroup=True).values())[0]
    assert len(entries) == 2
def test_regroup_regroups_entries_with_partial_time():
    # Partial-time entries ('-1100' continues from the previous entry's end)
    # are still merged by alias+description; the 'foo' group totals 4 hours.
    contents = """01.04.2013
foo 2 bar
foo 0800-0900 bar
bar -1000 bar
foo -1100 bar"""

    t = create_timesheet(contents)
    entries = t.entries.filter(regroup=True)[datetime.date(2013, 4, 1)]
    assert len(entries) == 2
    assert entries[0].hours == 4


def test_set_pushed_flag_on_regrouped_entry_sets_flag_on_associated_entries():
    # Setting pushed on a regrouped entry must propagate to every underlying
    # entry, which serializes as a '=' prefix on each source line.
    contents = """01.04.2013
foo 2 bar
bar 0900-1000 bar
foo 1 bar"""

    t = create_timesheet(contents)
    entries = t.entries.filter(regroup=True)[datetime.date(2013, 4, 1)]
    for entry in entries:
        entry.pushed = True

    lines = t.entries.to_lines()
    assert lines == ["01.04.2013", "= foo 2 bar", "= bar 0900-1000 bar",
                     "= foo 1 bar"]
| StarcoderdataPython |
5089915 | import configparser
import dataclasses
import logging
import pathlib
from cod.instance import BaseInstance
from cod.tunnel import Tunnel
class Config:
    """
    Loads the "~/cod.ini" configuration file into memory. It is an INI file
    with the following sections:

        # global settings here
        ssh=/usr/bin/ssh
        ssh-agent=/usr/bin/ssh-agent
        ssh-add=/usr/bin/ssh-add

        [aws]
        aws_access_key_id=
        aws_secret_access_key=

        [keys]
        my-key=/home/me/.ssh/some_key

        [instance.<name>]
        host=
        port=
        login=
        identity=
        agent_forwarding=

        [tunnel.<name>]
        instance=
        address=
        remote_port=
        local_port=
        local=
    """

    def __init__(self, dot_file=None):
        """
        Load the dot file and parse it.
        """
        self.dot_file = dot_file

        # default to ~/cod.ini
        if not self.dot_file:
            self.dot_file = str(pathlib.Path.home().joinpath('cod.ini'))

        try:
            # attempt to read the dot file as an INI; note that read()
            # silently skips files that do not exist.
            self.config = configparser.ConfigParser()
            self.config.read(self.dot_file)
        except OSError as e:
            logging.warning('%s does not exist; creating...', self.dot_file)
        except configparser.Error as e:
            logging.error('Failed to load %s; %s', self.dot_file, e)

    def save(self):
        """
        Write the configuration file.
        """
        try:
            with open(self.dot_file, mode='wt') as fp:
                self.config.write(fp)
        except OSError as e:
            logging.error('Failed to save %s; %s', self.dot_file, e)

    def get(self, option, default=None):
        """
        Lookup a global option value in the DEFAULT section.
        """
        return self.config.get('DEFAULT', option, fallback=default)

    def set(self, option, value):
        """
        Set a global option value.
        """
        # BUG FIX: configparser treats DEFAULT specially — has_section(
        # 'DEFAULT') always returns False and add_section('DEFAULT') raises
        # ValueError, so the original raised on every call. Setting the
        # option directly is always valid.
        self.config.set('DEFAULT', option, str(value))

    def identity(self, name):
        """
        Return the path configured for a saved SSH key, or None.
        """
        if not self.config.has_section('keys'):
            return None
        return self.config.get('keys', name, fallback=None)

    def set_identity(self, name, value):
        """
        Store the key path under [keys]; call save() to persist it.
        """
        if not self.config.has_section('keys'):
            self.config.add_section('keys')
        self.config.set('keys', name, str(value))

    def section(self, name, dataclass_constructor):
        """
        Lookup a section and return an instance of it, or None when the
        section does not exist.
        """
        if self.config.has_section(name):
            return dataclass_constructor(**dict(self.config.items(name)))

    def set_section(self, name, dataclass_instance):
        """
        Overwrite a section in the configuration with the fields of a
        dataclass instance (None-valued fields are skipped).
        """
        if not self.config.has_section(name):
            self.config.add_section(name)

        # add/update the options
        for k, v in dataclasses.asdict(dataclass_instance).items():
            if v is not None:
                self.config.set(name, k, str(v))

    def remove_section(self, name):
        """
        Remove a section from the configuration.
        """
        if self.config.has_section(name):
            self.config.remove_section(name)

    def tunnel(self, name):
        """
        Return the settings for a tunnel.
        """
        return self.section(f'tunnel.{name}', Tunnel)

    def set_tunnel(self, name, tunnel):
        """
        Update the parameters of a tunnel entry.
        """
        # BUG FIX: the original wrote to 'instance.<name>' (copy/paste
        # error), so a saved tunnel could never be read back via tunnel().
        self.set_section(f'tunnel.{name}', tunnel)

    def remove_tunnel(self, name):
        """
        Remove a tunnel section.
        """
        self.remove_section(f'tunnel.{name}')

    def instance(self, name):
        """
        Return the details of a particular instance as a BaseInstance. If
        the instance doesn't exist, returns None.
        """
        return self.section(f'instance.{name}', BaseInstance)

    def set_instance(self, name, instance):
        """
        Overwrite the instance section for a particular instance with
        updated BaseInstance connection settings.
        """
        self.set_section(f'instance.{name}', instance)

    def remove_instance(self, name):
        """
        Remove an instance section.
        """
        self.remove_section(f'instance.{name}')
| StarcoderdataPython |
42065 | from __future__ import annotations
import subprocess
import pytest
from conftest import CustomTOMLFile
@pytest.mark.parametrize("command", [["update"], ["types", "update"]])
def test_update(command: list[str], toml_file: CustomTOMLFile):
content = toml_file.poetry
content["dependencies"].add("requests", "^2.27.1")
del content["dependencies"]["colorama"]
toml_file.write_poetry(content)
subprocess.run(["python", "-m", "poetry", *command])
assert "types-colorama" not in toml_file.poetry["group"]["types"]["dependencies"]
assert "types-requests" in toml_file.poetry["group"]["types"]["dependencies"]
@pytest.mark.parametrize(
    "command", [["add", "requests"], ["types", "add", "requests"]]
)
def test_add(command: list[str], toml_file: CustomTOMLFile):
    """Adding a runtime dependency should also add its stub package."""
    subprocess.run(["python", "-m", "poetry", *command])
    types_deps = toml_file.poetry["group"]["types"]["dependencies"]
    assert "types-requests" in types_deps
@pytest.mark.parametrize(
    "command", [["remove", "colorama"], ["types", "remove", "colorama"]]
)
def test_remove(command: list[str], toml_file: CustomTOMLFile):
    """Removing a runtime dependency should also drop its stub package."""
    subprocess.run(["python", "-m", "poetry", *command])
    types_deps = toml_file.poetry["group"]["types"]["dependencies"]
    assert "types-colorama" not in types_deps
| StarcoderdataPython |
5101186 | <filename>prepare_reads_file.py
import pandas as pd
import click
from prepare_reads_file_helpers import parse_html, prepare_hsa_files
import os
@click.command()
@click.argument('hsa_gff_mirbase_file')
@click.argument('output_file')
@click.argument('output_folder')
def main(hsa_gff_mirbase_file, output_file, output_folder):
    """Build a per-mature-miRNA table with precursor coordinates and reads.

    Splits the miRBase coordinate table into precursors and matures, joins
    each mature miRNA ('From' column) to its primary transcript, attaches a
    'reads' column (computed by the project helper parse_html — presumably
    scraped read counts; verify against prepare_reads_file_helpers), and
    writes the result to OUTPUT_FILE as CSV.
    """
    # exist_ok=True is idempotent and avoids the original
    # check-then-create race between os.path.exists and os.makedirs.
    os.makedirs(output_folder, exist_ok=True)
    os.makedirs(os.path.join(output_folder, 'temp_reference'), exist_ok=True)
    prepare_hsa_files(hsa_gff_mirbase_file, output_folder)
    loc_info = pd.read_csv(output_folder + '/temp_reference/hsa_mirbase_coordinates.csv', sep=',')
    # precursors (primary transcripts) vs. mature miRNAs
    pre_mirbase = loc_info[loc_info['type'] == 'miRNA_primary_transcript'].copy()
    matures = loc_info[loc_info['type'] == 'miRNA'].copy()
    pre_mirbase.drop(['.', '.2', '.3', 'Alias', 'type', 'From', 'chr'], inplace=True, axis=1)
    matures.drop(['.', '.2', '.3', 'type'], inplace=True, axis=1)
    # each mature row references its precursor via 'From'
    joined_df = matures.join(pre_mirbase.set_index('ID'), on='From', how='left', rsuffix='_pre')
    # pass parse_html directly instead of wrapping it in a lambda
    joined_df['reads'] = joined_df.apply(parse_html, axis=1)
    joined_df.to_csv(output_file, sep=',', index=False)
    click.echo("Reads file created")
if __name__ == "__main__":
    main()
| StarcoderdataPython |
220070 | <filename>models/models.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.autograd as autograd
from torch.autograd.variable import Variable
from threading import Lock
from torch.distributions import Categorical
global_lock = Lock()
# Download URLs for torchvision's ImageNet-pretrained ResNet checkpoints,
# keyed by architecture name; used by the resnet* constructors below when
# pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding 1 (keeps spatial size at
    stride 1); BatchNorm that follows provides the affine shift."""
    return nn.Conv2d(in_planes, out_planes, 3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block used by ResNet-18/34.

    Output channels equal ``planes * expansion``; ``downsample`` (if given)
    projects the identity branch to the residual branch's shape.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.downsample is not None:
            identity = self.downsample(x)
        # residual addition followed by the final activation
        return self.relu(y + identity)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block used by ResNet-50+.

    The final 1x1 conv expands to ``planes * expansion`` channels;
    ``downsample`` (if given) projects the identity branch to match.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # residual addition followed by the final activation
        return self.relu(y + identity)
# ==============================
# Original Model without Gating
# ==============================
class ResNet(nn.Module):
    """Plain (ungated) ResNet backbone.

    ``block`` is BasicBlock or Bottleneck; ``layers`` gives the number of
    blocks per stage. The 7x7 stem + AvgPool2d(7) geometry targets
    224x224 inputs (ImageNet-style) — other sizes need a different pool.
    """
    def __init__(self, block, layers, num_classes=1000):
        # NOTE: self.inplanes is mutated by each _make_layer call below,
        # so the four stages must be built in order.
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks; the first block
        gets a projection shortcut when shape or stride changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """ResNet-18; loads torchvision's ImageNet weights when *pretrained*."""
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state)
    return net
def resnet34(pretrained=False, **kwargs):
    """ResNet-34; loads torchvision's ImageNet weights when *pretrained*."""
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(state)
    return net
def resnet50(pretrained=False, **kwargs):
    """ResNet-50; loads torchvision's ImageNet weights when *pretrained*."""
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet50'])
        net.load_state_dict(state)
    return net
def resnet74(pretrained=False, **kwargs):
    """ResNet-74 (no pretrained checkpoint; *pretrained* is ignored)."""
    return ResNet(Bottleneck, [3, 4, 14, 3], **kwargs)
def resnet101(pretrained=False, **kwargs):
    """ResNet-101; loads torchvision's ImageNet weights when *pretrained*."""
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet101'])
        net.load_state_dict(state)
    return net
def resnet152(pretrained=False, **kwargs):
    """ResNet-152; loads torchvision's ImageNet weights when *pretrained*."""
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(state)
    return net
# ======================
# Recurrent Gate Design
# ======================
def repackage_hidden(h):
    """Detach hidden state (a Variable/Tensor or nested tuple of them)
    from the computation graph so BPTT does not reach into old batches.

    Uses isinstance instead of ``type(h) == Variable``: since the
    Variable/Tensor merge, concrete hidden states are torch.Tensor
    subclass instances, so the exact-type check always failed and the
    function fell through to the tuple branch.
    """
    if isinstance(h, Variable):
        return Variable(h.data)
    return tuple(repackage_hidden(v) for v in h)
class RNNGate(nn.Module):
    """Single-layer LSTM gate for SkipNet-SP: maps a pooled feature
    embedding to a per-sample binary skip/keep mask, binarized with a
    straight-through estimator so gradients flow through ``prob``."""
    def __init__(self, input_dim, hidden_dim, rnn_type='lstm'):
        super(RNNGate, self).__init__()
        self.rnn_type = rnn_type
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        if self.rnn_type == 'lstm':
            self.rnn = nn.LSTM(input_dim, hidden_dim)
        else:
            # only 'lstm' is implemented; anything else leaves rnn unset
            self.rnn = None
        self.hidden = None
        # reduce dim: hidden state -> single gating logit
        self.proj = nn.Conv2d(in_channels=hidden_dim, out_channels=1,
                              kernel_size=1, stride=1)
        self.prob = nn.Sigmoid()
    def init_hidden(self, batch_size):
        # Before we've done anything, we dont have any hidden state.
        # Refer to the Pytorch documentation to see exactly
        # why they have this dimensionality.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        # NOTE(review): allocates on CUDA unconditionally — CPU-only runs
        # will fail here.
        return (autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()))
    def repackage_hidden(self):
        self.hidden = repackage_hidden(self.hidden)
    def forward(self, x):
        # x: one gate embedding per sample; fed to the LSTM as a
        # single time step of shape (1, batch, input_dim).
        batch_size = x.size(0)
        self.rnn.flatten_parameters()
        out, self.hidden = self.rnn(x.view(1, batch_size, -1), self.hidden)
        # NOTE(review): squeeze() drops the batch dim too when
        # batch_size == 1 — presumably batches are always > 1; confirm.
        out = out.squeeze()
        proj = self.proj(out.view(out.size(0), out.size(1), 1, 1,)).squeeze()
        prob = self.prob(proj)
        # straight-through: hard threshold forward, identity gradient back
        disc_prob = (prob > 0.5).float().detach() - prob.detach() + prob
        disc_prob = disc_prob.view(batch_size, 1, 1, 1)
        return disc_prob, prob
# =======================
# Recurrent Gate Model
# =======================
class RecurrentGatedResNet(nn.Module):
    """SkipNet with a shared recurrent gate (supervised-policy variant).

    Each residual block is stored as individual attributes
    ``group{g}_ds{i}`` / ``group{g}_layer{i}`` / ``group{g}_gate{i}`` so the
    forward pass can blend "execute block" and "reuse previous feature map"
    per sample according to the gate's mask.
    """
    def __init__(self, block, layers, num_classes=1000, embed_dim=10,
                 hidden_dim=10, gate_type='rnn', **kwargs):
        # self.inplanes is mutated by _make_group; build groups in order.
        self.inplanes = 64
        super(RecurrentGatedResNet, self).__init__()
        self.num_layers = layers
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        # going to have 4 groups of layers. For the easiness of skipping,
        # We are going to break the sequential of layers into a list of layers.
        # pool_size matches each stage's spatial size for 224x224 inputs.
        self._make_group(block, 64, layers[0], group_id=1, pool_size=56)
        self._make_group(block, 128, layers[1], group_id=2, pool_size=28)
        self._make_group(block, 256, layers[2], group_id=3, pool_size=14)
        self._make_group(block, 512, layers[3], group_id=4, pool_size=7)
        if gate_type == 'rnn':
            self.control = RNNGate(embed_dim, hidden_dim, rnn_type='lstm')
        else:
            # unsupported gate types only warn; forward would then fail
            print('gate type {} not implemented'.format(gate_type))
            self.control = None
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; identity BN; scaled normal for Linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(0) * m.weight.size(1)
                m.weight.data.normal_(0, math.sqrt(2. / n))
    def _make_group(self, block, planes, layers, group_id=1, pool_size=56):
        """ Create the whole group """
        for i in range(layers):
            # first block of groups 2-4 downsamples spatially
            if group_id > 1 and i == 0:
                stride = 2
            else:
                stride = 1
            meta = self._make_layer_v2(block, planes, stride=stride,
                                       pool_size=pool_size)
            setattr(self, 'group{}_ds{}'.format(group_id, i), meta[0])
            setattr(self, 'group{}_layer{}'.format(group_id, i), meta[1])
            setattr(self, 'group{}_gate{}'.format(group_id, i), meta[2])
    def _make_layer_v2(self, block, planes, stride=1, pool_size=56):
        """ create one block and optional a gate module """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layer = block(self.inplanes, planes, stride, downsample)
        self.inplanes = planes * block.expansion
        # this is for having the same input dimension to rnn gate.
        gate_layer = nn.Sequential(
            nn.AvgPool2d(pool_size),
            nn.Conv2d(in_channels=planes * block.expansion,
                      out_channels=self.embed_dim,
                      kernel_size=1,
                      stride=1))
        if downsample:
            return downsample, layer, gate_layer
        else:
            return None, layer, gate_layer
    def repackage_hidden(self):
        # detach the gate's LSTM state from the previous batch's graph
        self.control.hidden = repackage_hidden(self.control.hidden)
    def forward(self, x):
        """Return (logits, masks, gate_probs, gate_hidden); masks/gprobs
        exclude the decision after the very last block."""
        # pdb.set_trace()
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # reinitialize hidden units
        self.control.hidden = self.control.init_hidden(batch_size)
        masks = []
        gprobs = []
        # must pass through the first layer in first group
        x = getattr(self, 'group1_layer0')(x)
        # gate takes the output of the current layer
        gate_feature = getattr(self, 'group1_gate0')(x)
        mask, gprob = self.control(gate_feature)
        gprobs.append(gprob)
        masks.append(mask.squeeze())
        prev = x  # input of next layer
        for g in range(4):
            for i in range(0 + int(g == 0), self.num_layers[g]):
                # downsample 'prev' so skipped samples keep a shape-
                # compatible feature map across stage boundaries
                if getattr(self, 'group{}_ds{}'.format(g+1, i)) is not None:
                    prev = getattr(self, 'group{}_ds{}'.format(g+1, i))(prev)
                x = getattr(self, 'group{}_layer{}'.format(g+1, i))(x)
                # per-sample blend: mask==1 executes the block, 0 skips it
                prev = x = mask.expand_as(x)*x + (1-mask).expand_as(prev)*prev
                gate_feature = getattr(self, 'group{}_gate{}'.format(g+1, i))(x)
                mask, gprob = self.control(gate_feature)
                if not (g == 3 and i == (self.num_layers[3]-1)):
                    # not add the last mask to masks
                    gprobs.append(gprob)
                    masks.append(mask.squeeze())
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x, masks, gprobs, self.control.hidden
def imagenet_rnn_gate_18(pretrained=False, **kwargs):
    """SkipNet-18 with supervised-policy (SP) recurrent gating."""
    return RecurrentGatedResNet(BasicBlock, [2, 2, 2, 2],
                                embed_dim=10, hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_34(pretrained=False, **kwargs):
    """SkipNet-34 with supervised-policy (SP) recurrent gating."""
    return RecurrentGatedResNet(BasicBlock, [3, 4, 6, 3],
                                embed_dim=10, hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_50(pretrained=False, **kwargs):
    """SkipNet-50 with supervised-policy (SP) recurrent gating."""
    return RecurrentGatedResNet(Bottleneck, [3, 4, 6, 3],
                                embed_dim=10, hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_101(pretrained=False, **kwargs):
    """SkipNet-101 with supervised-policy (SP) recurrent gating."""
    return RecurrentGatedResNet(Bottleneck, [3, 4, 23, 3],
                                embed_dim=10, hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_152(pretrained=False, **kwargs):
    """SkipNet-152 with supervised-policy (SP) recurrent gating."""
    return RecurrentGatedResNet(Bottleneck, [3, 8, 36, 3],
                                embed_dim=10, hidden_dim=10, gate_type='rnn')
# =============================
# Recurrent Gate Model with RL
# =============================
class RNNGatePolicy(nn.Module):
    """LSTM gate for SkipNet-HRL: samples a Bernoulli skip/keep action
    from the gate probability during training (REINFORCE), thresholds at
    0.5 during evaluation."""
    def __init__(self, input_dim, hidden_dim, rnn_type='lstm'):
        super(RNNGatePolicy, self).__init__()
        self.rnn_type = rnn_type
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        if self.rnn_type == 'lstm':
            self.rnn = nn.LSTM(input_dim, hidden_dim)
        else:
            self.rnn = None
        self.hidden = None
        # hidden state -> single gating logit
        self.proj = nn.Conv2d(in_channels=hidden_dim, out_channels=1,
                              kernel_size=1, stride=1)
        self.prob = nn.Sigmoid()
    def hotter(self, t):
        # divide projection weights by a temperature t > 1 to soften
        # (or t < 1 to sharpen) the gate's output distribution
        self.proj.weight.data /= t
        self.proj.bias.data /= t
    def init_hidden(self, batch_size):
        # Before we've done anything, we dont have any hidden state.
        # Refer to the Pytorch documentation to see exactly
        # why they have this dimensionality.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        # NOTE(review): allocates on CUDA unconditionally — CPU-only runs
        # will fail here.
        return (autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()))
    def repackage_hidden(self):
        self.hidden = repackage_hidden(self.hidden)
    def forward(self, x):
        batch_size = x.size(0)
        self.rnn.flatten_parameters()
        out, self.hidden = self.rnn(x.view(1, batch_size, -1), self.hidden)
        # NOTE(review): squeeze() drops the batch dim too when
        # batch_size == 1 — presumably batches are always > 1; confirm.
        out = out.squeeze()
        out = out.view(out.size(0), out.size(1), 1, 1)
        proj = self.proj(out).squeeze()
        prob = self.prob(proj)
        # two-class distribution [P(skip), P(keep)] per sample
        bi_prob = torch.stack([1-prob, prob]).t()
        # do action selection in the forward pass
        if self.training:
            # action = bi_prob.multinomial()
            dist = Categorical(bi_prob)
            action = dist.sample()
        else:
            dist = None
            action = (prob > 0.5).float()
        action_reshape = action.view(action.size(0), 1, 1, 1).float()
        return action_reshape, prob, action, dist
# ================================
# Recurrent Gate Model with RL
# ================================
class RecurrentGatedRLResNet(nn.Module):
    """SkipNet trained with hybrid RL (REINFORCE over gate actions).

    Structure mirrors RecurrentGatedResNet, but the gate is an
    RNNGatePolicy that samples actions, and the forward pass stashes
    per-device actions/outputs/targets/dists for the RL loss (guarded by
    ``global_lock`` because DataParallel replicas write concurrently).
    """
    def __init__(self, block, layers, num_classes=1000, embed_dim=10,
                 hidden_dim=10, **kwargs):
        # self.inplanes is mutated by _make_group; build groups in order.
        self.inplanes = 64
        super(RecurrentGatedRLResNet, self).__init__()
        self.num_layers = layers
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        # going to have 4 groups of layers. For the easiness of skipping,
        # We are going to break the sequential of layers into a list of layers.
        self._make_group(block, 64, layers[0], group_id=1, pool_size=56)
        self._make_group(block, 128, layers[1], group_id=2, pool_size=28)
        self._make_group(block, 256, layers[2], group_id=3, pool_size=14)
        self._make_group(block, 512, layers[3], group_id=4, pool_size=7)
        self.control = RNNGatePolicy(embed_dim, hidden_dim, rnn_type='lstm')
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.softmax = nn.Softmax()
        # save everything — keyed by CUDA device id so DataParallel
        # replicas don't clobber each other
        self.saved_actions = {}
        self.saved_dists = {}
        self.saved_outputs = {}
        self.saved_targets = {}
        # He-style init for convs; identity BN; scaled normal + zero bias
        # for Linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(0) * m.weight.size(1)
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()
    def _make_group(self, block, planes, layers, group_id=1, pool_size=56):
        """ Create the whole group"""
        for i in range(layers):
            # first block of groups 2-4 downsamples spatially
            if group_id > 1 and i == 0:
                stride = 2
            else:
                stride = 1
            meta = self._make_layer_v2(block, planes, stride=stride,
                                       pool_size=pool_size)
            setattr(self, 'group{}_ds{}'.format(group_id, i), meta[0])
            setattr(self, 'group{}_layer{}'.format(group_id, i), meta[1])
            setattr(self, 'group{}_gate{}'.format(group_id, i), meta[2])
    def _make_layer_v2(self, block, planes, stride=1, pool_size=56):
        """ create one block and optional a gate module """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layer = block(self.inplanes, planes, stride, downsample)
        self.inplanes = planes * block.expansion
        # pool + 1x1 conv gives the gate a fixed-size embedding
        gate_layer = nn.Sequential(
            nn.AvgPool2d(pool_size),
            nn.Conv2d(in_channels=planes * block.expansion,
                      out_channels=self.embed_dim,
                      kernel_size=1,
                      stride=1))
        return downsample, layer, gate_layer
    def forward(self, x, target_var, reinforce=False):
        """Return (logits, masks, gate_probs, gate_hidden); side effect:
        stores actions/outputs/targets/dists in the per-device dicts."""
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # reinitialize hidden units
        self.control.hidden = self.control.init_hidden(batch_size)
        masks = []
        gprobs = []
        actions = []
        dists = []
        # must pass through the first layer in first group
        x = getattr(self, 'group1_layer0')(x)
        # gate takes the output of the current layer
        gate_feature = getattr(self, 'group1_gate0')(x)
        mask, gprob, action, dist = self.control(gate_feature)
        gprobs.append(gprob)
        masks.append(mask.squeeze())
        prev = x  # input of next layer
        current_device = torch.cuda.current_device()
        actions.append(action)
        dists.append(dist)
        for g in range(4):
            for i in range(0 + int(g == 0), self.num_layers[g]):
                if getattr(self, 'group{}_ds{}'.format(g+1, i)) is not None:
                    prev = getattr(self, 'group{}_ds{}'.format(g+1, i))(prev)
                x = getattr(self, 'group{}_layer{}'.format(g+1, i))(x)
                # per-sample blend: mask==1 executes the block, 0 skips it
                prev = x = mask.expand_as(x)*x + (1-mask).expand_as(prev)*prev
                if not (g == 3 and (i == self.num_layers[g] - 1)):
                    # no gate decision is needed after the final block
                    gate_feature = getattr(self,
                                           'group{}_gate{}'.format(g+1, i))(x)
                    mask, gprob, action, dist = self.control(gate_feature)
                    gprobs.append(gprob)
                    masks.append(mask.squeeze())
                    actions.append(action)
                    dists.append(dist)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        if reinforce:
            # also sample the class prediction as an RL action
            softmax = self.softmax(x)
            # action = softmax.multinomial()
            dist = Categorical(softmax)
            action = dist.sample()
            actions.append(action)
            dists.append(dist)
        with global_lock:
            self.saved_actions[current_device] = actions
            self.saved_outputs[current_device] = x
            self.saved_targets[current_device] = target_var
            self.saved_dists[current_device] = dists
        return x, masks, gprobs, self.control.hidden
def imagenet_rnn_gate_rl_18(pretrained=False, **kwargs):
    """SkipNet-18 + HRL; architecture identical to SkipNet-18 + SP."""
    return RecurrentGatedRLResNet(BasicBlock, [2, 2, 2, 2], embed_dim=10,
                                  hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_rl_34(pretrained=False, **kwargs):
    """SkipNet-34 + HRL; architecture identical to SkipNet-34 + SP."""
    return RecurrentGatedRLResNet(BasicBlock, [3, 4, 6, 3], embed_dim=10,
                                  hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_rl_50(pretrained=False, **kwargs):
    """SkipNet-50 + HRL; architecture identical to SkipNet-50 + SP."""
    return RecurrentGatedRLResNet(Bottleneck, [3, 4, 6, 3], embed_dim=10,
                                  hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_rl_101(pretrained=False, **kwargs):
    """SkipNet-101 + HRL; architecture identical to SkipNet-101 + SP."""
    return RecurrentGatedRLResNet(Bottleneck, [3, 4, 23, 3], embed_dim=10,
                                  hidden_dim=10, gate_type='rnn')
def imagenet_rnn_gate_rl_152(pretrained=False, **kwargs):
    """SkipNet-152 + HRL; architecture identical to SkipNet-152 + SP."""
    return RecurrentGatedRLResNet(Bottleneck, [3, 8, 36, 3], embed_dim=10,
                                  hidden_dim=10, gate_type='rnn')
| StarcoderdataPython |
1822526 | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Token names for ANSI C / C++. PLY requires the list to be named
# ``tokens`` and picks up each ``t_<NAME>`` regex below by that name,
# so these identifiers must match exactly.
tokens = [
    # Literals (identifier, integer constant, float constant, string constant,
    # char const)
    'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
    # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
    'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
    'LOR', 'LAND', 'LNOT',
    'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
    # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
    'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
    'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
    # Increment/decrement (++,--)
    'INCREMENT', 'DECREMENT',
    # Structure dereference (->)
    'ARROW',
    # Ternary operator (?)
    'TERNARY',
    # Delimeters ( ) [ ] { } , . ; :
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'COMMA', 'PERIOD', 'SEMI', 'COLON',
    # Ellipsis (...)
    'ELLIPSIS',
]
# Simple token rules: each t_<NAME> string is the regex PLY uses for that
# token. All patterns are raw strings so regex metacharacters are escaped
# exactly once.
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimeters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    # NOTE: in PLY the raw-string "docstring" above IS this rule's regex;
    # it must not be edited as documentation. Count embedded newlines so
    # line numbers stay correct, and return the token to the caller.
    t.lexer.lineno += t.value.count('\n')
    return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
    r'//.*\n'
    # The pattern consumes the trailing newline, so bump lineno by one.
    # NOTE(review): a // comment on the last line of input without a final
    # newline will not match this rule — confirm whether that matters here.
    t.lexer.lineno += 1
    return t
| StarcoderdataPython |
1657031 | # -*- coding: utf-8 -*-
# Standard system of measurement in the United States, also known as the
# ``british`` or ``imperial`` system.
IMPERIAL = 'imperial'
# Metric system of measurement.
METRIC = 'metric'
| StarcoderdataPython |
1798963 | <filename>src/tutorial/employee.py
"""
Automatically generated by Zserio Python extension version 2.4.0.
Generator setup: writerCode, pubsubCode, serviceCode, sqlCode.
"""
from __future__ import annotations
import typing
import zserio
import tutorial.experience
import tutorial.role
class Employee:
    def __init__(
            self,
            age_: int = int(),
            name_: str = str(),
            salary_: int = int(),
            bonus_: typing.Optional[int] = None,
            role_: typing.Union[tutorial.role.Role, None] = None,
            skills_: typing.Optional[typing.List[tutorial.experience.Experience]] = None) -> None:
        # Generated by Zserio: plain fields are stored as-is; the auto
        # array 'skills' is wrapped in a zserio Array with object traits.
        self._age_ = age_
        self._name_ = name_
        self._salary_ = salary_
        self._bonus_ = bonus_
        self._role_ = role_
        if skills_ is None:
            self._skills_ = None
        else:
            self._skills_ = zserio.array.Array(zserio.array.ObjectArrayTraits(self._element_creator_skills, self._packed_element_creator_skills, tutorial.experience.Experience.create_packing_context), skills_, is_auto=True)
    @classmethod
    def from_reader(
            cls: typing.Type['Employee'],
            zserio_reader: zserio.BitStreamReader) -> 'Employee':
        # Alternate constructor: deserialize one Employee from a bit stream.
        instance = cls()
        instance.read(zserio_reader)
        return instance
    @classmethod
    def from_reader_packed(
            cls: typing.Type['Employee'],
            zserio_context_node: zserio.array.PackingContextNode,
            zserio_reader: zserio.BitStreamReader) -> 'Employee':
        # Alternate constructor for zserio packed-array decoding.
        instance = cls()
        instance.read_packed(zserio_context_node, zserio_reader)
        return instance
    def __eq__(self, other: object) -> bool:
        # Optional fields take part in the comparison only when their
        # zserio "used" condition holds (bonus presence / skills guarded
        # by role == DEVELOPER).
        if isinstance(other, Employee):
            return (self._age_ == other._age_ and
                    self._name_ == other._name_ and
                    self._salary_ == other._salary_ and
                    (not self.is_bonus_used() or self._bonus_ == other._bonus_) and
                    self._role_ == other._role_ and
                    (not self.is_skills_used() or self._skills_ == other._skills_))
        return False
    def __hash__(self) -> int:
        # Must stay consistent with __eq__: optional fields are folded
        # into the hash only when used.
        result = zserio.hashcode.HASH_SEED
        result = zserio.hashcode.calc_hashcode(result, hash(self._age_))
        result = zserio.hashcode.calc_hashcode(result, hash(self._name_))
        result = zserio.hashcode.calc_hashcode(result, hash(self._salary_))
        if self.is_bonus_used():
            result = zserio.hashcode.calc_hashcode(result, hash(self._bonus_))
        result = zserio.hashcode.calc_hashcode(result, hash(self._role_))
        if self.is_skills_used():
            result = zserio.hashcode.calc_hashcode(result, hash(self._skills_))
        return result
    # --- generated field accessors (one property + setter per field) ----
    @property
    def age(self) -> int:
        return self._age_
    @age.setter
    def age(self, age_: int) -> None:
        self._age_ = age_
    @property
    def name(self) -> str:
        return self._name_
    @name.setter
    def name(self, name_: str) -> None:
        self._name_ = name_
    @property
    def salary(self) -> int:
        return self._salary_
    @salary.setter
    def salary(self, salary_: int) -> None:
        self._salary_ = salary_
    @property
    def bonus(self) -> typing.Optional[int]:
        return self._bonus_
    @bonus.setter
    def bonus(self, bonus_: typing.Optional[int]) -> None:
        self._bonus_ = bonus_
    def is_bonus_used(self) -> bool:
        # optional field: present whenever it has been set
        return not self._bonus_ is None
    @property
    def role(self) -> typing.Union[tutorial.role.Role, None]:
        return self._role_
    @role.setter
    def role(self, role_: typing.Union[tutorial.role.Role, None]) -> None:
        self._role_ = role_
    @property
    def skills(self) -> typing.Optional[typing.List[tutorial.experience.Experience]]:
        # expose the raw Python list, not the zserio Array wrapper
        return None if self._skills_ is None else self._skills_.raw_array
    @skills.setter
    def skills(self, skills_: typing.Optional[typing.List[tutorial.experience.Experience]]) -> None:
        if skills_ is None:
            self._skills_ = None
        else:
            self._skills_ = zserio.array.Array(zserio.array.ObjectArrayTraits(self._element_creator_skills, self._packed_element_creator_skills, tutorial.experience.Experience.create_packing_context), skills_, is_auto=True)
    def is_skills_used(self) -> bool:
        # conditional field from the schema: only developers carry skills
        return self.role == tutorial.role.Role.DEVELOPER
    @staticmethod
    def create_packing_context(context_node: zserio.array.PackingContextNode) -> None:
        # One child context per schema field, in wire order:
        # age, name, salary, bonus, role, skills (name/skills have no
        # delta context of their own).
        context_node.create_child().create_context()
        context_node.create_child()
        context_node.create_child().create_context()
        context_node.create_child().create_context()
        tutorial.role.Role.create_packing_context(context_node.create_child())
        context_node.create_child()
    def init_packing_context(self, context_node: zserio.array.PackingContextNode) -> None:
        # Seed the delta-packing contexts with this object's values;
        # child indices must match create_packing_context above.
        zserio_ctx_node_age = context_node.children[0]
        zserio_ctx_node_age.context.init(self._age_)
        zserio_ctx_node_salary = context_node.children[2]
        zserio_ctx_node_salary.context.init(self._salary_)
        zserio_ctx_node_bonus = context_node.children[3]
        if self.is_bonus_used():
            zserio_ctx_node_bonus.context.init(self._bonus_)
        zserio_ctx_node_role = context_node.children[4]
        self._role_.init_packing_context(zserio_ctx_node_role)
    def bitsizeof(self, bitposition: int = 0) -> int:
        # Wire size in bits: age(8) + name(string) + salary(16)
        # + bonus presence flag(1) [+ bonus(16)] + role [+ skills].
        end_bitposition = bitposition
        end_bitposition += 8
        end_bitposition += zserio.bitsizeof.bitsizeof_string(self._name_)
        end_bitposition += 16
        end_bitposition += 1
        if self.is_bonus_used():
            end_bitposition += 16
        end_bitposition += self._role_.bitsizeof(end_bitposition)
        if self.is_skills_used():
            end_bitposition += self._skills_.bitsizeof(end_bitposition)
        return end_bitposition - bitposition
    def bitsizeof_packed(self, context_node: zserio.array.PackingContextNode,
                         bitposition: int = 0) -> int:
        # Packed variant: numeric fields are sized by their delta contexts.
        end_bitposition = bitposition
        zserio_ctx_node_age = context_node.children[0]
        end_bitposition += zserio_ctx_node_age.context.bitsizeof(zserio.array.BitFieldArrayTraits(8), end_bitposition, self._age_)
        end_bitposition += zserio.bitsizeof.bitsizeof_string(self._name_)
        zserio_ctx_node_salary = context_node.children[2]
        end_bitposition += zserio_ctx_node_salary.context.bitsizeof(zserio.array.BitFieldArrayTraits(16), end_bitposition, self._salary_)
        zserio_ctx_node_bonus = context_node.children[3]
        end_bitposition += 1
        if self.is_bonus_used():
            end_bitposition += zserio_ctx_node_bonus.context.bitsizeof(zserio.array.BitFieldArrayTraits(16), end_bitposition, self._bonus_)
        zserio_ctx_node_role = context_node.children[4]
        end_bitposition += self._role_.bitsizeof_packed(zserio_ctx_node_role, end_bitposition)
        if self.is_skills_used():
            end_bitposition += self._skills_.bitsizeof_packed(end_bitposition)
        return end_bitposition - bitposition
    def initialize_offsets(self, bitposition: int) -> int:
        # Walks the same field layout as bitsizeof but returns the absolute
        # end position (used to fix up offset fields before writing).
        end_bitposition = bitposition
        end_bitposition += 8
        end_bitposition += zserio.bitsizeof.bitsizeof_string(self._name_)
        end_bitposition += 16
        end_bitposition += 1
        if self.is_bonus_used():
            end_bitposition += 16
        end_bitposition = self._role_.initialize_offsets(end_bitposition)
        if self.is_skills_used():
            end_bitposition = self._skills_.initialize_offsets(end_bitposition)
        return end_bitposition
    def initialize_offsets_packed(self, context_node: zserio.array.PackingContextNode,
                                  bitposition: int) -> int:
        # Packed variant of initialize_offsets.
        end_bitposition = bitposition
        zserio_ctx_node_age = context_node.children[0]
        end_bitposition += zserio_ctx_node_age.context.bitsizeof(zserio.array.BitFieldArrayTraits(8), end_bitposition, self._age_)
        end_bitposition += zserio.bitsizeof.bitsizeof_string(self._name_)
        zserio_ctx_node_salary = context_node.children[2]
        end_bitposition += zserio_ctx_node_salary.context.bitsizeof(zserio.array.BitFieldArrayTraits(16), end_bitposition, self._salary_)
        zserio_ctx_node_bonus = context_node.children[3]
        end_bitposition += 1
        if self.is_bonus_used():
            end_bitposition += zserio_ctx_node_bonus.context.bitsizeof(zserio.array.BitFieldArrayTraits(16), end_bitposition, self._bonus_)
        zserio_ctx_node_role = context_node.children[4]
        end_bitposition = self._role_.initialize_offsets_packed(zserio_ctx_node_role, end_bitposition)
        if self.is_skills_used():
            end_bitposition = self._skills_.initialize_offsets_packed(end_bitposition)
        return end_bitposition
def read(self, zserio_reader: zserio.BitStreamReader) -> None:
    """Deserialize this object from *zserio_reader* (unpacked encoding).

    Raises zserio.PythonRuntimeException when the age constraint
    (age <= 65) is violated.
    """
    self._age_ = zserio_reader.read_bits(8)
    # check constraint
    if not (self.age <= 65):
        raise zserio.PythonRuntimeException("Constraint violated for field Employee.age!")
    self._name_ = zserio_reader.read_string()
    self._salary_ = zserio_reader.read_bits(16)
    # Optional bonus: a leading bool flags whether the value follows.
    if zserio_reader.read_bool():
        self._bonus_ = zserio_reader.read_bits(16)
    self._role_ = tutorial.role.Role.from_reader(zserio_reader)
    # skills is an auto-length object array, read only when is_skills_used().
    if self.is_skills_used():
        self._skills_ = zserio.array.Array.from_reader(zserio.array.ObjectArrayTraits(self._element_creator_skills, self._packed_element_creator_skills, tutorial.experience.Experience.create_packing_context), zserio_reader, is_auto=True)
def read_packed(self, zserio_context_node: zserio.array.PackingContextNode,
                zserio_reader: zserio.BitStreamReader) -> None:
    """Deserialize this object from *zserio_reader* (packed encoding).

    Fixed-width fields go through their packing contexts from
    *zserio_context_node*; the field order matches read().
    """
    zserio_ctx_node_age = zserio_context_node.children[0]
    self._age_ = zserio_ctx_node_age.context.read(zserio.array.BitFieldArrayTraits(8), zserio_reader)
    # check constraint
    if not (self.age <= 65):
        raise zserio.PythonRuntimeException("Constraint violated for field Employee.age!")
    # name is a plain string; it is never delta-packed.
    self._name_ = zserio_reader.read_string()
    zserio_ctx_node_salary = zserio_context_node.children[2]
    self._salary_ = zserio_ctx_node_salary.context.read(zserio.array.BitFieldArrayTraits(16), zserio_reader)
    zserio_ctx_node_bonus = zserio_context_node.children[3]
    # Optional bonus: a leading bool flags whether the value follows.
    if zserio_reader.read_bool():
        self._bonus_ = zserio_ctx_node_bonus.context.read(zserio.array.BitFieldArrayTraits(16), zserio_reader)
    zserio_ctx_node_role = zserio_context_node.children[4]
    self._role_ = tutorial.role.Role.from_reader_packed(zserio_ctx_node_role, zserio_reader)
    if self.is_skills_used():
        self._skills_ = zserio.array.Array.from_reader_packed(zserio.array.ObjectArrayTraits(self._element_creator_skills, self._packed_element_creator_skills, tutorial.experience.Experience.create_packing_context), zserio_reader, is_auto=True)
def write(self, zserio_writer: zserio.BitStreamWriter, *,
          zserio_call_initialize_offsets: bool = True) -> None:
    """Serialize this object to *zserio_writer* (unpacked encoding).

    Raises zserio.PythonRuntimeException when the age constraint
    (age <= 65) is violated.
    """
    # The offset-initialization switch is unused by this type.
    del zserio_call_initialize_offsets
    # check constraint
    if not (self.age <= 65):
        raise zserio.PythonRuntimeException("Constraint violated for field Employee.age!")
    zserio_writer.write_bits(self._age_, 8)
    zserio_writer.write_string(self._name_)
    zserio_writer.write_bits(self._salary_, 16)
    # Optional bonus: write a bool presence flag, then the value if set.
    if self.is_bonus_used():
        zserio_writer.write_bool(True)
        zserio_writer.write_bits(self._bonus_, 16)
    else:
        zserio_writer.write_bool(False)
    self._role_.write(zserio_writer)
    if self.is_skills_used():
        self._skills_.write(zserio_writer)
def write_packed(self, zserio_context_node: zserio.array.PackingContextNode,
                 zserio_writer: zserio.BitStreamWriter) -> None:
    """Serialize this object to *zserio_writer* (packed encoding).

    Fixed-width fields go through their packing contexts from
    *zserio_context_node*; the field order matches write().
    """
    zserio_ctx_node_age = zserio_context_node.children[0]
    # check constraint
    if not (self.age <= 65):
        raise zserio.PythonRuntimeException("Constraint violated for field Employee.age!")
    zserio_ctx_node_age.context.write(zserio.array.BitFieldArrayTraits(8), zserio_writer, self._age_)
    # name is a plain string; it is never delta-packed.
    zserio_writer.write_string(self._name_)
    zserio_ctx_node_salary = zserio_context_node.children[2]
    zserio_ctx_node_salary.context.write(zserio.array.BitFieldArrayTraits(16), zserio_writer, self._salary_)
    zserio_ctx_node_bonus = zserio_context_node.children[3]
    # Optional bonus: write a bool presence flag, then the value if set.
    if self.is_bonus_used():
        zserio_writer.write_bool(True)
        zserio_ctx_node_bonus.context.write(zserio.array.BitFieldArrayTraits(16), zserio_writer, self._bonus_)
    else:
        zserio_writer.write_bool(False)
    zserio_ctx_node_role = zserio_context_node.children[4]
    self._role_.write_packed(zserio_ctx_node_role, zserio_writer)
    if self.is_skills_used():
        self._skills_.write_packed(zserio_writer)
def _element_creator_skills(self, zserio_reader: zserio.BitStreamReader, zserio_index: int) -> tutorial.experience.Experience:
    """Array-traits factory: read one skills element from the stream."""
    del zserio_index  # the element index is not needed for this type
    return tutorial.experience.Experience.from_reader(zserio_reader)
def _packed_element_creator_skills(
        self, zserio_context_node: zserio.array.PackingContextNode,
        zserio_reader: zserio.BitStreamReader, zserio_index: int) -> tutorial.experience.Experience:
    """Array-traits factory: read one packed skills element from the stream."""
    del zserio_index  # the element index is not needed for this type
    return tutorial.experience.Experience.from_reader_packed(zserio_context_node, zserio_reader)
| StarcoderdataPython |
3496462 | import sys
import requests
import logging
from io import StringIO, BytesIO
from japrp.app.main_window import Ui_MainWindow
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt, pyqtSlot, QTimer
from PyQt5.QtGui import QPixmap
from japrp.app_parts.qt_search import ClickableSearchResult
from japrp.parser import RadioBrowserSimple
from japrp.audio_backends.audio_backend_vlc import VlcBackend
from japrp.audio_backends.audio_backend_pyqt5 import QtMediaPlayerWrapper
from functools import partial
_BACKEND = "vlc"
_SEARCH_LIMIT = 20
_SONG_UPDATE_TIMER = 30 * 1000
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#https://github.com/baoboa/pyqt5/blob/master/examples/multimediawidgets/player.py how to make pyqt5 media playr work with playlist
class Japrp(QMainWindow):
    """Main window of the japrp internet-radio player.

    Wires the Qt Designer UI (Ui_MainWindow) to a radio-station search
    backend (RadioBrowserSimple) and an audio backend (VLC or the Qt
    media player, chosen by the module-level _BACKEND flag).
    """

    def __init__(self):
        super(Japrp, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Widgets produced by the most recent station search.
        self.search_results = []
        self.searcher = RadioBrowserSimple()
        if _BACKEND == "vlc":
            self.player = VlcBackend()
        else:
            self.player = QtMediaPlayerWrapper()
        # Hook UI signals to the player controls.
        self.ui.searchbar.returnPressed.connect(self.search_radio)
        self.ui.play.clicked.connect(self.start_playing)
        self.ui.stop.clicked.connect(self.stop_playing)
        self.ui.searchedContent.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        self.ui.searchedContent.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        self.ui.searchedContent.setWidgetResizable(True)
        # Placeholder icon/name shown until a station is selected.
        self._station_icon_default = QPixmap("../../img/empty_icon.png")
        self._station_name_default = ""
        self.ui.sender_icon.setPixmap(self._station_icon_default)
        self.ui.sender_name.setText(self._station_name_default)
        self.ui.volumeSlider.setValue(100)
        self.ui.volumeSlider.valueChanged.connect(self.set_volume)
        # True while a station is playing; gates the song-title polling.
        self.player_is_active = False
        self.ui.song_title.setWordWrap(True)
        # Poll the stream metadata periodically for the current song title.
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.get_song_name)
        self.timer.start(_SONG_UPDATE_TIMER)

    def get_song_name(self):
        """Timer callback: refresh the song title from the ICY metadata."""
        print("Updating Song Title")
        if self.player_is_active:
            title = self.player.get_meta_data_icy(self.player.get_url())
            self.ui.song_title.setText(title)
        # Re-arm the timer for the next poll.
        self.timer.start(_SONG_UPDATE_TIMER)

    @pyqtSlot()
    def search_radio(self):
        """Run a station search for the searchbar text and list the results."""
        # To dynamically create and add to scroll area we need a container.
        # We create the container inside the function, s.t. it is reset
        # between searches.
        self.containerWidget = QWidget()
        self.containerLayout = QVBoxLayout()
        if not self.search_results:
            self.search_results = []
        if len(self.search_results) > 0:
            self.search_results = []
        temp_search = self.searcher.search_limited(name=self.ui.searchbar.text(), limit=_SEARCH_LIMIT)
        res = self.searcher.process_result(temp_search)
        for key, val in res.items():
            widget = ClickableSearchResult(key, val)
            self.search_results.append(widget)
            # Use partial instead of lambda functions here, because with
            # lambda the value passed to the function will be set to
            # len(list) after it is created -> see for details:
            # https://stackoverflow.com/questions/45090982/passing-extra-arguments-through-connect
            widget.play_btn.clicked.connect(partial(self.openPlayer, len(self.search_results) - 1))
            self.containerLayout.addWidget(widget)
        self.containerWidget.setLayout(self.containerLayout)
        self.ui.searchedContent.setWidget(self.containerWidget)
        self.ui.searchedContent.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)

    @pyqtSlot(int)
    def openPlayer(self, idx_widget):
        """Start playing the search result at *idx_widget* and update the UI."""
        self.player.stop()
        self.player.set_media(self.search_results[idx_widget].value["url"])
        self.player.play()
        self.ui.song_title.setText("")
        # Try to fetch the station's favicon; fall back to the default icon.
        temp_icon_value = self.search_results[idx_widget].value.get("favicon")
        if temp_icon_value is not None:
            if len(temp_icon_value) > 0:
                url_ok = False
                try:
                    icon_decoded = requests.get(temp_icon_value, timeout=5)
                    url_ok = icon_decoded.ok
                except requests.RequestException as e:
                    logger.exception("Icon Url was not ok because of exception %s" %e)
                if url_ok:
                    qp = QPixmap()
                    qp.loadFromData(icon_decoded.content)
                    qp.scaled(2, 2, Qt.KeepAspectRatioByExpanding)
                    self.ui.sender_icon.setPixmap(qp)
                    self.ui.sender_icon.setScaledContents(True)
                else:
                    self.ui.sender_icon.setPixmap(self._station_icon_default)
            else:
                self.ui.sender_icon.setPixmap(self._station_icon_default)
        # Show the station name, or the default when it is missing/empty.
        temp_station_name = self.search_results[idx_widget].value.get("name")
        if temp_station_name is not None:
            if len(temp_station_name) > 0:
                self.ui.sender_name.setText(temp_station_name)
            else:
                self.ui.sender_name.setText(self._station_name_default)
        self.player_is_active = True

    #@pyqtSlot()
    def start_playing(self):
        """Play/pause toggle for the play button."""
        try:
            if self.player.get_is_playing():
                self.player.pause()
                logger.debug("Pause player")
            else:
                self.player.play()
                logger.debug("Start playing")
        except ValueError as ex:
            logger.exception("Play was hit before a media was selected: %s" %ex)

    @pyqtSlot()
    def stop_playing(self):
        """Stop playback and disable the song-title polling."""
        print("Stopping")
        if self.player.media is not None or self.player is not None:
            self.player.stop()
            logger.debug("Stop playing")
        # NOTE(review): source indentation was lost in extraction; this flag
        # is reset unconditionally here — confirm against the original file.
        self.player_is_active = False

    @pyqtSlot()
    def set_volume(self):
        """Slider callback: forward the slider value to the audio backend."""
        self.player.set_volume(self.ui.volumeSlider.value())
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main
    # window, and hand control to the Qt event loop.
    app = QApplication(sys.argv)
    w = Japrp()
    w.show()
    sys.exit(app.exec())
| StarcoderdataPython |
294074 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import json
import jsonschema
from datetime import datetime
def get_route():
    """Prompt the user for route data and return it as a dict.

    Returns a dict with 'destination', 'number' and 'time' keys.
    Exits the process with status 1 when the departure time is not
    in HH:MM format.
    """
    destination = input("Пункт назначения? ")
    number = input("Номер поезда? ")
    time = input("Время отправления?(формат чч:мм) ")
    # Validate the time format only; the string itself is stored as-is.
    try:
        datetime.strptime(time, "%H:%M")
    except ValueError:
        print("Неправильный формат времени", file=sys.stderr)
        exit(1)
    return {
        'destination': destination,
        'number': number,
        'time': time
    }
def display_routes(way):
"""
Отобразить список маршрутов.
"""
if way:
line = '+-{}-+-{}-+-{}-+'.format(
'-' * 30,
'-' * 4,
'-' * 20
)
print(line)
print(
'| {:^30} | {:^4} | {:^20} |'.format(
"Пункт назначения",
"№",
"Время"
)
)
print(line)
for route in way:
print(
'| {:<30} | {:>4} | {:<20} |'.format(
route.get('destination', ''),
route.get('number', ''),
route.get('time', '')
)
)
print(line)
else:
print("Маршруты не найдены")
def select_routes(way, period):
    """Return the routes from *way* departing strictly after *period*.

    *period* is a datetime; each route's 'time' field is parsed as HH:MM
    before comparison.
    """
    return [
        route for route in way
        if period < datetime.strptime(route.get('time'), "%H:%M")
    ]
def save_routes(file_name, way):
    """Serialize *way* to *file_name* as pretty-printed UTF-8 JSON.

    ensure_ascii=False keeps Cyrillic text readable in the output file.
    """
    payload = json.dumps(way, ensure_ascii=False, indent=4)
    with open(file_name, "w", encoding="utf-8") as fp:
        fp.write(payload)
def load_routes(file_name):
    """Load routes from *file_name*, validating them against schema.json.

    Prints a success message and returns the parsed data when it matches
    the schema; otherwise prints an error to stderr and exits with
    status 1.

    Fix: the original tested `if not validator.validate(data)` — but
    Draft7Validator.validate() returns None on success and raises on
    failure, so the condition was always True and only worked by
    accident.  The success path is now explicit.
    """
    # NOTE(review): the schema path is hard-coded relative to the CWD —
    # confirm this is intended before running from another directory.
    with open("schema.json", 'r', encoding="utf-8") as schem:
        schema = json.load(schem)
    with open(file_name, "r", encoding="utf-8") as fl:
        data = json.load(fl)
    try:
        # validate() raises jsonschema ValidationError on invalid data.
        jsonschema.Draft7Validator(schema).validate(data)
        print("Данные успешно загружены")
    except jsonschema.exceptions.ValidationError:
        print("Ошибка загрузки данных", file=sys.stderr)
        exit(1)
    return data
def main():
    """Main command loop of the program.

    Reads commands from stdin: add, list, select, save <file>,
    load <file>, help, exit.
    """
    # List of routes.
    routes = []
    # Run an endless command-request loop.
    while True:
        # Request a command from the terminal.
        command = input(">>> ").lower()
        # Dispatch on the command.
        if command == 'exit':
            break
        elif command == 'add':
            # Request route data from the user.
            route = get_route()
            # Append the dict to the list.
            routes.append(route)
            # Keep the list sorted by destination.
            if len(routes) > 1:
                routes.sort(key=lambda item: item.get('destination', ''))
        elif command == 'list':
            # Display all routes.
            display_routes(routes)
        elif command == 'select':
            time_select = input("Выберите время отправления(формат чч:мм): ")
            try:
                time_select = datetime.strptime(time_select, "%H:%M")
            except ValueError:
                print("Неправильный формат времени", file=sys.stderr)
                exit(1)
            selected = select_routes(routes, time_select)
            # Display the selected routes.
            display_routes(selected)
        elif command.startswith("save "):
            # Split the command to extract the file name.
            parts = command.split(maxsplit=1)
            file_name = parts[1]
            # Save the data to the named file.
            save_routes(file_name, routes)
        elif command.startswith("load "):
            # Split the command to extract the file name.
            parts = command.split(maxsplit=1)
            file_name = parts[1]
            # Load the data from the named file.
            routes = load_routes(file_name)
        elif command == 'help':
            # Print usage help.
            print("Список команд:\n")
            print("add - добавить маршрут;")
            print("list - вывести список маршрутов;")
            print("select - нати маршруты по времени")
            print("help - отобразить справку;")
            print("exit - завершить работу с программой.")
        else:
            print(f"Неизвестная команда {command}", file=sys.stderr)
# Run the command loop only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4861157 | import evaluate
from formulas import jaccard, ochiai, tarantula, ample, wong1, wong2, wong3, op1, op2, gp_list, gpif, gpasgn, gpcall, gpseq
import math
import sys
def compare_formula(spectra_list, f1, f2):
    """Score how differently two suspiciousness formulas rank the spectra.

    Each spectrum is an (ep, ef, np, nf) tuple.  Both formulas score
    every spectrum, scores are ranked in descending order (ties share
    the rank of their first occurrence), and the total rank displacement
    is returned — 0 means the formulas produce identical rankings.
    """
    scores_one = [f1(ep, ef, np, nf) for (ep, ef, np, nf) in spectra_list]
    scores_two = [f2(ep, ef, np, nf) for (ep, ef, np, nf) in spectra_list]
    desc_one = sorted(scores_one, reverse=True)
    desc_two = sorted(scores_two, reverse=True)
    ranks_one = [desc_one.index(score) for score in scores_one]
    ranks_two = [desc_two.index(score) for score in scores_two]
    return measure_similarity(ranks_one, ranks_two)

def measure_similarity(order1, order2):
    """Return the sum of absolute element-wise differences of two rankings."""
    return sum(abs(i1 - i2) for (i1, i2) in zip(order1, order2))
def add(a, b):
    """Return a + b."""
    return a + b

def sub(a, b):
    """Return a - b."""
    return a - b

def mul(a, b):
    """Return a * b."""
    return a * b

def div(a, b):
    """Protected division: return a / b, or 1 when the divisor is zero."""
    return 1 if b == 0 else a / b

def sqrt(a):
    """Protected square root: return sqrt(|a|), never raising on negatives."""
    return math.sqrt(abs(a))
def parse_formula(fun_str):
    """Turn a formula string over (ep, ef, np, nf) into a callable.

    SECURITY: the string is eval()'d on every call, so formula files
    must come from a trusted source.
    """
    return lambda ep, ef, np, nf : eval(fun_str)

def read_formulas(file_path):
    """Parse every double-quoted formula in *file_path* into callables.

    Lines without a double quote are skipped; for matching lines the
    text between the first pair of double quotes is taken as the
    formula.

    Bug fix: the original readline() loop checked `'"' not in line`
    (and `continue`d) BEFORE the EOF check, so the empty string
    returned at end-of-file looped forever and `if not line: break`
    was unreachable.  Iterating the file object terminates correctly.
    """
    formula_list = []
    with open(file_path, 'r') as file:
        for line in file:
            if '"' in line:
                fun_str = line.split('"')[1]
                formula_list.append(parse_formula(fun_str))
    return formula_list
def file_test(formula_path, data_path, write_path):
    """Compare formulas from *formula_path* against the GP and human
    baseline formulas on the spectra in *data_path*, writing one CSV
    row per baseline formula to *write_path*.

    Bug fix: the original body immediately overwrote all three
    parameters with sys.argv[1..3], so the caller-supplied arguments
    were silently ignored (and the function crashed when argv was
    short).  The parameters are now used as given.
    """
    formula_list = read_formulas(formula_path)
    human_list = [("jaccard", jaccard), ("ochiai", ochiai), ("tarantula", tarantula),
    ("ample", ample), ("wong1", wong1), ("wong2", wong2), ("wong3", wong3), ("op1", op1), ("op2", op2)]
    spectra_list = evaluate.spectra_list([data_path])[0]["spectra_list"]
    with open(write_path, 'w') as out:
        # Rows for the 30 GP-evolved formulas (gp1 .. gp30).
        for i in range(1, 31):
            eval_formula = gp_list[i - 1]
            formula_name = "gp" + str(i)
            sim_list = [str(compare_formula(spectra_list, f, eval_formula)) for f in formula_list]
            out.write(formula_name + "," + ','.join(sim_list) + '\n')
        # Rows for the human-designed baseline formulas.
        for name, eval_formula in human_list:
            sim_list = [str(compare_formula(spectra_list, f, eval_formula)) for f in formula_list]
            out.write(name + "," + ','.join(sim_list) + '\n')
def form_test(data_path, write_path):
    """Compare a fixed set of structural GP formulas (gpif/gpasgn/gpcall/
    gpseq) against the GP and human baseline formulas on the spectra in
    *data_path*, writing one CSV row per baseline formula to *write_path*.
    """
    formula_list = [gpif, gpasgn, gpcall, gpseq]
    human_list = [("jaccard", jaccard), ("ochiai", ochiai), ("tarantula", tarantula),
    ("ample", ample), ("wong1", wong1), ("wong2", wong2), ("wong3", wong3), ("op1", op1), ("op2", op2)]
    spectra_list = evaluate.spectra_list([data_path])[0]["spectra_list"]
    with open(write_path, 'w') as out:
        # Rows for the 30 GP-evolved formulas (gp1 .. gp30).
        for i in range(1, 31):
            eval_formula = gp_list[i - 1]
            formula_name = "gp" + str(i)
            sim_list = list(map(lambda f : str(compare_formula(spectra_list, f, eval_formula)), formula_list))
            out.write(formula_name + "," + ','.join(sim_list) + '\n')
        # Rows for the human-designed baseline formulas.
        for name, eval_formula in human_list:
            sim_list = list(map(lambda f : str(compare_formula(spectra_list, f, eval_formula)), formula_list))
            out.write(name + "," + ','.join(sim_list) + '\n')
if __name__ == "__main__":
if len(sys.argv) == 4:
file_test(sys.argv[1], sys.argv[2], sys.argv[3])
else:
form_test(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
3372849 | <gh_stars>0
import os
import sys
import cv2
# Target square edge length for the resized images.
_IMAGE_SIZE = 512
image_folder = '/Users/kunato/Downloads/train/'
# next(os.walk(...))[-1] yields the filenames in the top-level folder only.
filenames = [os.path.join(image_folder, filename)
for filename in next(os.walk(image_folder))[-1]]
for fname in filenames:
    print(
    "\r>> Reading file [%s] image" % fname)
    try:
        img = cv2.imread(fname, cv2.IMREAD_COLOR)
        img = img[:, :, ::-1]  # BGR -> RGB channel reversal
        # NOTE(review): the resized image is never written anywhere —
        # presumably this script only verifies that files decode; confirm.
        img = cv2.resize(img, (_IMAGE_SIZE, _IMAGE_SIZE))
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, making the loop impossible to interrupt.
        print("\r>> [ERROR] Reading file [%s] image" % fname)
| StarcoderdataPython |
3514026 | #!/usr/bin/env python3
import time
from concurrent.futures import ThreadPoolExecutor
import concur as c
executor = ThreadPoolExecutor()
def timer():
    """Concur widget: a start button followed by a 3 s cancellable wait."""
    # Block until the "Start timer" button is clicked (the empty text
    # widget never fires, so only the button can end this orr).
    yield from c.orr([c.text(""), c.button("Start timer")])
    # NOTE(review): bare yield presumably lets concur consume the click
    # event before the next widget is shown — confirm against concur docs.
    yield
    # Run the sleep on the thread pool so the UI stays responsive; the
    # Block widget completes when the future does, racing the Cancel button.
    future = executor.submit(lambda: time.sleep(3))
    yield from c.orr([c.text("waiting for 3s..."), c.button("Cancel"), c.Block(future)])
def app():
    """Root widget: three independent, endlessly restarting timers."""
    return c.orr([c.forever(timer) for _ in range(3)])
if __name__ == "__main__":
c.main(app(), "Timers")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.