| id | text | dataset_id |
|---|---|---|
1780225 | <gh_stars>1000+
import os
import sys
from examples.iql import mujoco_finetune as iql
from rlkit.core import logger
from rlkit.testing import csv_util
def test_iql():
    logger.reset()
    # make tests small by mutating variant
    iql.variant["algo_kwargs"]["start_epoch"] = -2
    iql.variant["algo_kwargs"]["num_epochs"] = 2
    iql.variant["algo_kwargs"]["batch_size"] = 2
    iql.variant["algo_kwargs"]["num_eval_steps_per_epoch"] = 2
    iql.variant["algo_kwargs"]["num_expl_steps_per_train_loop"] = 2
    iql.variant["algo_kwargs"]["num_trains_per_train_loop"] = 100
    iql.variant["algo_kwargs"]["min_num_steps_before_training"] = 2
    iql.variant["qf_kwargs"] = dict(hidden_sizes=[2, 2])
    iql.variant["seed"] = 25580
    iql.main()
    reference_csv = "tests/regression/iql/halfcheetah_online_progress.csv"
    output_csv = os.path.join(logger.get_snapshot_dir(), "progress.csv")
    print("comparing reference %s against output %s" % (reference_csv, output_csv))
    output = csv_util.get_exp(output_csv)
    reference = csv_util.get_exp(reference_csv)
    keys = ["epoch", "expl/num steps total", "expl/Average Returns", "trainer/Q1 Predictions Mean"]
    csv_util.check_equal(reference, output, keys)
if __name__ == "__main__":
    test_iql()
| StarcoderdataPython |
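The test above runs a shrunken IQL configuration and then diffs selected columns of the produced `progress.csv` against a stored reference file. The internals of rlkit's `csv_util` are not shown here; the snippet below is only a minimal sketch of what a column-wise regression check of that shape could look like (the helper name and tolerance are made up for illustration).

```python
# Hypothetical column-wise CSV regression check; csv_util.check_equal in rlkit
# may behave differently (e.g. exact string comparison instead of a tolerance).
import csv

def check_columns_equal(reference_csv, output_csv, keys, tol=1e-8):
    with open(reference_csv) as f_ref, open(output_csv) as f_out:
        ref_rows = list(csv.DictReader(f_ref))
        out_rows = list(csv.DictReader(f_out))
    assert len(ref_rows) == len(out_rows), "row count mismatch"
    for i, (ref, out) in enumerate(zip(ref_rows, out_rows)):
        for key in keys:
            assert abs(float(ref[key]) - float(out[key])) <= tol, (
                "row %d, column %r: %s != %s" % (i, key, ref[key], out[key]))
```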
105405 | <reponame>Ratgor/iLikeit-voting-platform
from django.db import models
# Create your models here.
# RTG: example from http://v1k45.com/blog/modern-django-part-3-creating-an-api-and-integrating-with-react/
# RTG: example from http://v1k45.com/blog/modern-django-part-4-adding-authentication-to-react-spa-using-drf/
from django.contrib.auth.models import User
class Note(models.Model):
    text = models.TextField(blank=True)
    owner = models.ForeignKey(User,
                              related_name="notes",
                              on_delete=models.CASCADE,
                              null=True,
                              blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    encrypted = models.BooleanField(blank=False, default=True)

    def __str__(self):
        return self.text
| StarcoderdataPython |
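Because the `owner` foreign key sets `related_name="notes"`, Django exposes the reverse relation directly on `User`. A small usage sketch, assuming a configured Django project where the `Note` model above is importable (the user and note values are made up):

```python
# Hypothetical usage of the reverse relation created by related_name="notes";
# run e.g. inside a Django shell or test case.
from django.contrib.auth.models import User

user = User.objects.create_user(username="alice")
Note.objects.create(text="first note", owner=user)

# related_name="notes" makes user.notes available as the reverse accessor.
assert user.notes.count() == 1
unencrypted = user.notes.filter(encrypted=False)  # model default is encrypted=True
```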
1704244 | import sys
import os
import tkinter as tk
__dir__ = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(__dir__, '../..'))
import suzu.tktool.filepathentry as filepathentry
if __name__ == '__main__':
    app = tk.Tk()
    fpath = filepathentry.Saveas(app)

    # operation
    # set
    def set_action():
        fpath.set('test string')
    setbtn = tk.Button(app, text='set', command=set_action)

    # get
    def get_action():
        print(fpath.get())
    getbtn = tk.Button(app, text='get', command=get_action)

    # clear
    def clear_action():
        fpath.clear()
    clearbtn = tk.Button(app, text='clear', command=clear_action)

    fpath.pack(side=tk.TOP, expand=True, fill=tk.X)
    setbtn.pack(side=tk.LEFT)
    getbtn.pack(side=tk.LEFT)
    clearbtn.pack(side=tk.LEFT)

    #fpath.grid(row=1, column=0, columnspan=3)
    #setbtn.grid(row=2, column=0)
    #getbtn.grid(row=2, column=1)
    #clearbtn.grid(row=2, column=2)

    app.mainloop()
| StarcoderdataPython |
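The demo drives a `Saveas` widget through `set`/`get`/`clear` and `pack`, but the actual `suzu.tktool.filepathentry` implementation is not included in this sample. Below is a minimal sketch of a widget with the same surface, purely as an assumption about its shape (an entry field plus a browse button), not the real library code.

```python
# Minimal sketch of a Saveas-like widget exposing the set/get/clear interface
# used in the demo above; this is an assumption, not the suzu.tktool source.
import tkinter as tk
from tkinter import filedialog

class Saveas(tk.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.var = tk.StringVar()
        tk.Entry(self, textvariable=self.var).pack(side=tk.LEFT, expand=True, fill=tk.X)
        tk.Button(self, text='...', command=self._browse).pack(side=tk.LEFT)

    def _browse(self):
        # Ask for a "save as" path and store it in the entry if one was chosen.
        path = filedialog.asksaveasfilename()
        if path:
            self.var.set(path)

    def set(self, value):
        self.var.set(value)

    def get(self):
        return self.var.get()

    def clear(self):
        self.var.set('')
```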
3212742 | import os
from pyelliptic.openssl import OpenSSL
def randomBytes(n):
    try:
        return os.urandom(n)
    except NotImplementedError:
        return OpenSSL.rand(n)
| StarcoderdataPython |
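`randomBytes` prefers `os.urandom` and only falls back to `OpenSSL.rand` where `urandom` is unavailable. A tiny hedged usage example, assuming the function above is in scope (the token use case is illustrative only):

```python
# Hypothetical caller: turn 32 random bytes into a printable hex token.
import binascii

token = binascii.hexlify(randomBytes(32))
assert len(token) == 64
```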
3278151 | import os, sys, subprocess, shutil
sys.path.append(os.path.dirname(__file__) + "/../lib")
from test_helper import create_virtenv, run_test
ENV_NAME = "pytest_test_env_" + os.path.basename(sys.executable)
ENV_DIR = os.path.abspath(ENV_NAME)
SRC_DIR = os.path.abspath(os.path.join(ENV_NAME, "src"))
PYTHON_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "python"))
pkg = ["pytest==2.8.2"]
create_virtenv(ENV_NAME, pkg)
PYTEST_DIR = os.path.abspath(os.path.join(SRC_DIR, "pytest"))
test_dir = os.path.join(ENV_DIR, "tests")
if not os.path.exists(test_dir):
    os.mkdir(test_dir)
with open(os.path.join(test_dir, "test_foo.py"), 'w') as f:
    f.write("""
import pytest
@pytest.mark.skipif(True, reason="for fun")
def test_skipif_true():
    1/0
""")
subprocess.check_call([os.path.join(ENV_DIR, "bin", "py.test"), test_dir])
# subprocess.check_call(["gdb", "--args", PYTHON_EXE, "-m", "pytest", test_dir])
| StarcoderdataPython |
3356489 | # model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{4}") # a vector of 4 float32s
i2 = Output("op2", "TENSOR_FLOAT32", "{4}") # a vector of 4 float32s
model = model.Operation("EXP", i1).To(i2)
# Example 1. Input in operand 0,
input0 = {i1:  # input 0
          [3.0, 4.0, 5.0, 6.0]}
output0 = {i2:  # output 0
           [20.085537, 54.59815, 148.41316, 403.4288]}
# Instantiate an example
Example((input0, output0))
| StarcoderdataPython |
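The expected outputs in this EXP test vector are just the element-wise exponential of the inputs. A quick check with numpy (numpy is used here only for illustration; it is not part of the NNAPI test harness itself):

```python
# Sanity check: the listed outputs are exp() of the inputs to float32 precision.
import numpy as np

inputs = np.array([3.0, 4.0, 5.0, 6.0], dtype=np.float32)
expected = np.array([20.085537, 54.59815, 148.41316, 403.4288], dtype=np.float32)
assert np.allclose(np.exp(inputs), expected, rtol=1e-5)
```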
95037 | alist = ['bob', 'alice', 'tom', 'jerry']
# for i in range(len(alist)):
# print(i, alist[i])
print(list(enumerate(alist)))
for data in enumerate(alist):
    print(data)

for i, name in enumerate(alist):
    print(i, name)
| StarcoderdataPython |
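`enumerate` also accepts a start offset, which avoids the `range(len(...))` pattern commented out above when 1-based numbering is wanted:

```python
# enumerate with an explicit start index (1-based numbering)
alist = ['bob', 'alice', 'tom', 'jerry']
for i, name in enumerate(alist, start=1):
    print(i, name)
```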
56334 | <filename>tests/integration/resources_permissions/test_webhooks_resources.py
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from taiga.projects import choices as project_choices
from taiga.webhooks.serializers import WebhookSerializer
from taiga.webhooks.models import Webhook
from tests import factories as f
from tests.utils import helper_test_http_method, disconnect_signals, reconnect_signals
from unittest import mock
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.registered_user = f.UserFactory.create()
m.project_owner = f.UserFactory.create()
m.project1 = f.ProjectFactory(is_private=True,
anon_permissions=[],
public_permissions=[],
owner=m.project_owner)
m.project2 = f.ProjectFactory(is_private=True,
anon_permissions=[],
public_permissions=[],
owner=m.project_owner)
m.blocked_project = f.ProjectFactory(is_private=True,
anon_permissions=[],
public_permissions=[],
owner=m.project_owner,
blocked_code=project_choices.BLOCKED_BY_STAFF)
f.MembershipFactory(project=m.project1,
user=m.project_owner,
is_admin=True)
f.MembershipFactory(project=m.blocked_project,
user=m.project_owner,
is_admin=True)
m.webhook1 = f.WebhookFactory(project=m.project1)
m.webhooklog1 = f.WebhookLogFactory(webhook=m.webhook1)
m.webhook2 = f.WebhookFactory(project=m.project2)
m.webhooklog2 = f.WebhookLogFactory(webhook=m.webhook2)
m.blocked_webhook = f.WebhookFactory(project=m.blocked_project)
m.blocked_webhooklog = f.WebhookLogFactory(webhook=m.blocked_webhook)
return m
def test_webhook_retrieve(client, data):
url1 = reverse('webhooks-detail', kwargs={"pk": data.webhook1.pk})
url2 = reverse('webhooks-detail', kwargs={"pk": data.webhook2.pk})
blocked_url = reverse('webhooks-detail', kwargs={"pk": data.blocked_webhook.pk})
users = [
None,
data.registered_user,
data.project_owner
]
results = helper_test_http_method(client, 'get', url1, None, users)
assert results == [401, 403, 200]
results = helper_test_http_method(client, 'get', url2, None, users)
assert results == [401, 403, 403]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 200]
def test_webhook_update(client, data):
url1 = reverse('webhooks-detail', kwargs={"pk": data.webhook1.pk})
url2 = reverse('webhooks-detail', kwargs={"pk": data.webhook2.pk})
blocked_url = reverse('webhooks-detail', kwargs={"pk": data.blocked_webhook.pk})
users = [
None,
data.registered_user,
data.project_owner
]
webhook_data = WebhookSerializer(data.webhook1).data
webhook_data["key"] = "test"
webhook_data = json.dumps(webhook_data)
results = helper_test_http_method(client, 'put', url1, webhook_data, users)
assert results == [401, 403, 200]
webhook_data = WebhookSerializer(data.webhook2).data
webhook_data["key"] = "test"
webhook_data = json.dumps(webhook_data)
results = helper_test_http_method(client, 'put', url2, webhook_data, users)
assert results == [401, 403, 403]
webhook_data = WebhookSerializer(data.blocked_webhook).data
webhook_data["key"] = "test"
webhook_data = json.dumps(webhook_data)
results = helper_test_http_method(client, 'put', blocked_url, webhook_data, users)
assert results == [401, 403, 451]
def test_webhook_delete(client, data):
url1 = reverse('webhooks-detail', kwargs={"pk": data.webhook1.pk})
url2 = reverse('webhooks-detail', kwargs={"pk": data.webhook2.pk})
blocked_url = reverse('webhooks-detail', kwargs={"pk": data.blocked_webhook.pk})
users = [
None,
data.registered_user,
data.project_owner
]
results = helper_test_http_method(client, 'delete', url1, None, users)
assert results == [401, 403, 204]
results = helper_test_http_method(client, 'delete', url2, None, users)
assert results == [401, 403, 403]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [401, 403, 451]
def test_webhook_list(client, data):
url = reverse('webhooks-list')
response = client.get(url)
webhooks_data = json.loads(response.content.decode('utf-8'))
assert len(webhooks_data) == 0
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
webhooks_data = json.loads(response.content.decode('utf-8'))
assert len(webhooks_data) == 0
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
webhooks_data = json.loads(response.content.decode('utf-8'))
assert len(webhooks_data) == 2
assert response.status_code == 200
def test_webhook_create(client, data):
url = reverse('webhooks-list')
users = [
None,
data.registered_user,
data.project_owner
]
create_data = json.dumps({
"name": "Test",
"url": "http://test.com",
"key": "test",
"project": data.project1.pk,
})
results = helper_test_http_method(client, 'post', url, create_data, users, lambda: Webhook.objects.all().delete())
assert results == [401, 403, 201]
create_data = json.dumps({
"name": "Test",
"url": "http://test.com",
"key": "test",
"project": data.project2.pk,
})
results = helper_test_http_method(client, 'post', url, create_data, users, lambda: Webhook.objects.all().delete())
assert results == [401, 403, 403]
create_data = json.dumps({
"name": "Test",
"url": "http://test.com",
"key": "test",
"project": data.blocked_project.pk,
})
results = helper_test_http_method(client, 'post', url, create_data, users, lambda: Webhook.objects.all().delete())
assert results == [401, 403, 451]
def test_webhook_patch(client, data):
url1 = reverse('webhooks-detail', kwargs={"pk": data.webhook1.pk})
url2 = reverse('webhooks-detail', kwargs={"pk": data.webhook2.pk})
blocked_url = reverse('webhooks-detail', kwargs={"pk": data.blocked_webhook.pk})
users = [
None,
data.registered_user,
data.project_owner
]
patch_data = json.dumps({"key": "test"})
results = helper_test_http_method(client, 'patch', url1, patch_data, users)
assert results == [401, 403, 200]
patch_data = json.dumps({"key": "test"})
results = helper_test_http_method(client, 'patch', url2, patch_data, users)
assert results == [401, 403, 403]
patch_data = json.dumps({"key": "test"})
results = helper_test_http_method(client, 'patch', blocked_url, patch_data, users)
assert results == [401, 403, 451]
def test_webhook_action_test(client, data):
url1 = reverse('webhooks-test', kwargs={"pk": data.webhook1.pk})
url2 = reverse('webhooks-test', kwargs={"pk": data.webhook2.pk})
blocked_url = reverse('webhooks-test', kwargs={"pk": data.blocked_webhook.pk})
users = [
None,
data.registered_user,
data.project_owner
]
with mock.patch('taiga.webhooks.tasks._send_request') as _send_request_mock:
_send_request_mock.return_value = data.webhooklog1
results = helper_test_http_method(client, 'post', url1, None, users)
assert results == [404, 404, 200]
assert _send_request_mock.called is True
with mock.patch('taiga.webhooks.tasks._send_request') as _send_request_mock:
_send_request_mock.return_value = data.webhooklog1
results = helper_test_http_method(client, 'post', url2, None, users)
assert results == [404, 404, 404]
assert _send_request_mock.called is False
with mock.patch('taiga.webhooks.tasks._send_request') as _send_request_mock:
_send_request_mock.return_value = data.webhooklog1
results = helper_test_http_method(client, 'post', blocked_url, None, users)
assert results == [404, 404, 451]
assert _send_request_mock.called is False
def test_webhooklogs_list(client, data):
url = reverse('webhooklogs-list')
response = client.get(url)
webhooklogs_data = json.loads(response.content.decode('utf-8'))
assert len(webhooklogs_data) == 0
assert response.status_code == 200
client.login(data.registered_user)
response = client.get(url)
webhooklogs_data = json.loads(response.content.decode('utf-8'))
assert len(webhooklogs_data) == 0
assert response.status_code == 200
client.login(data.project_owner)
response = client.get(url)
webhooklogs_data = json.loads(response.content.decode('utf-8'))
assert len(webhooklogs_data) == 2
assert response.status_code == 200
def test_webhooklogs_retrieve(client, data):
url1 = reverse('webhooklogs-detail', kwargs={"pk": data.webhooklog1.pk})
url2 = reverse('webhooklogs-detail', kwargs={"pk": data.webhooklog2.pk})
blocked_url = reverse('webhooks-detail', kwargs={"pk": data.blocked_webhook.pk})
users = [
None,
data.registered_user,
data.project_owner
]
results = helper_test_http_method(client, 'get', url1, None, users)
assert results == [401, 403, 200]
results = helper_test_http_method(client, 'get', url2, None, users)
assert results == [401, 403, 403]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [401, 403, 200]
def test_webhooklogs_create(client, data):
url1 = reverse('webhooklogs-list')
url2 = reverse('webhooklogs-list')
blocked_url = reverse('webhooklogs-list')
users = [
None,
data.registered_user,
data.project_owner
]
results = helper_test_http_method(client, 'post', url1, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'post', url2, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'post', blocked_url, None, users)
assert results == [405, 405, 405]
def test_webhooklogs_delete(client, data):
url1 = reverse('webhooklogs-detail', kwargs={"pk": data.webhooklog1.pk})
url2 = reverse('webhooklogs-detail', kwargs={"pk": data.webhooklog2.pk})
blocked_url = reverse('webhooklogs-detail', kwargs={"pk": data.blocked_webhooklog.pk})
users = [
None,
data.registered_user,
data.project_owner
]
results = helper_test_http_method(client, 'delete', url1, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'delete', url2, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [405, 405, 405]
def test_webhooklogs_update(client, data):
url1 = reverse('webhooklogs-detail', kwargs={"pk": data.webhooklog1.pk})
url2 = reverse('webhooklogs-detail', kwargs={"pk": data.webhooklog2.pk})
blocked_url = reverse('webhooklogs-detail', kwargs={"pk": data.blocked_webhooklog.pk})
users = [
None,
data.registered_user,
data.project_owner
]
results = helper_test_http_method(client, 'put', url1, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'put', url2, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'put', blocked_url, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'patch', url1, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'patch', url2, None, users)
assert results == [405, 405, 405]
results = helper_test_http_method(client, 'patch', blocked_url, None, users)
assert results == [405, 405, 405]
def test_webhooklogs_action_resend(client, data):
url1 = reverse('webhooklogs-resend', kwargs={"pk": data.webhooklog1.pk})
url2 = reverse('webhooklogs-resend', kwargs={"pk": data.webhooklog2.pk})
blocked_url = reverse('webhooklogs-resend', kwargs={"pk": data.blocked_webhooklog.pk})
users = [
None,
data.registered_user,
data.project_owner
]
results = helper_test_http_method(client, 'post', url1, None, users)
assert results == [404, 404, 200]
results = helper_test_http_method(client, 'post', url2, None, users)
assert results == [404, 404, 404]
results = helper_test_http_method(client, 'post', blocked_url, None, users)
assert results == [404, 404, 451]
| StarcoderdataPython |
1746845 | <reponame>SciGaP/DEPRECATED-Cipres-Airavata-POC<gh_stars>0
import os
import string
import math
import re
import subprocess
def getProperties(filename):
propFile= file( filename, "rU" )
propDict= dict()
for propLine in propFile:
propDef= propLine.strip()
if len(propDef) == 0:
continue
if propDef[0] in ( '!', '#' ):
continue
punctuation= [ propDef.find(c) for c in ':= ' ] + [ len(propDef) ]
found= min( [ pos for pos in punctuation if pos != -1 ] )
name= propDef[:found].rstrip()
value= propDef[found:].lstrip(":= ").rstrip()
propDict[name]= value
propFile.close()
# print propDict
return propDict
# There's only one queue and it's limited to 2 days (i.e. 2880 minutes).
short_queue = "workq"
queues = ( ("workq", 2880.0), )
cores_per_node = 8
max_nodes = 256
max_cores = 2048
default_cores = cores_per_node
account = "TG-DEB090011"
scheduler_file = "scheduler.conf"
email = "<EMAIL>"
jobname = ""
runfile = "./batch_command.run"
statusfile = "./batch_command.status"
cmdfile = "./batch_command.cmdline"
jobdir = os.getcwd()
jobname = os.environ.get("WB_JOBID", "cipres")
def schedulerInfo(properties):
""" properties is a dictionary containing keys:
jobtype, mpi_processes, threads_per_process, nodes, runhours.
Based on properties and hardcoded info about the resource this returns a dictionary
containing:
is_mpi, queue, runtime, mpi_processes, nodes, ppn"""
# get runhours from properties and convert it to minutes, default to zero if not specified.
try:
runtime = properties.get("runhours", 0.0)
runtime = math.ceil(float(runtime) * 60 )
except:
runtime = 0.0
qname = 0
qlimit = 1
# if runtime is 0 (which isn't really valid), change it to limit for the shortest queue
# so we have something reasonable to work with.
if runtime == 0.0:
runtime = queues[qname][qlimit]
# based on runtime, figure out which queue we should be using.
queue = None
for entry in queues:
if runtime <= entry[qlimit]:
queue = entry[qname]
break
if queue == None:
queue = queues[-1][qname]
runtime = queues[-1][qlimit]
retval = {"runtime":runtime, "queue":queue}
if properties.get("jobtype", "") == "mpi":
retval["is_mpi"] = True
else:
retval["is_mpi"] = False
if (retval["is_mpi"] == 1):
# Some of our pise xml interfaces just specify the number of mpi processes they want.
# We round it down to a multiple of the number of cores per node and request enough nodes
# so that each mpi process has its own core.
if (properties.get("nodes", "") == "") and (properties.get("thread_per_process", "") == ""):
processes = int(properties.get("mpi_processes", 1))
processes = int(processes / cores_per_node) * cores_per_node
processes = min(max(processes, default_cores), max_cores)
retval["nodes"] = processes / cores_per_node
retval["mpi_processes"] = processes
retval["ppn"] = int(retval["mpi_processes"]) / int(retval["nodes"]);
# Pise interfaces that have more knowledge of the specific machine explicitly specify
# the number of nodes as well as the number of mpi processes; we don't 2nd guess them.
else:
retval["nodes"] = int(properties.get("nodes", 1));
retval["mpi_processes"] = int(properties.get("mpi_processes", 1));
retval["ppn"] = int(retval["mpi_processes"]) / int(retval["nodes"]);
return retval
def log(filename, message):
f = open(filename, "a")
f.write(message)
f.close()
def jobInQueue():
cmd = "qstat"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outerr = p.communicate()
output = outerr[0]
err = outerr[1]
if (p.returncode != 0):
raise SystemError("Error running qstat, return code is %d. stderr is %s" % (p.returncode, err))
if (len(err) != 0):
raise SystemError("Error running qstat, stderr is %s" % (err))
if (len(output) < 5):
raise SystemError("Error running qstat, output looks wrong: %s" % (output))
cmd = 'echo "%s" | grep `whoami`' % output
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outerr = p.communicate()
output = outerr[0]
err = outerr[1]
if (len(err) != 0):
raise SystemError("Error piping qstat thru grep: %s" % (err))
output_rows = output.split("\n")
jobs = []
for row in output_rows:
r = row.split()
if len(r) > 4 and r[4] != "C":
r[0] = r[0].split(".", 1)[0]
jobs.append(r[0])
return jobs
def submitJob():
cmd = "qsub %s 2>> %s" % (runfile, statusfile)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0]
retval = p.returncode
if retval != 0:
# read whatever qsub wrote to the statusfile and print it to stdout
print "Error submitting job:\n"
f = open(statusfile, "r"); print f.read(), "\n\n"; f.close()
print output
# When we return 2 it means there are too many jobs queued. Not sure
# if -226 is actually the correct return code on queenbee.
if retval == -226:
retval = 2
log(statusfile, "submit_job is returning %d\n" % retval)
return retval
log(statusfile, "qsub output is: " + output + "\n" +
"======================================================================" + "\n")
# output from qsub should look like:
# 3053477.qb2
p = re.compile(r"^(\d+).qb2\s*$", re.M)
m = p.search(output)
if m != None:
jobid = m.group(0)
short_jobid = m.group(1)
print "jobid=%d" % int(short_jobid)
log(statusfile, "JOBID is %s\n" % jobid)
log("./_JOBINFO.TXT", "\nJOBID=%s\n" % jobid)
return 0
else:
print "Error, qsub says: %s" % output
log(statusfile, "can't get jobid, submit_job is returning 1\n")
return 1
| StarcoderdataPython |
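`schedulerInfo` rounds an MPI core request down to a multiple of `cores_per_node` (8), clamps it between `default_cores` and `max_cores`, and derives `nodes` and `ppn` from the result. A short sketch of what that works out to for a hypothetical job (the property values below are made up for illustration):

```python
# Hypothetical call to schedulerInfo; property values are illustrative only.
props = {"jobtype": "mpi", "mpi_processes": "20", "runhours": "1.0"}
info = schedulerInfo(props)
# 20 requested cores round down to 16 (a multiple of cores_per_node=8), so:
# info == {"runtime": 60.0, "queue": "workq", "is_mpi": True,
#          "nodes": 2, "mpi_processes": 16, "ppn": 8}
```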
1723744 | <gh_stars>1-10
# modified from Kristen's code
## assumes k=2 class set-up
## p_in = [p_in_1, p_in_2]
# imports required by the code below
import numpy as np
from scipy.stats import bernoulli


def create_affiliation_model_temp(average_node_degree,
                                  lambda_block_parameter,
                                  dispersion_parameter_vect,
                                  class_size_vect):
    # total number of nodes
    N = np.sum(class_size_vect)

    ### BLOCK STRUCTURE
    ## define p_in; p_out: following equation (78) and (79) on page 29 of supplement materials
    p_in = (lambda_block_parameter * average_node_degree)/N
    #print('p_in: ', p_in)

    #previous parameterization
    denominator = []
    for j in range(len(class_size_vect)):
        denominator.append(class_size_vect[j] * class_size_vect[~j])
    denom = np.sum(denominator)
    p_out = (average_node_degree * N - np.sum(class_size_vect**2 * p_in))/denom
    #print('p_out: ', p_out)
    #print('')

    ## Expected Degree Sequence for nodes in class 1,2,...k
    ## Generates in-class degree sequence and out-class sequence
    in_class_list = []
    out_class_list = []
    for j in range(len(class_size_vect)):
        #intent here is to iterate through each class
        #and important -- assumes a specific data format for input dispersion_parameter_vect
        (in_class, out_class) = create_expected_degree_sequence(class_size_vect[j], p_in, p_out, dispersion_parameter_vect[j][0], dispersion_parameter_vect[j][1])
        in_class_list.append(in_class)
        out_class_list.append(out_class)

    # What is expected prob matrix??
    expected_prob_matrix = np.zeros((N, N))
    for i in range(len(class_size_vect)):
        for j in range(len(class_size_vect)):
            idx = np.sum(class_size_vect[0:i])
            jdx = np.sum(class_size_vect[0:j])
            if i == j:
                expected_prob_matrix[idx:idx+class_size_vect[j], jdx:jdx+class_size_vect[j]] = in_class_matrix(in_class_list[j])/(class_size_vect[j]**2*p_in)
            else:
                out = out_class_matrix(out_class_list[i], out_class_list[j])/(class_size_vect[i]*class_size_vect[j]*p_out)
                if j < i:
                    expected_prob_matrix[idx:idx+class_size_vect[i], jdx:jdx+class_size_vect[j]] = out
                if i < j:
                    expected_prob_matrix[idx:idx+class_size_vect[i], jdx:jdx+class_size_vect[j]] = out

    #A_ij_tmp = np.matrix(map(bernoulli.rvs,expected_prob_matrix))
    f = np.vectorize(bernoulli.rvs)
    A_ij_tmp = np.matrix(f(expected_prob_matrix))
    Adj_corrected = np.matrix(np.triu(A_ij_tmp, k=0) + np.transpose(np.triu(A_ij_tmp, k=1)))
    f2 = np.vectorize(np.tile)
    Membership = np.concatenate(list(map(np.tile, np.array(range(len(class_size_vect))), class_size_vect)), axis=0)
    return (Adj_corrected, Membership)
| StarcoderdataPython |
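Reconstructed from the code above (the comments point to equations (78) and (79) of the paper's supplement, which are not reproduced here), the block probabilities it computes are:

$$
p_{\mathrm{in}} = \frac{\lambda \,\langle k \rangle}{N},
\qquad
p_{\mathrm{out}} = \frac{\langle k \rangle N - p_{\mathrm{in}} \sum_j n_j^2}{\sum_j n_j\, n_{\sim j}}
$$

where $\langle k \rangle$ is `average_node_degree`, $\lambda$ is `lambda_block_parameter`, $n_j$ are the class sizes, and for the assumed two-class case the denominator reduces to $2\, n_1 n_2$.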
3354394 | <reponame>parada3desu/foxy-key-broker
import random
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyId import CryptoKeyId
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyPayload import CryptoKeyPayload
from src.contexts.kms.cryptokeys.domain.repositories.CryptoKeyRepository import CryptoKeyRepository
from src.contexts.shared.domain.EventBus import EventBus
class CryptoKeyRotator:
    def __init__(self, cryptokey_repository: CryptoKeyRepository, event_bus: EventBus):
        self._cryptokey_repository = cryptokey_repository
        self._event_bus = event_bus

    async def run(self, key_id: CryptoKeyId):
        crypto_key = await self._cryptokey_repository.find_by_id(key_id)

        chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
        length = 62

        def uid(seed):
            random.seed(seed)
            return ''.join([chars[int(random.random() * length)] for _ in range(len(crypto_key.payload.value()))])

        # TODO: rotation
        uid(crypto_key.payload.value())
        rote = uid(crypto_key.payload.value())
        crypto_key.payload = CryptoKeyPayload(rote)

        await self._cryptokey_repository.update_one(crypto_key)

        # TODO: dispatch key rotated event
| StarcoderdataPython |
3294889 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Selects the appropriate scraper for Chrome."""
def GetScraper(version):
"""Returns the scraper module for the given version.
Args:
version: version string of Chrome, or None for most recent
Returns:
scrape module for given version
"""
if version is None:
version = "0.1.101.0"
parsed_version = [int(x) for x in version.split(".")]
if (parsed_version[0] > 0 or
parsed_version[1] > 1 or
parsed_version[2] > 97 or
parsed_version[3] > 0):
scraper_version = "chrome011010"
else:
scraper_version = "chrome01970"
return __import__(scraper_version, globals(), locals(), [''])
# if invoked rather than imported, test
if __name__ == "__main__":
print GetScraper("0.1.101.0").version
| StarcoderdataPython |
3252380 | <reponame>PaulGureghian1/Rainbow_HAT<gh_stars>1-10
#!/usr/bin/env python
import time
#import blinkt
from rainbowhat import rainbow as blinkt
blinkt.set_clear_on_exit()
step = 0
while True:
    if step == 0:
        blinkt.set_all(128, 0, 0)
    if step == 1:
        blinkt.set_all(0, 128, 0)
    if step == 2:
        blinkt.set_all(0, 0, 128)
    step += 1
    step %= 3
    blinkt.show()
    time.sleep(0.5)
| StarcoderdataPython |
170068 | from bravado_core.spec import Spec
from bravado_types.config import Config
from bravado_types.data_model import (ModelInfo, OperationInfo, ParameterInfo,
PropertyInfo, ResourceInfo, ResponseInfo,
SpecInfo)
from bravado_types.extract import get_spec_info
def test_extract_minimal():
spec = Spec.from_dict({
'swagger': '2.0',
'info': {
'title': 'Minimal schema',
'version': '1.0',
},
'paths': {},
})
spec_info = get_spec_info(spec, Config(name='Test', path='/tmp/test.py'))
assert isinstance(spec_info, SpecInfo)
assert spec_info.spec is spec
assert spec_info.models == []
assert spec_info.resources == []
assert spec_info.operations == []
def test_extract_basic():
spec = Spec.from_dict({
'swagger': '2.0',
'info': {
'title': 'Simple schema',
'version': '1.0',
},
'paths': {
'/foo': {
'post': {
'operationId': 'createFoo',
'tags': ['foo'],
'parameters': [
{
'name': 'request',
'in': 'body',
'required': True,
'schema': {
'$ref': '#/definitions/Foo',
},
},
],
'responses': {
'204': {'$ref': '#/responses/successNoContent'},
}
}
},
'/foo/{id}': {
'get': {
'operationId': 'getFoo',
'tags': ['foo'],
'parameters': [
{
'name': 'id',
'in': 'path',
'type': 'integer',
'required': True,
},
{'$ref': '#/parameters/headerParam'},
],
'responses': {
'200': {
'description': 'Success',
'schema': {'$ref': '#/definitions/Foo'},
},
'404': {
'description': 'Not Found',
'schema': {},
},
},
},
},
'/bar': {
'get': {
'operationId': 'getBar',
'tags': ['bar'],
'responses': {
'200': {
'description': 'Success',
'schema': {'$ref': '#/definitions/BarList'},
},
},
},
},
},
'definitions': {
'Foo': {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'foobar': {'type': 'string'},
},
'required': ['id'],
},
'Bar': {
'type': 'object',
},
'BarList': {
'type': 'array',
'items': {'$ref': '#/definitions/Bar'},
},
},
'parameters': {
'headerParam': {
'name': 'Header-Param',
'in': 'header',
'type': 'string',
},
},
'responses': {
'successNoContent': {
'description': 'Operation successful, no response content',
},
},
})
spec_info = get_spec_info(spec, Config(name='Test', path='/tmp/test.py'))
assert spec_info.spec is spec
assert spec_info.models == [
ModelInfo(spec.definitions['Bar'], 'Bar', [], []),
ModelInfo(spec.definitions['Foo'], 'Foo', [], [
PropertyInfo('foobar', 'str', False),
PropertyInfo('id', 'int', True),
]),
]
createFoo = spec.resources['foo'].operations['createFoo']
getFoo = spec.resources['foo'].operations['getFoo']
getBar = spec.resources['bar'].operations['getBar']
assert spec_info.operations == [
OperationInfo(createFoo, 'createFoo', [
ParameterInfo(createFoo.params['request'], 'request', 'FooModel',
True),
], [
ResponseInfo('204', 'None'),
]),
OperationInfo(getBar, 'getBar', [], [
ResponseInfo('200', 'typing.List[BarModel]'),
]),
OperationInfo(getFoo, 'getFoo', [
ParameterInfo(getFoo.params['Header_Param'], 'Header_Param', 'str',
False),
ParameterInfo(getFoo.params['id'], 'id', 'int', True),
], [
ResponseInfo('200', 'FooModel'),
ResponseInfo('404', 'typing.Any'),
]),
]
assert spec_info.resources == [
ResourceInfo(
spec.resources['bar'], 'bar',
operations=[spec_info.operations[1]]),
ResourceInfo(
spec.resources['foo'], 'foo',
operations=[spec_info.operations[0], spec_info.operations[2]]),
]
| StarcoderdataPython |
85114 | # -*- coding: utf-8 -*-
from numpy import array, pi
from scipy.linalg import solve
def solve_EEC(self):
"""Compute the parameters dict for the analytical equivalent electrical circuit
cf "Influence of the Number of Pole Pairs on the Audible
Noise of Inverter-Fed Induction Motors: Radial
Force Waves and Mechanical Resonances"
<NAME>, <NAME>, <NAME>
Parameters
----------
self : EEC_ANL
an EEC_ANL object
Return
------
out_dict : dict
Dict containing all magnetic quantities that have been calculated in EEC
"""
f = self.freq0
ws = 2 * pi * f
PAR = self.parameters
out_dict = dict()
if "Ud" in PAR and "Id" in PAR:
# No need to compute
out_dict["Id"] = PAR["Id"]
out_dict["Iq"] = PAR["Iq"]
out_dict["Ud"] = PAR["Ud"]
out_dict["Uq"] = PAR["Uq"]
elif "Ud" in PAR: # Voltage driven
out_dict["Id"] = PAR["Ud"] / (1j * ws * PAR["Ld"])
out_dict["Iq"] = PAR["Uq"] / (1j * ws * PAR["Lq"])
out_dict["Ud"] = PAR["Ud"]
out_dict["Uq"] = PAR["Uq"]
else: # Current Driven
out_dict["Ud"] = PAR["Id"] * (1j * ws * PAR["Ld"])
out_dict["Uq"] = PAR["Iq"] * (1j * ws * PAR["Lq"])
out_dict["Id"] = PAR["Id"]
out_dict["Iq"] = PAR["Iq"]
return out_dict
| StarcoderdataPython |
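The three branches implement the purely inductive d-q phasor relations at electrical pulsation $\omega_s = 2\pi f$, exactly as coded above (no resistance or cross-coupling terms):

$$
U_d = j\,\omega_s L_d\, I_d, \qquad U_q = j\,\omega_s L_q\, I_q
$$

The voltage-driven branch inverts these to obtain $I_d$ and $I_q$; the current-driven branch applies them directly to obtain $U_d$ and $U_q$.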
3305275 | <filename>pydatatool/crowdhuman/crowdhuman_eval/demo.py
from .common import *
from tqdm import tqdm
from multiprocessing import Process,Queue
import numpy as np
import math
from .utils.infrastructure import compute_JC
def commom_process(func, data, nr_procs, *args):
    total = len(data)
    stride = math.ceil(total/nr_procs)
    result_queue = Queue(1000)
    results, procs = [], []
    tqdm.monitor_interval = 0
    pbar = tqdm(total=total)
    for i in range(nr_procs):
        start = i*stride
        end = np.min([start+stride, total])
        sample_data = data[start:end]
        p = Process(target=func, args=(result_queue, sample_data, *args))
        p.start()
        procs.append(p)
    for i in range(total):
        t = result_queue.get()
        if t is None:
            pbar.update(1)
            continue
        results.append(t)
        pbar.update()
    for p in procs:
        p.join()
    return results


def _is_ignore(rb):
    flag = False
    if 'extra' in rb:
        if 'ignore' in rb['extra']:
            if rb['extra']['ignore']:
                flag = True
    return flag


def worker(result_queue, records, gt, bm_thr):
    total, eps = len(records), 1e-6
    for i in range(total):
        record = records[i]
        ID = record['ID']
        height, width = record['height'], record['width']
        if len(record['dtboxes']) < 1:
            result_queue.put_nowait(None)
            continue
        _gt = list(filter(lambda rb: rb['ID'] == ID, gt))
        if len(_gt) < 1:
            result_queue.put_nowait(None)
            continue
        _gt = _gt[0]
        flags = np.array([_is_ignore(rb) for rb in _gt['gtboxes']])
        rows = np.where(~flags)[0]
        gtboxes = np.vstack([_gt['gtboxes'][j]['fbox'] for j in rows])
        gtboxes = recover_func(gtboxes)
        gtboxes = clip_boundary(gtboxes, height, width).astype(np.float32)
        dtboxes = np.vstack([np.hstack([rb['box'], rb['score']]) for rb in record['dtboxes']])
        dtboxes = recover_func(dtboxes)
        dtboxes = clip_boundary(dtboxes, height, width).astype(np.float32)
        matches = compute_JC(dtboxes, gtboxes, bm_thr)
        k = len(matches)
        m = gtboxes.shape[0]
        n = dtboxes.shape[0]
        ratio = k / (m + n - k + eps)
        recall = k / (m + eps)
        cover = k / (n + eps)
        noise = 1 - cover
        result_dict = dict(ID=ID, ratio=ratio, recall=recall, noise=noise,
                           cover=cover, valid=k, total=n, gtn=m)
        result_queue.put_nowait(result_dict)


if __name__ == '__main__':
    fpath = 'data/gt_human.odgt'
    gt = load_func(fpath)
    fpath = 'data/epoch-22.human'
    records = load_func(fpath)
    results = commom_process(worker, records, 4, gt, 0.5)
    mean_ratio = np.sum([rb['ratio'] for rb in results]) / len(results)
    print('mJI@0.5 is {:.4f}'.format(mean_ratio))
| StarcoderdataPython |
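For each image, `worker` computes Jaccard-style statistics from the number of matched detection/ground-truth pairs $k$, ground-truth boxes $m$, and detections $n$, with $\varepsilon = 10^{-6}$ guarding against division by zero:

$$
\mathrm{ratio} = \frac{k}{m + n - k + \varepsilon}, \quad
\mathrm{recall} = \frac{k}{m + \varepsilon}, \quad
\mathrm{cover} = \frac{k}{n + \varepsilon}, \quad
\mathrm{noise} = 1 - \mathrm{cover}
$$

The `__main__` block then reports the mean of `ratio` over all images as mJI@0.5.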
3361150 | #! /usr/bin/env python
import math
import rospy
import tf
import tf2_ros
from sensor_msgs.msg import LaserScan
def callback2(laserData): #function for determining laser distance at determined angle
    # print len(msg.ranges)
    try:
        if lidar_angle == 500:
            return
        lidar_angle_new = int(lidar_angle)
        print(lidar_angle_new)
    except NameError:
        return
    animalDistance = laserData.ranges[lidar_angle_new]
    print(animalDistance)

    #lidar_angle = 500
    #animalDistance = laserData.ranges[int(lidar_angle)]
    #lidar_angle = None
    #print(animalDistance)
    # if lidar_angle:
    #     print(lidar_angle)
    #     try:
    #         animal_distance = laserData.ranges[lidar_angle]
    #         # if x_angle:
    #         print(animal_distance)
    #
    #     except(IndexError):
    #pub = rospy.Publisher('animalFound', Coordinates, queue_size=10)
    #return


if __name__ == '__main__':
    #sub1 = rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, callback1)
    sub2 = rospy.Subscriber('/scan', LaserScan, callback2)
    # sub3 = rospy.Subscriber('chatter', Coordinates, callback2)
    rospy.spin()  # continuous loop
| StarcoderdataPython |
157076 | import rospy
from std_msgs.msg import Float64
from rospy import Subscriber
from common.architectural.Singleton import Singleton
class RAMSensor(metaclass=Singleton):
    __ram_sub: Subscriber
    __ram_percentage: float = 0.0

    def __init__(self):
        self.__ram_sub = rospy.Subscriber('/ram_usage', Float64, self.__ram_usage_clbk)

    def __ram_usage_clbk(self, msg: Float64):
        self.__ram_percentage = msg.data

    def get_percentage(self) -> float:
        return self.__ram_percentage
 | StarcoderdataPython |
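The class relies on a `Singleton` metaclass imported from `common.architectural`, so repeated construction returns the same subscriber-holding instance. That module is not shown in this sample; below is a minimal sketch of the conventional metaclass pattern it presumably resembles.

```python
# Minimal sketch of a Singleton metaclass; an assumption about what
# common.architectural.Singleton roughly looks like, not its actual source.
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Create the instance once, then keep returning the cached object.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]
```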
1649319 | <filename>omop_cdm/datalab_to_prepared_source.py
import logging
import json
import os
import argparse
import csv
import hashlib
import sys
try:
from mapping_classes import InputClass
except ImportError:
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir, "src")))
from mapping_classes import InputClass
from mapping_classes import OutputClassCSVRealization, InputOutputMapperDirectory, OutputClassDirectory, \
CoderMapperJSONClass, TransformMapper, FunctionMapper, FilterHasKeyValueMapper, ChainMapper, CascadeKeyMapper, \
CascadeMapper, KeyTranslator, PassThroughFunctionMapper, CodeMapperDictClass, CodeMapperDictClass, ConstantMapper, \
ReplacementMapper, MapperClass
from prepared_source_classes import SourcePersonObject, SourceCareSiteObject, SourceEncounterObject, \
SourceObservationPeriodObject, SourceEncounterCoverageObject, SourceResultObject, SourceConditionObject, \
SourceProcedureObject, SourceMedicationObject, SourceLocationObject, SourceEncounterDetailObject
from source_to_cdm_functions import generate_mapper_obj, IntFloatMapper
from utility_functions import generate_observation_period
from prepared_source_functions import build_name_lookup_csv, build_key_func_dict
logging.basicConfig(level=logging.INFO)
class PopulationDemographics(InputClass):
def fields(self):
return ["empiPersonId", "gender_code", "gender_code_oid", "gender_code_text", "birthsex_code", "birthsex_code_oid",
"birthsex_code_text", "birthdate", "dateofdeath", "zip_code", "race_code", "race_code_oid", "race_code_text",
"ethnicity_code", "ethnicity_code_oid", "ethnicity_code_text", "active"]
class PopulationEncounter(InputClass):
def fields(self):
return ["encounterid", "empiPersonId", "hospitalizationstartdate", "readmission", "dischargedate", "servicedate",
"financialclass_code", "financialclass_code_oid", "financialclass_code_text", "hospitalservice_code",
"hospitalservice_code_oid", "hospitalservice_code_text", "classfication_code", "classification_code_oid",
"classification_code_text", "type_code", "type_code_oid", "type_code_text", "dischargedisposition_code",
"dischargedisposition_code_oid", "dischargedisposition_code_text", "dischargetolocation_code",
"dischargetolocation_code_oid", "dischargetolocation_code_text", "admissionsource_code",
"admissionsource_code_oid", "admissionsource_code_text", "admissiontype_code", "admissiontype_code_oid",
"admissiontype_code_text", "status_code", "status_code_oid", "status_code_text", "estimatedarrivaldate",
"estimateddeparturedate", "actualarrivaldate", "source", "active"]
class PopulationCondition(InputClass):
def fields(self):
return ["conditionid", "empiPersonId", "encounterid", "condition_code", "condition_code_oid", "condition_code_text",
"effectiveDate", "billingrank", "presentonadmission_code", "presentonadmission_code_oid",
"presentonadmission_text", "type_primary_code", "type_primary_code_oid", "type_primary_text",
"source"]
class PopulationProcedure(InputClass):
def fields(self):
return ["procedureid", "empiPersonId", "encounterid", "procedure_code", "procedure_code_oid",
"procedure_code_display", "modifier_code", "modifier_oid", "modifier_text", "servicestartdate",
"serviceenddate", "status_code", "status_oid", "active"]
class PopulationMedication(InputClass):
def fields(self):
return ["medicationid", "encounterid", "empiPersonId", "intendeddispenser", "startdate", "stopdate", "doseunit_code",
"doseunit_code_oid", "doseunit_code_text", "category_id", "category_code_oid", "category_code_text",
"frequency_id", "frequency_code_oid", "frequency_code_text", "status_code", "status_code_oid",
"status_code_text", "route_code", "route_code_oid", "route_code_text", "drug_code", "drug_code_oid",
"drug_code_text", "dosequantity", "source"]
class PopulationResult(InputClass):
def fields(self):
return ["resultid", "encounterid", "empiPersonId", "result_code", "result_code_oid", "result_code_text",
"result_type", "servicedate", "value_text", "value_numeric", "value_numeric_modifier", "unit_code",
"unit_code_oid", "unit_code_text", "value_codified_code", "value_codified_code_oid",
"value_codified_code_text", "date", "interpretation_code", "interpretation_code_oid",
"interpretation_code_text", "specimen_type_code", "specimen_type_code_oid", "specimen_type_code_text",
"bodysite_code", "bodysite_code_oid", "bodysite_code_text", "specimen_collection_date",
"specimen_received_date", "measurementmethod_code", "measurementmethod_code_oid",
"measurementmethod_code_text", "recordertype", "issueddate", "year"]
class PopulationObservationPeriod(InputClass):
def fields(self):
return []
class PopulationCareSite(InputClass):
def fields(self):
return []
class AddressLookup(InputClass):
def fields(self):
return []
class PopulationEncounterLocation(InputClass):
def fields(self):
return []
class DuplicateExcludeMapper(MapperClass):
"""Indicates that a row is a duplicate"""
def __init__(self, id_field):
self.id_field = id_field
self.id_dict = {"i_exclude": ""}
def map(self, input_dict):
if self.id_field in input_dict:
id_value = input_dict[self.id_field]
if id_value in self.id_dict:
return {"i_exclude": 1}
else:
self.id_dict[id_value] = 1
return {"i_exclude": ""}
else:
return {}
def main(input_csv_directory, output_csv_directory, file_name_dict):
output_class_obj = OutputClassDirectory()
in_out_map_obj = InputOutputMapperDirectory()
location_lookup_csv = os.path.join(input_csv_directory, "address_lookup.csv")
address_csv = os.path.join(input_csv_directory, "population_address.csv")
md5_func = lambda x: hashlib.md5(x.encode("utf8")).hexdigest()
source_location_csv = os.path.join(output_csv_directory, "source_location.csv")
key_location_mapper = build_name_lookup_csv(address_csv, location_lookup_csv,
["street_1", "street_2", "city", "state", "zip_code"],
["street_1", "street_2", "city", "state", "zip_code"], hashing_func=md5_func)
key_address_name_mapper = FunctionMapper(
build_key_func_dict(["street_1", "street_2", "city", "state", "zip_code"], separator="|"))
# k_location,s_address_1,s_address_2,s_city,s_state,s_zip,s_county,s_location_name
location_rules = [("key_name", "k_location"),
(("street_1", "street_2", "city", "state",
"zip_code"),
key_address_name_mapper,
{"mapped_value": "s_location_name"}),
("street_1", "s_address_1"),
("street_2", "s_address_2"),
("city", "s_city"),
("state", "s_state"),
("zip_code", "s_zip")
]
location_runner_obj = generate_mapper_obj(location_lookup_csv, AddressLookup(), source_location_csv,
SourceLocationObject(), location_rules,
output_class_obj, in_out_map_obj)
location_runner_obj.run()
input_patient_file_name = os.path.join(input_csv_directory, file_name_dict["demographic"])
# Source: https://www.hl7.org/fhir/v3/Race/cs.html
hl7_race_dict = {
"1002-5": "American Indian or Alaska Native",
"1004-1": "American Indian",
"1006-6": "Abenaki",
"1008-2": "Algonquian",
"1010-8": "Apache",
"1011-6": "Chiricahua",
"1012-4": "Fort Sill Apache",
"1013-2": "Jicarilla Apache",
"1014-0": "Lipan Apache",
"1015-7": "Mescalero Apache",
"1016-5": "Oklahoma Apache",
"1017-3": "Payson Apache",
"1018-1": "San Carlos Apache",
"1019-9": "White Mountain Apache",
"1021-5": "Arapaho",
"1022-3": "Northern Arapaho",
"1023-1": "Southern Arapaho",
"1024-9": "Wind River Arapaho",
"1026-4": "Arikara",
"1028-0": "Assiniboine",
"1030-6": "Assiniboine Sioux",
"1031-4": "Fort Peck Assiniboine Sioux",
"1033-0": "Bannock",
"1035-5": "Blackfeet",
"1037-1": "Brotherton",
"1039-7": "Burt Lake Band",
"1041-3": "Caddo",
"1042-1": "Oklahoma Cado",
"1044-7": "Cahuilla",
"1045-4": "Agua Caliente Cahuilla",
"1046-2": "Augustine",
"1047-0": "Cabazon",
"1048-8": "Los Coyotes",
"1049-6": "Morongo",
"1050-4": "Santa Rosa Cahuilla",
"1051-2": "Torres-Martinez",
"1053-8": "California Tribes",
"1054-6": "Cahto",
"1055-3": "Chimariko",
"1056-1": "Coast Miwok",
"1057-9": "Digger",
"1058-7": "Kawaiisu",
"1059-5": "Kern River",
"1060-3": "Mattole",
"1061-1": "Red Wood",
"1062-9": "Santa Rosa",
"1063-7": "Takelma",
"1064-5": "Wappo",
"1065-2": "Yana",
"1066-0": "Yuki",
"1068-6": "Canadian and Latin American Indian",
"1069-4": "Canadian Indian",
"1070-2": "Central American Indian",
"1071-0": "French American Indian",
"1072-8": "Mexican American Indian",
"1073-6": "South American Indian",
"1074-4": "Spanish American Indian",
"1076-9": "Catawba",
"1741-8": "Alatna",
"1742-6": "Alexander",
"1743-4": "Allakaket",
"1744-2": "Alanvik",
"1745-9": "Anvik",
"1746-7": "Arctic",
"1747-5": "Beaver",
"1748-3": "Birch Creek",
"1749-1": "Cantwell",
"1750-9": "Chalkyitsik",
"1751-7": "Chickaloon",
"1752-5": "Chistochina",
"1753-3": "Chitina",
"1754-1": "Circle",
"1755-8": "Cook Inlet",
"1756-6": "Copper Center",
"1757-4": "Copper River",
"1758-2": "Dot Lake",
"1759-0": "Doyon",
"1760-8": "Eagle",
"1761-6": "Eklutna",
"1762-4": "Evansville",
"1763-2": "Fort Yukon",
"1764-0": "Gakona",
"1765-7": "Galena",
"1766-5": "Grayling",
"1767-3": "Gulkana",
"1768-1": "Healy Lake",
"1769-9": "Holy Cross",
"1770-7": "Hughes",
"1771-5": "Huslia",
"1772-3": "Iliamna",
"1773-1": "Kaltag",
"1774-9": "Kluti Kaah",
"1775-6": "Knik",
"1776-4": "Koyukuk",
"1777-2": "Lake Minchumina",
"1778-0": "Lime",
"1779-8": "Mcgrath",
"1780-6": "Manley Hot Springs",
"1781-4": "<NAME>",
"1782-2": "Minto",
"1783-0": "Nenana",
"1784-8": "Nikolai",
"1785-5": "Ninilchik",
"1786-3": "Nondalton",
"1787-1": "Northway",
"1788-9": "Nulato",
"1789-7": "<NAME>",
"1790-5": "Rampart",
"1791-3": "Ruby",
"1792-1": "Salamatof",
"1793-9": "Seldovia",
"1794-7": "Slana",
"1795-4": "Shageluk",
"1796-2": "Stevens",
"1797-0": "<NAME>",
"1798-8": "Takotna",
"1799-6": "Tanacross",
"1800-2": "Tanaina",
"1801-0": "Tanana",
"1802-8": "<NAME>",
"1803-6": "Tazlina",
"1804-4": "Telida",
"1805-1": "Tetlin",
"1806-9": "Tok",
"1807-7": "Tyonek",
"1808-5": "Venetie",
"1809-3": "Wiseman",
"1078-5": "Cayuse",
"1080-1": "Chehalis",
"1082-7": "Chemakuan",
"1083-5": "Hoh",
"1084-3": "Quileute",
"1086-8": "Chemehuevi",
"1088-4": "Cherokee",
"1089-2": "Cherokee Alabama",
"1090-0": "Cherokees of Northeast Alabama",
"1091-8": "Cherokees of Southeast Alabama",
"1092-6": "Eastern Cherokee",
"1093-4": "Echota Cherokee",
"1094-2": "Etowah Cherokee",
"1095-9": "Northern Cherokee",
"1096-7": "Tuscola",
"1097-5": "United Keetowah Band of Cherokee",
"1098-3": "Western Cherokee",
"1100-7": "Cherokee Shawnee",
"1102-3": "Cheyenne",
"1103-1": "Northern Cheyenne",
"1104-9": "Southern Cheyenne",
"1106-4": "Cheyenne-Arapaho",
"1108-0": "Chickahominy",
"1109-8": "Eastern Chickahominy",
"1110-6": "Western Chickahominy",
"1112-2": "Chickasaw",
"1114-8": "Chinook",
"1115-5": "Clatsop",
"1116-3": "Columbia River Chinook",
"1117-1": "Kathlamet",
"1118-9": "Upper Chinook",
"1119-7": "Wakiakum Chinook",
"1120-5": "Willapa Chinook",
"1121-3": "Wishram",
"1123-9": "Chippewa",
"1124-7": "Bad River",
"1125-4": "Bay Mills Chippewa",
"1126-2": "Bois Forte",
"1127-0": "Burt Lake Chippewa",
"1128-8": "Fond du Lac",
"1129-6": "Grand Portage",
"1130-4": "Grand Traverse Band of Ottawa-Chippewa",
"1131-2": "Keweenaw",
"1132-0": "Lac Courte Oreilles",
"1133-8": "Lac du Flambeau",
"1134-6": "Lac Vieux Desert Chippewa",
"1135-3": "Lake Superior",
"1136-1": "Leech Lake",
"1137-9": "Little Shell Chippewa",
"1138-7": "Mille Lacs",
"1139-5": "Minnesota Chippewa",
"1140-3": "Ontonagon",
"1141-1": "Red Cliff Chippewa",
"1142-9": "Red Lake Chippewa",
"1143-7": "<NAME>",
"1144-5": "St. Croix Chippewa",
"1145-2": "Sault Ste. <NAME>",
"1146-0": "<NAME>",
"1147-8": "Turtle Mountain",
"1148-6": "White Earth",
"1150-2": "Chippewa Cree",
"1151-0": "Rocky Boy's Chippewa Cree",
"1153-6": "Chitimacha",
"1155-1": "Choctaw",
"1156-9": "<NAME>",
"1157-7": "<NAME>",
"1158-5": "Mississippi Choctaw",
"1159-3": "Mowa Band of Choctaw",
"1160-1": "Oklahoma Choctaw",
"1162-7": "Chumash",
"1163-5": "Santa Ynez",
"1165-0": "Clear Lake",
"1167-6": "Coeur D'Alene",
"1169-2": "Coharie",
"1171-8": "Colorado River",
"1173-4": "Colville",
"1175-9": "Comanche",
"1176-7": "Oklahoma Comanche",
"1178-3": "Coos, Lower Umpqua, Siuslaw",
"1180-9": "Coos",
"1182-5": "Coquilles",
"1184-1": "Costanoan",
"1186-6": "Coushatta",
"1187-4": "Alabama Coushatta",
"1189-0": "Cowlitz",
"1191-6": "Cree",
"1193-2": "Creek",
"1194-0": "Alabama Creek",
"1195-7": "Alabama Quassarte",
"1196-5": "Eastern Creek",
"1197-3": "Eastern Muscogee",
"1198-1": "Kialegee",
"1199-9": "Lower Muscogee",
"1200-5": "Machis Lower Creek Indian",
"1201-3": "Poarch Band",
"1202-1": "Principal Creek Indian Nation",
"1203-9": "Star Clan of Muscogee Creeks",
"1204-7": "Thlopthlocco",
"1205-4": "Tuckabachee",
"1207-0": "Croatan",
"1209-6": "Crow",
"1211-2": "Cupeno",
"1212-0": "Agua Caliente",
"1214-6": "Delaware",
"1215-3": "Eastern Delaware",
"1216-1": "Lenni-Lenape",
"1217-9": "Munsee",
"1218-7": "Oklahoma Delaware",
"1219-5": "Rampough Mountain",
"1220-3": "Sand Hill",
"1222-9": "Diegueno",
"1223-7": "Campo",
"1224-5": "Capitan Grande",
"1225-2": "Cuyapaipe",
"1226-0": "La Posta",
"1227-8": "Manzanita",
"1228-6": "Mesa Grande",
"1229-4": "<NAME>",
"1230-2": "<NAME>",
"1231-0": "Sycuan",
"1233-6": "Eastern Tribes",
"1234-4": "Attacapa",
"1235-1": "Biloxi",
"1236-9": "Georgetown",
"1237-7": "Moor",
"1238-5": "Nansemond",
"1239-3": "Natchez",
"1240-1": "<NAME>",
"1241-9": "Nipmuc",
"1242-7": "Paugussett",
"1243-5": "<NAME>",
"1244-3": "Southeastern Indians",
"1245-0": "Susquehanock",
"1246-8": "<NAME>",
"1247-6": "Waccamaw-Siousan",
"1248-4": "Wicomico",
"1250-0": "Esselen",
"1252-6": "<NAME>",
"1254-2": "<NAME>",
"1256-7": "<NAME>",
"1258-3": "<NAME>",
"1260-9": "Gabrieleno",
"1262-5": "Grand Ronde",
"1264-1": "<NAME>",
"1265-8": "Atsina",
"1267-4": "Haliwa",
"1269-0": "Hidatsa",
"1271-6": "Hoopa",
"1272-4": "Trinity",
"1273-2": "Whilkut",
"1275-7": "Hoopa Extension",
"1277-3": "Houma",
"1279-9": "Inaja-Cosmit",
"1281-5": "Iowa",
"1282-3": "Iowa of Kansas-Nebraska",
"1283-1": "Iowa of Oklahoma",
"1285-6": "Iroquois",
"1286-4": "Cayuga",
"1287-2": "Mohawk",
"1288-0": "Oneida",
"1289-8": "Onondaga",
"1290-6": "Seneca",
"1291-4": "Seneca Nation",
"1292-2": "Seneca-Cayuga",
"1293-0": "Tonawanda Seneca",
"1294-8": "Tuscarora",
"1295-5": "Wyandotte",
"1297-1": "Juaneno",
"1299-7": "Kalispel",
"1301-1": "Karuk",
"1303-7": "Kaw",
"1305-2": "Kickapoo",
"1306-0": "Oklahoma Kickapoo",
"1307-8": "Texas Kickapoo",
"1309-4": "Kiowa",
"1310-2": "Oklahoma Kiowa",
"1312-8": "Klallam",
"1313-6": "Jamestown",
"1314-4": "Lower Elwha",
"1315-1": "Port Gamble Klallam",
"1317-7": "Klamath",
"1319-3": "Konkow",
"1321-9": "Kootenai",
"1323-5": "Lassik",
"1325-0": "Long Island",
"1326-8": "Matinecock",
"1327-6": "Montauk",
"1328-4": "Poospatuck",
"1329-2": "Setauket",
"1331-8": "Luiseno",
"1332-6": "La Jolla",
"1333-4": "Pala",
"1334-2": "Pauma",
"1335-9": "Pechanga",
"1336-7": "Soboba",
"1337-5": "Twenty-Nine Palms",
"1338-3": "Temecula",
"1340-9": "Lumbee",
"1342-5": "Lummi",
"1344-1": "Maidu",
"1345-8": "Mountain Maidu",
"1346-6": "Nishinam",
"1348-2": "Makah",
"1350-8": "Maliseet",
"1352-4": "Mandan",
"1354-0": "Mattaponi",
"1356-5": "Menominee",
"1358-1": "Miami",
"1359-9": "Illinois Miami",
"1360-7": "Indiana Miami",
"1361-5": "Oklahoma Miami",
"1363-1": "Miccosukee",
"1365-6": "Micmac",
"1366-4": "Aroostook",
"1368-0": "Mission Indians",
"1370-6": "Miwok",
"1372-2": "Modoc",
"1374-8": "Mohegan",
"1376-3": "Mono",
"1378-9": "Nanticoke",
"1380-5": "Narragansett",
"1382-1": "Navajo",
"1383-9": "<NAME>",
"1384-7": "<NAME>",
"1385-4": "<NAME>",
"1387-0": "<NAME>",
"1389-6": "Nomalaki",
"1391-2": "Northwest Tribes",
"1392-0": "Alsea",
"1393-8": "Celilo",
"1394-6": "Columbia",
"1395-3": "Kalapuya",
"1396-1": "Molala",
"1397-9": "Talakamish",
"1398-7": "Tenino",
"1399-5": "Tillamook",
"1400-1": "Wenatchee",
"1401-9": "Yahooskin",
"1403-5": "Omaha",
"1405-0": "<NAME>",
"1407-6": "Osage",
"1409-2": "Otoe-Missouria",
"1411-8": "Ottawa",
"1412-6": "Burt Lake Ottawa",
"1413-4": "Michigan Ottawa",
"1414-2": "Oklahoma Ottawa",
"1416-7": "Paiute",
"1417-5": "Bishop",
"1418-3": "Bridgeport",
"1419-1": "Burns Paiute",
"1420-9": "Cedarville",
"1421-7": "Fort Bidwell",
"1422-5": "Fort Independence",
"1423-3": "Kaibab",
"1424-1": "Las Vegas",
"1425-8": "Lone Pine",
"1426-6": "Lovelock",
"1427-4": "Malheur Paiute",
"1428-2": "Moapa",
"1429-0": "Northern Paiute",
"1430-8": "Owens Valley",
"1431-6": "Pyramid Lake",
"1432-4": "San Juan Southern Paiute",
"1433-2": "Southern Paiute",
"1434-0": "Summit Lake",
"1435-7": "Utu Utu Gwaitu Paiute",
"1436-5": "Walker River",
"1437-3": "Yerington Paiute",
"1439-9": "Pamunkey",
"1441-5": "Passamaquoddy",
"1442-3": "Indian Township",
"1443-1": "Pleasant Point Passamaquoddy",
"1445-6": "Pawnee",
"1446-4": "Oklahoma Pawnee",
"1448-0": "Penobscot",
"1450-6": "Peoria",
"1451-4": "Oklahoma Peoria",
"1453-0": "Pequot",
"1454-8": "Marshantucket Pequot",
"1456-3": "Pima",
"1457-1": "Gila River Pima-Maricopa",
"1458-9": "Salt River Pima-Maricopa",
"1460-5": "Piscataway",
"1462-1": "Pit River",
"1464-7": "Pomo",
"1465-4": "Central Pomo",
"1466-2": "Dry Creek",
"1467-0": "Eastern Pomo",
"1468-8": "Kashia",
"1469-6": "Northern Pomo",
"1470-4": "Scotts Valley",
"1471-2": "Stonyford",
"1472-0": "Sulphur Bank",
"1474-6": "Ponca",
"1475-3": "Nebraska Ponca",
"1476-1": "Oklahoma Ponca",
"1478-7": "Potawatomi",
"1479-5": "Citizen Band Potawatomi",
"1480-3": "Forest County",
"1481-1": "Hannahville",
"1482-9": "Huron Potawatomi",
"1483-7": "Pokagon Potawatomi",
"1484-5": "Prairie Band",
"1485-2": "Wisconsin Potawatomi",
"1487-8": "Powhatan",
"1489-4": "Pueblo",
"1490-2": "Acoma",
"1491-0": "Arizona Tewa",
"1492-8": "Cochiti",
"1493-6": "Hopi",
"1494-4": "Isleta",
"1495-1": "Jemez",
"1496-9": "Keres",
"1497-7": "Laguna",
"1498-5": "Nambe",
"1499-3": "Picuris",
"1500-8": "Piro",
"1501-6": "Pojoaque",
"1502-4": "San Felipe",
"1503-2": "San Ildefonso",
"1504-0": "San Juan Pueblo",
"1505-7": "San Juan De",
"1506-5": "San Juan",
"1507-3": "Sandia",
"1508-1": "Santa Ana",
"1509-9": "Santa Clara",
"1510-7": "Santo Domingo",
"1511-5": "Taos",
"1512-3": "Tesuque",
"1513-1": "Tewa",
"1514-9": "Tigua",
"1515-6": "Zia",
"1516-4": "Zuni",
"1518-0": "Puget Sound Salish",
"1519-8": "Duwamish",
"1520-6": "Kikiallus",
"1521-4": "Lower Skagit",
"1522-2": "Muckleshoot",
"1523-0": "Nisqually",
"1524-8": "Nooksack",
"1525-5": "Port Madison",
"1526-3": "Puyallup",
"1527-1": "Samish",
"1528-9": "Sauk-Suiattle",
"1529-7": "Skokomish",
"1530-5": "Skykomish",
"1531-3": "Snohomish",
"1532-1": "Snoqualmie",
"1533-9": "Squaxin Island",
"1534-7": "Steilacoom",
"1535-4": "Stillaguamish",
"1536-2": "Suquamish",
"1537-0": "Swinomish",
"1538-8": "Tulalip",
"1539-6": "Upper Skagit",
"1541-2": "Quapaw",
"1543-8": "Quinault",
"1545-3": "Rappahannock",
"1547-9": "Reno-Sparks",
"1549-5": "Round Valley",
"1551-1": "Sac and Fox",
"1552-9": "Iowa Sac and Fox",
"1553-7": "Missouri Sac and Fox",
"1554-5": "Oklahoma Sac and Fox",
"1556-0": "Salinan",
"1558-6": "Salish",
"1560-2": "Salish and Kootenai",
"1562-8": "Schaghticoke",
"1564-4": "Scott Valley",
"1566-9": "Seminole",
"1567-7": "Big Cypress",
"1568-5": "Brighton",
"1569-3": "Florida Seminole",
"1570-1": "Hollywood Seminole",
"1571-9": "Oklahoma Seminole",
"1573-5": "Serrano",
"1574-3": "San Manual",
"1576-8": "Shasta",
"1578-4": "Shawnee",
"1579-2": "Absentee Shawnee",
"1580-0": "Eastern Shawnee",
"1582-6": "Shinnecock",
"1584-2": "Shoalwater Bay",
"1586-7": "Shoshone",
"1587-5": "Battle Mountain",
"1588-3": "Duckwater",
"1589-1": "Elko",
"1590-9": "Ely",
"1591-7": "Goshute",
"1592-5": "Panamint",
"1593-3": "Ruby Valley",
"1594-1": "Skull Valley",
"1595-8": "South Fork Shoshone",
"1596-6": "Te-Moak Western Shoshone",
"1597-4": "<NAME>",
"1598-2": "Washakie",
"1599-0": "Wind River Shoshone",
"1600-6": "Yomba",
"1602-2": "Shoshone Paiute",
"1603-0": "Duck Valley",
"1604-8": "Fallon",
"1605-5": "<NAME>",
"1607-1": "Siletz",
"1609-7": "Sioux",
"1610-5": "Blackfoot Sioux",
"1611-3": "Brule Sioux",
"1612-1": "Cheyenne River Sioux",
"1613-9": "Crow Creek Sioux",
"1614-7": "Dakota Sioux",
"1615-4": "Flandreau Santee",
"1616-2": "Fort Peck",
"1617-0": "Lake Traverse Sioux",
"1618-8": "Lower Brule Sioux",
"1619-6": "Lower Sioux",
"1620-4": "Mdewakanton Sioux",
"1621-2": "Miniconjou",
"1622-0": "Oglala Sioux",
"1623-8": "Pine Ridge Sioux",
"1624-6": "Pipestone Sioux",
"1625-3": "Prairie Island Sioux",
"1626-1": "Prior Lake Sioux",
"1627-9": "Rosebud Sioux",
"1628-7": "Sans Arc Sioux",
"1629-5": "Santee Sioux",
"1630-3": "Sisseton-Wahpeton",
"1631-1": "Sisseton Sioux",
"1632-9": "Spirit Lake Sioux",
"1633-7": "Standing Rock Sioux",
"1634-5": "Teton Sioux",
"1635-2": "Two Kettle Sioux",
"1636-0": "Upper Sioux",
"1637-8": "Wahpekute Sioux",
"1638-6": "Wahpeton Sioux",
"1639-4": "Wazhaza Sioux",
"1640-2": "Yankton Sioux",
"1641-0": "Yanktonai Sioux",
"1643-6": "Siuslaw",
"1645-1": "Spokane",
"1647-7": "Stewart",
"1649-3": "Stockbridge",
"1651-9": "Susanville",
"1653-5": "<NAME>",
"1654-3": "Ak-Chin",
"1655-0": "<NAME>",
"1656-8": "<NAME>",
"1657-6": "Sells",
"1659-2": "Tolowa",
"1661-8": "Tonkawa",
"1663-4": "Tygh",
"1665-9": "Umatilla",
"1667-5": "Umpqua",
"1668-3": "Cow Creek Umpqua",
"1670-9": "Ute",
"1671-7": "<NAME>",
"1672-5": "Uintah Ute",
"1673-3": "Ute Mountain Ute",
"1675-8": "Wailaki",
"1677-4": "Walla-Walla",
"1679-0": "Wampanoag",
"1680-8": "Gay Head Wampanoag",
"1681-6": "<NAME>",
"1683-2": "Warm Springs",
"1685-7": "Wascopum",
"1687-3": "Washoe",
"1688-1": "Alpine",
"1689-9": "Carson",
"1690-7": "Dresslerville",
"1692-3": "Wichita",
"1694-9": "Wind River",
"1696-4": "Winnebago",
"1697-2": "Ho-chunk",
"1698-0": "Nebraska Winnebago",
"1700-4": "Winnemucca",
"1702-0": "Wintun",
"1704-6": "Wiyot",
"1705-3": "Table Bluff",
"1707-9": "Yakama",
"1709-5": "Yakama Cowlitz",
"1711-1": "Yaqui",
"1712-9": "Bar<NAME>",
"1713-7": "Pascua Yaqui",
"1715-2": "Yavapai Apache",
"1717-8": "Yokuts",
"1718-6": "Chukchansi",
"1719-4": "Tachi",
"1720-2": "Tule River",
"1722-8": "Yuchi",
"1724-4": "Yuman",
"1725-1": "Cocopah",
"1726-9": "Havasupai",
"1727-7": "Hualapai",
"1728-5": "Maricopa",
"1729-3": "Mohave",
"1730-1": "Quechan",
"1731-9": "Yavapai",
"1732-7": "Yurok",
"1733-5": "Coast Yurok",
"1735-0": "Alaska Native",
"1737-6": "Alaska Indian",
"1739-2": "Alaskan Athabascan",
"1740-0": "Ahtna",
"1811-9": "Southeast Alaska",
"1813-5": "Tlingit-Haida",
"1814-3": "Angoon",
"1815-0": "Central Council of Tlingit and Haida Tribes",
"1816-8": "Chilkat",
"1817-6": "Chilkoot",
"1818-4": "Craig",
"1819-2": "Douglas",
"1820-0": "Haida",
"1821-8": "Hoonah",
"1822-6": "Hydaburg",
"1823-4": "Kake",
"1824-2": "Kasaan",
"1825-9": "Kenaitze",
"1826-7": "Ketchikan",
"1827-5": "Klawock",
"1828-3": "Pelican",
"1829-1": "Petersburg",
"1830-9": "Saxman",
"1831-7": "Sitka",
"1832-5": "Tenakee Springs",
"1833-3": "Tlingit",
"1834-1": "Wrangell",
"1835-8": "Yakutat",
"1837-4": "Tsimshian",
"1838-2": "Metlakatla",
"1840-8": "Eskimo",
"1842-4": "Greenland Eskimo",
"1844-0": "Inupiat Eskimo",
"1845-7": "Ambler",
"1846-5": "Anaktuvuk",
"1847-3": "Anaktuvuk Pass",
"1848-1": "Arctic Slope Inupiat",
"1849-9": "Arctic Slope Corporation",
"1850-7": "Atqasuk",
"1851-5": "Barrow",
"1852-3": "Bering Straits Inupiat",
"1853-1": "Brevig Mission",
"1854-9": "Buckland",
"1855-6": "Chinik",
"1856-4": "Council",
"1857-2": "Deering",
"1858-0": "Elim",
"1859-8": "Golovin",
"1860-6": "<NAME>",
"1861-4": "Inupiaq",
"1862-2": "Kaktovik",
"1863-0": "Kawerak",
"1864-8": "Kiana",
"1865-5": "Kivalina",
"1866-3": "Kobuk",
"1867-1": "Kotzebue",
"1868-9": "Koyuk",
"1869-7": "Kwiguk",
"1870-5": "<NAME>",
"1871-3": "<NAME>",
"1872-1": "Noatak",
"1873-9": "Nome",
"1874-7": "Noorvik",
"1875-4": "Nuiqsut",
"1876-2": "Point Hope",
"1877-0": "Point Lay",
"1878-8": "Selawik",
"1879-6": "Shaktoolik",
"1880-4": "Shishmaref",
"1881-2": "Shungnak",
"1882-0": "Solomon",
"1883-8": "Teller",
"1884-6": "Unalakleet",
"1885-3": "Wainwright",
"1886-1": "Wales",
"1887-9": "White Mountain",
"1888-7": "White Mountain Inupiat",
"1889-5": "<NAME>",
"1891-1": "<NAME>",
"1892-9": "Gambell",
"1893-7": "Savoonga",
"1894-5": "<NAME>",
"1896-0": "<NAME>",
"1897-8": "Akiachak",
"1898-6": "Akiak",
"1899-4": "Alakanuk",
"1900-0": "Aleknagik",
"1901-8": "Andreafsky",
"1902-6": "Aniak",
"1903-4": "Atmautluak",
"1904-2": "Bethel",
"1905-9": "<NAME>",
"1906-7": "Bristol Bay Yupik",
"1907-5": "<NAME>",
"1908-3": "Chefornak",
"1909-1": "Chevak",
"1910-9": "Chuathbaluk",
"1911-7": "Clark's Point",
"1912-5": "Crooked Creek",
"1913-3": "Dillingham",
"1914-1": "Eek",
"1915-8": "Ekuk",
"1916-6": "Ekwok",
"1917-4": "Emmonak",
"1918-2": "Goodnews Bay",
"1919-0": "Hooper Bay",
"1920-8": "Iqurmuit (Russian Mission)",
"1921-6": "Kalskag",
"1922-4": "Kasigluk",
"1923-2": "Kipnuk",
"1924-0": "Koliganek",
"1925-7": "Kongiganak",
"1926-5": "Kotlik",
"1927-3": "Kwethluk",
"1928-1": "Kwigillingok",
"1929-9": "Levelock",
"1930-7": "Lower Kalskag",
"1931-5": "Manokotak",
"1932-3": "Marshall",
"1933-1": "Mekoryuk",
"1934-9": "Mountain Village",
"1935-6": "Naknek",
"1936-4": "Napaumute",
"1937-2": "Napakiak",
"1938-0": "Napaskiak",
"1939-8": "Newhalen",
"1940-6": "New Stuyahok",
"1941-4": "Newtok",
"1942-2": "Nightmute",
"1943-0": "Nunapitchukv",
"1944-8": "Oscarville",
"1945-5": "Pilot Station",
"1946-3": "Pitkas Point",
"1947-1": "Platinum",
"1948-9": "Portage Creek",
"1949-7": "Quinhagak",
"1950-5": "Red Devil",
"1951-3": "St. Michael",
"1952-1": "<NAME>",
"1953-9": "Sheldon's Point",
"1954-7": "Sleetmute",
"1955-4": "Stebbins",
"1956-2": "Togiak",
"1957-0": "Toksook",
"1958-8": "Tulukskak",
"1959-6": "Tuntutuliak",
"1960-4": "Tununak",
"1961-2": "<NAME>",
"1962-0": "Georgetown",
"1963-8": "St. Mary's",
"1964-6": "Umkumiate",
"1966-1": "Aleut",
"1968-7": "Alutiiq Aleut",
"1969-5": "Tatitlek",
"1970-3": "Ugashik",
"1972-9": "Bristol Bay Aleut",
"1973-7": "Chignik",
"1974-5": "Chignik Lake",
"1975-2": "Egegik",
"1976-0": "Igiugig",
"1977-8": "Ivanof Bay",
"1978-6": "King Salmon",
"1979-4": "Kokhanok",
"1980-2": "Perryville",
"1981-0": "Pilot Point",
"1982-8": "Port Heiden",
"1984-4": "Chugach Aleut",
"1985-1": "Chenega",
"1986-9": "Chugach Corporation",
"1987-7": "English Bay",
"1988-5": "Port Graham",
"1990-1": "Eyak",
"1992-7": "Koniag Aleut",
"1993-5": "Akhiok",
"1994-3": "Agdaagux",
"1995-0": "Karluk",
"1996-8": "Kodiak",
"1997-6": "Larsen Bay",
"1998-4": "Old Harbor",
"1999-2": "Ouzinkie",
"2000-8": "Port Lions",
"2002-4": "Sugpiaq",
"2004-0": "Suqpigaq",
"2006-5": "Unangan Aleut",
"2007-3": "Akutan",
"2008-1": "Aleut Corporation",
"2009-9": "Aleutian",
"2010-7": "Aleutian Islander",
"2011-5": "Atka",
"2012-3": "Belkofski",
"2013-1": "<NAME>",
"2014-9": "King Cove",
"2015-6": "False Pass",
"2016-4": "<NAME>",
"2017-2": "Nikolski",
"2018-0": "<NAME>",
"2019-8": "<NAME>",
"2020-6": "Qawalangin",
"2021-4": "St. George",
"2022-2": "St. Paul",
"2023-0": "Sand Point",
"2024-8": "South Naknek",
"2025-5": "Unalaska",
"2026-3": "Unga",
"2028-9": "Asian",
"2029-7": "Asian Indian",
"2030-5": "Bangladeshi",
"2031-3": "Bhutanese",
"2032-1": "Burmese",
"2033-9": "Cambodian",
"2034-7": "Chinese",
"2035-4": "Taiwanese",
"2036-2": "Filipino",
"2037-0": "Hmong",
"2038-8": "Indonesian",
"2039-6": "Japanese",
"2040-4": "Korean",
"2041-2": "Laotian",
"2042-0": "Malaysian",
"2043-8": "Okinawan",
"2044-6": "Pakistani",
"2045-3": "Sri Lankan",
"2046-1": "Thai",
"2047-9": "Vietnamese",
"2048-7": "Iwo Jiman",
"2049-5": "Maldivian",
"2050-3": "Nepalese",
"2051-1": "Singaporean",
"2052-9": "Madagascar",
"2054-5": "Black or African American",
"2056-0": "Black",
"2058-6": "African American",
"2060-2": "African",
"2061-0": "Botswanan",
"2062-8": "Ethiopian",
"2063-6": "Liberian",
"2064-4": "Namibian",
"2065-1": "Nigerian",
"2066-9": "Zairean",
"2067-7": "Bahamian",
"2068-5": "Barbadian",
"2069-3": "Dominican",
"2070-1": "Dominica Islander",
"2071-9": "Haitian",
"2072-7": "Jamaican",
"2073-5": "Tobagoan",
"2074-3": "Trinidadian",
"2075-0": "West Indian",
"2076-8": "Native Hawaiian or Other Pacific Islander",
"2078-4": "Polynesian",
"2079-2": "Native Hawaiian",
"2080-0": "Samoan",
"2081-8": "Tahitian",
"2082-6": "Tongan",
"2083-4": "Tokelauan",
"2085-9": "Micronesian",
"2086-7": "Guamanian or Chamorro",
"2087-5": "Guamanian",
"2088-3": "Chamorro",
"2089-1": "Mariana Islander",
"2090-9": "Marshallese",
"2091-7": "Palauan",
"2092-5": "Carolinian",
"2093-3": "Kosraean",
"2094-1": "Pohnpeian",
"2095-8": "Saipanese",
"2096-6": "Kiribati",
"2097-4": "Chuukese",
"2098-2": "Yapese",
"2100-6": "Melanesian",
"2101-4": "Fijian",
"2102-2": "Papua New Guinean",
"2103-0": "Solomon Islander",
"2104-8": "New Hebrides",
"2500-7": "Other Pacific Islander",
"2106-3": "White",
"2108-9": "European",
"2109-7": "Armenian",
"2110-5": "English",
"2111-3": "French",
"2112-1": "German",
"2113-9": "Irish",
"2114-7": "Italian",
"2115-4": "Polish",
"2116-2": "Scottish",
"2118-8": "Middle Eastern or North African",
"2119-6": "Assyrian",
"2120-4": "Egyptian",
"2121-2": "Iranian",
"2122-0": "Iraqi",
"2123-8": "Lebanese",
"2124-6": "Palestinian",
"2125-3": "Syrian",
"2126-1": "Afghanistani",
"2127-9": "Israeili",
"2129-5": "Arab",
"2131-1": "Other Race"
}
hl7_ethnicity_dict = {
"2135-2": "Hispanic or Latino",
"2137-8": "Spaniard",
"2138-6": "Andalusian",
"2139-4": "Asturian",
"2140-2": "Castillian",
"2141-0": "Catalonian",
"2142-8": "Belearic Islander",
"2143-6": "Gallego",
"2144-4": "Valencian",
"2145-1": "Canarian",
"2146-9": "Spanish Basque",
"2148-5": "Mexican",
"2149-3": "Mexican American",
"2150-1": "Mexicano",
"2151-9": "Chicano",
"2152-7": "La Raza",
"2153-5": "Mexican American Indian",
"2155-0": "Central American",
"2156-8": "Costa Rican",
"2157-6": "Guatemalan",
"2158-4": "Honduran",
"2159-2": "Nicaraguan",
"2160-0": "Panamanian",
"2161-8": "Salvadoran",
"2162-6": "Central American Indian",
"2163-4": "Canal Zone",
"2165-9": "South American",
"2166-7": "Argentinean",
"2167-5": "Bolivian",
"2168-3": "Chilean",
"2169-1": "Colombian",
"2170-9": "Ecuadorian",
"2171-7": "Paraguayan",
"2172-5": "Peruvian",
"2173-3": "Uruguayan",
"2174-1": "Venezuelan",
"2175-8": "South American Indian",
"2176-6": "Criollo",
"2178-2": "Latin American",
"2180-8": "Puerto Rican",
"2182-4": "Cuban",
"2184-0": "Dominican",
"2186-5": "Not Hispanic or Latino",
}
def gender_correct(input_dict):
if "gender_code" in input_dict:
gender_code = input_dict["gender_code"]
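        # 248152002 and 248153007 are the SNOMED CT concept codes for "Female" and "Male";
        # they are normalized here to the single-character codes expected downstream.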
if gender_code == "248152002":
return {"gender_code": "F"}
elif gender_code == "248153007":
return {"gender_code": "M"}
else:
return input_dict
else:
return {}
person_id_duplicate_mapper = DuplicateExcludeMapper("empiPersonId")
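    # Each rule below is either a simple (source_field, target_field) pair or a
    # (source_field(s), mapper_object, {mapper_output_key: target_field}) triple.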
population_patient_rules = [("empiPersonId", "s_person_id"),
("gender_code", PassThroughFunctionMapper(gender_correct), {"gender_code": "s_gender"}),
("gender_code", "m_gender"),
("birthdate", "s_birth_datetime"),
("dateofdeath", "s_death_datetime"),
("race_code", "s_race"),
("race_code", CodeMapperDictClass(hl7_race_dict, key_to_map_to="m_race"), {"m_race": "m_race"}),
("ethnicity_code", "s_ethnicity"),
("ethnicity_code", CodeMapperDictClass(hl7_ethnicity_dict, key_to_map_to="m_ethnicity"), {"m_ethnicity": "m_ethnicity"}),
(("street_1", "street_2", "city", "state", "zip_code"),
key_location_mapper, {"mapped_value": "k_location"}),
("empiPersonId", person_id_duplicate_mapper, {"i_exclude": "i_exclude"})
]
output_person_csv = os.path.join(output_csv_directory, "source_person.csv")
source_person_runner_obj = generate_mapper_obj(input_patient_file_name, PopulationDemographics(), output_person_csv,
SourcePersonObject(), population_patient_rules,
output_class_obj, in_out_map_obj)
source_person_runner_obj.run() # Run the mapper
# Care site
care_site_csv = os.path.join(input_csv_directory, "care_site.csv")
md5_func = lambda x: hashlib.md5(x.encode("utf8")).hexdigest()
population_care_site = os.path.join(input_csv_directory, "population_care_site.csv")
key_care_site_mapper = build_name_lookup_csv(population_care_site, care_site_csv,
["facility_name", "building_name",
"nurseunit_name", "hospitalservice_code_text"],
["facility_name", "building_name",
"nurseunit_name", "hospitalservice_code_text"], hashing_func=md5_func)
care_site_name_mapper = FunctionMapper(
build_key_func_dict(["facility_name", "building_name",
"nurseunit_name", "hospitalservice_code_text"], separator=" -- "))
care_site_rules = [("key_name", "k_care_site"),
(("facility_name", "building_name", "nurseunit_name", "hospitalservice_code_text"),
care_site_name_mapper,
{"mapped_value": "s_care_site_name"})]
source_care_site_csv = os.path.join(output_csv_directory, "source_care_site.csv")
care_site_runner_obj = generate_mapper_obj(care_site_csv, PopulationCareSite(), source_care_site_csv,
SourceCareSiteObject(), care_site_rules,
output_class_obj, in_out_map_obj)
care_site_runner_obj.run()
# Encounters
encounter_file_name = os.path.join(input_csv_directory, file_name_dict["encounter"])
encounter_id_duplicate_mapper = DuplicateExcludeMapper("encounterid")
encounter_rules = [
("encounterid", "s_encounter_id"),
("empiPersonId", "s_person_id"),
("servicedate", "s_visit_start_datetime"),
("dischargedate", "s_visit_end_datetime"),
("type_code_text", "s_visit_type"),
("classification_code_text", "m_visit_type"),
("dischargedisposition_code_text", "s_discharge_to"),
("dischargedisposition_code", "m_discharge_to"),
("admissionsource_code_text", "s_admitting_source"),
("admissionsource_code", "m_admitting_source"),
(("facility_name", "building_name", "nurseunit_name", "hospitalservice_code_text"), key_care_site_mapper, {"mapped_value": "k_care_site"}),
("encounterid", encounter_id_duplicate_mapper, {"i_exclude": "i_exclude"})
]
source_encounter_csv = os.path.join(output_csv_directory, "source_encounter.csv")
# Generate care site combination of tenant and hospitalservice_code_text
encounter_runner_obj = generate_mapper_obj(encounter_file_name, PopulationEncounter(), source_encounter_csv,
SourceEncounterObject(), encounter_rules,
output_class_obj, in_out_map_obj)
encounter_runner_obj.run()
observation_csv_file = os.path.join(input_csv_directory, "population_observation.csv")
generate_observation_period(source_encounter_csv, observation_csv_file,
"s_person_id", "s_visit_start_datetime", "s_visit_end_datetime")
observation_period_rules = [("s_person_id", "s_person_id"),
("s_visit_start_datetime", "s_start_observation_datetime"),
("s_visit_end_datetime", "s_end_observation_datetime")]
source_observation_period_csv = os.path.join(output_csv_directory, "source_observation_period.csv")
observation_runner_obj = generate_mapper_obj(observation_csv_file, PopulationObservationPeriod(),
source_observation_period_csv,
SourceObservationPeriodObject(), observation_period_rules,
output_class_obj, in_out_map_obj)
observation_runner_obj.run()
# Encounter plan or insurance coverage
source_encounter_coverage_csv = os.path.join(output_csv_directory, "source_encounter_coverage.csv")
encounter_coverage_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("servicedate", "s_start_payer_date"),
("dischargedate", "s_end_payer_date"),
("financialclass_code_text", "s_payer_name"),
("financialclass_code_text", "m_payer_name"),
("financialclass_code_text", "s_plan_name"),
("financialclass_code_text", "m_plan_name")]
encounter_benefit_runner_obj = generate_mapper_obj(encounter_file_name,
PopulationEncounter(),
source_encounter_coverage_csv, SourceEncounterCoverageObject(),
encounter_coverage_rules, output_class_obj, in_out_map_obj)
encounter_benefit_runner_obj.run()
population_location_csv = os.path.join(input_csv_directory, "population_encounter_location.csv")
source_encounter_detail_csv = os.path.join(output_csv_directory, "source_encounter_detail.csv")
def check_if_not_empty(input_dict):
if "begindate" in input_dict:
if not len(input_dict["begindate"]):
return {"i_exclude": 1}
else:
return {"i_exclude": ""}
else:
return {"i_exclude": 1}
source_encounter_detail_rules = [
("encounterid", "s_encounter_id"),
("encounterid", "s_encounter_detail_id"),
("empiPersonId", "s_person_id"),
("begindate", "s_start_datetime"),
("enddate", "s_end_datetime"),
#("classification_display", "s_visit_detail_type"),
#("classification_display", "m_visit_detail_type"),
(("facility_name", "building_name", "nurseunit_name", "hospitalservice_code_text"), key_care_site_mapper, {"mapped_value": "k_care_site"}),
("begindate", PassThroughFunctionMapper(check_if_not_empty), {"i_exclude": "i_exclude"})
]
encounter_detail_runner_obj = generate_mapper_obj(population_location_csv, PopulationEncounterLocation(),
source_encounter_detail_csv,
SourceEncounterDetailObject(),
source_encounter_detail_rules, output_class_obj, in_out_map_obj)
encounter_detail_runner_obj.run()
def m_rank_func(input_dict):
if input_dict["billingrank"] == "PRIMARY":
return {"m_rank": "Primary"}
elif input_dict["billingrank"] == "SECONDARY":
return {"m_rank": "Secondary"}
else:
return {}
condition_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("effectiveDate", "s_start_condition_datetime"),
("condition_code", "s_condition_code"),
("condition_code_oid", "m_condition_code_oid"),
("billingrank", PassThroughFunctionMapper(m_rank_func), {"m_rank": "m_rank"}),
("source", "s_condition_type"),
("presentonadmission_code", "s_present_on_admission_indicator")]
condition_csv = os.path.join(input_csv_directory, file_name_dict["condition"])
source_condition_csv = os.path.join(output_csv_directory, "source_condition.csv")
condition_mapper_obj = generate_mapper_obj(condition_csv, PopulationCondition(), source_condition_csv,
SourceConditionObject(),
condition_rules, output_class_obj, in_out_map_obj)
condition_mapper_obj.run()
procedure_csv = os.path.join(input_csv_directory, file_name_dict["procedure"])
source_procedure_csv = os.path.join(output_csv_directory, "source_procedure.csv")
procedure_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("servicestartdate", "s_start_procedure_datetime"),
("serviceenddate", "s_end_procedure_datetime"),
("procedure_code", "s_procedure_code"),
("procedure_code_oid", "s_procedure_code_type"),
("procedure_code_oid", "m_procedure_code_oid")
]
procedure_mapper_obj = generate_mapper_obj(procedure_csv, PopulationProcedure(), source_procedure_csv,
SourceProcedureObject(),
procedure_rules, output_class_obj, in_out_map_obj)
procedure_mapper_obj.run()
def active_medications(input_dict):
if "status_code_text" in input_dict:
if input_dict["status_code_text"] not in ('Complete', 'Discontinued', 'Active', 'Suspended'):
return {"i_exclude": 1}
else:
return {}
else:
return {}
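    # Reference listing of the columns available in the population medication extract;
    # this bare list literal is never assigned and has no effect at runtime.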
["medicationid", "encounterid", "empiPersonId", "intendeddispenser", "startdate", "stopdate", "doseunit_code",
"doseunit_code_oid", "doseunit_code_text", "category_id", "category_code_oid", "category_code_text",
"frequency_id", "frequency_code_oid", "frequency_code_text", "status_code", "status_code_oid",
"status_code_text", "route_code", "route_code_oid", "route_code_text", "drug_code", "drug_code_oid",
"drug_code_text", "dosequantity", "source"]
medication_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("drug_code", "s_drug_code"),
("drug_code_oid", "m_drug_code_oid"),
("drug_code_text", "s_drug_text"),
("startdate", "s_start_medication_datetime"),
("stopdate", "s_end_medication_datetime"),
("route_code_text", "s_route"),
("route_code", "m_route"),
("dosequantity", "s_quantity"),
("doseunit_code_text", "s_dose_unit"),
("doseunit_code", "m_dose_unit"),
("intendeddispenser", "s_drug_type"),
("intendeddispenser", "m_drug_type"),
("status_code", "s_status"),
("status_code_text", PassThroughFunctionMapper(active_medications),
{"i_exclude": "i_exclude"})
]
medication_csv = os.path.join(input_csv_directory, file_name_dict["medication"])
source_medication_csv = os.path.join(output_csv_directory, "source_medication.csv")
medication_mapper_obj = generate_mapper_obj(medication_csv, PopulationMedication(), source_medication_csv,
SourceMedicationObject(), medication_rules,
output_class_obj, in_out_map_obj)
medication_mapper_obj.run()
result_csv = os.path.join(input_csv_directory, file_name_dict["result"])
source_result_csv = os.path.join(output_csv_directory, "source_result.csv")
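    # Reference listing of the columns available in the population results extract
    # (likewise unassigned; it only documents the expected input schema).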
["resultid", "encounterid", "empiPersonId", "result_code", "result_code_oid", "result_code_text",
"result_type", "servicedate", "value_text", "value_numeric", "value_numeric_modifier", "unit_code",
"unit_code_oid", "unit_code_text", "value_codified_code", "value_codified_code_oid",
"value_codified_code_text", "date", "interpretation_code", "interpretation_code_oid",
"interpretation_code_text", "specimen_type_code", "specimen_type_code_oid", "specimen_type_code_text",
"bodysite_code", "bodysite_code_oid", "bodysite_code_text", "specimen_collection_date",
"specimen_received_date", "measurementmethod_code", "measurementmethod_code_oid",
"measurementmethod_code_text", "recordertype", "issueddate", "year"]
def remove_equals(input):
return "".join(input["value_text"].split("="))
result_rules = [("empiPersonId", "s_person_id"),
("encounterid", "s_encounter_id"),
("servicedate", "s_obtained_datetime"),
("result_code_text", "s_name"),
("result_code", "s_code"),
("result_code_oid", "m_type_code_oid"),
("value_text", "s_result_text"),
(("value_codified_code_text", "interpretation_code_text"),
FilterHasKeyValueMapper(["value_codified_code_text", "interpretation_code_text"]),
{"value_codified_code_text": "m_result_text", "interpretation_code_text": "m_result_text"}),
(("value_numeric", "value_text"), CascadeMapper(FilterHasKeyValueMapper(["value_numeric"]),
ChainMapper(FunctionMapper(remove_equals, "value_text"), IntFloatMapper(), KeyTranslator({"value_text": "value_numeric"}))),
{"value_numeric": "s_result_numeric"}),
("date", "s_result_datetime"),
("value_codified_code", "s_result_code"),
("value_codified_code_oid", "m_result_code_oid"),
("unit_code", "s_result_unit"),
("unit_code", "s_result_unit_code"),
("unit_code_oid", "m_result_unit_code_oid"),
#("norm_unit_of_measure_code", "s_result_unit_code")
("lower_limit", "s_result_numeric_lower"),
("upper_limit", "s_result_numeric_upper")
]
result_mapper_obj = generate_mapper_obj(result_csv, PopulationResult(), source_result_csv, SourceResultObject(),
result_rules, output_class_obj, in_out_map_obj)
result_mapper_obj.run()
if __name__ == "__main__":
arg_parse_obj = argparse.ArgumentParser(description="Mapping Realworld CSV files to Prepared source format for OHDSI mapping")
arg_parse_obj.add_argument("-c", "--config-file-name", dest="config_file_name", help="JSON config file",
default="sbm_config.json")
arg_obj = arg_parse_obj.parse_args()
print("Reading config file '%s'" % arg_obj.config_file_name)
with open(arg_obj.config_file_name, "r") as f:
config_dict = json.load(f)
file_name_dict = {
"demographic": "population_demographics.consolidated.csv",
"encounter": "population_encounter.csv",
"condition": "population_condition.csv",
"measurement": "population_measurement.csv",
"medication": "population_medication.csv",
"procedure": "population_procedure.csv",
"result": "population_results.csv"
}
main(config_dict["csv_input_directory"], config_dict["csv_input_directory"], file_name_dict) | StarcoderdataPython |
59561 | from __future__ import annotations
from typing import Dict, Final, Literal, Set, Union
import pylox.lox_types as lt
import pylox.token_classes as tc
from pylox.token_classes import TokenType as tt
import pylox.lox_class as lc
import pylox.functions as fn
import pylox.lox_builtins as lb
# TODO: Consider moving some of these to other files
PY_LOX_STR_TYPES: Final[Dict[Union[lt.NilType, type], str]] = {
lt.nil: "nil",
lt.NilType: "nil",
float: "number",
int: "number",
str: "string",
bool: "boolean",
lc.LoxInstance: "instance",
fn.LoxFunction: "function",
lb.LoxNativeFunction: "native function",
}
StrTokenDict = Dict[str, tc.TokenType]
VALUE_WORDS: Final[Dict[str, Union[Literal[True, False], lt.NilType]]] = {
"true": True,
"false": False,
"nil": lt.nil,
}
RESERVED_WORDS: Final[StrTokenDict] = {
"and": tt.AND,
"class": tt.CLASS,
"else": tt.ELSE,
"false": tt.FALSE,
"for": tt.FOR,
"fun": tt.FUN,
"if": tt.IF,
"nil": tt.NIL,
"or": tt.OR,
"print": tt.PRINT,
"return": tt.RETURN,
"super": tt.SUPER,
"true": tt.TRUE,
"var": tt.VAR,
"while": tt.WHILE,
"break": tt.BREAK,
}
STR_TOKENS: Final[StrTokenDict] = {
**RESERVED_WORDS,
"(": tt.LEFT_PAREN,
")": tt.RIGHT_PAREN,
"{": tt.LEFT_BRACE,
"}": tt.RIGHT_BRACE,
",": tt.COMMA,
".": tt.DOT,
"-": tt.MINUS,
"+": tt.PLUS,
";": tt.SEMICOLON,
"*": tt.STAR,
"!": tt.BANG,
"=": tt.EQUAL,
"<": tt.LESS,
">": tt.GREATER,
"!=": tt.BANG_EQUAL,
"==": tt.EQUAL_EQUAL,
"<=": tt.LESS_EQUAL,
">=": tt.GREATER_EQUAL,
"/": tt.SLASH,
" ": tt.EMPTY,
"\r": tt.EMPTY,
"\t": tt.EMPTY,
"//": tt.EMPTY,
"": tt.EMPTY,
"\n": tt.EMPTY,
"/*": tt.EMPTY,
"\\*": tt.EMPTY,
}
SYNC_TOKENS: Final[Set[tc.TokenType]] = {
tt.CLASS,
tt.FUN,
tt.VAR,
tt.FOR,
tt.IF,
tt.WHILE,
tt.PRINT,
tt.RETURN,
}
| StarcoderdataPython |
160225 | <reponame>mikealgj/pulumi-digitalocean<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetImagesResult',
'AwaitableGetImagesResult',
'get_images',
]
@pulumi.output_type
class GetImagesResult:
"""
A collection of values returned by getImages.
"""
def __init__(__self__, filters=None, id=None, images=None, sorts=None):
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if images and not isinstance(images, list):
raise TypeError("Expected argument 'images' to be a list")
pulumi.set(__self__, "images", images)
if sorts and not isinstance(sorts, list):
raise TypeError("Expected argument 'sorts' to be a list")
pulumi.set(__self__, "sorts", sorts)
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetImagesFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def images(self) -> Sequence['outputs.GetImagesImageResult']:
"""
A set of images satisfying any `filter` and `sort` criteria. Each image has the following attributes:
- `slug`: Unique text identifier of the image.
- `id`: The ID of the image.
- `name`: The name of the image.
- `type`: Type of the image.
"""
return pulumi.get(self, "images")
@property
@pulumi.getter
def sorts(self) -> Optional[Sequence['outputs.GetImagesSortResult']]:
return pulumi.get(self, "sorts")
class AwaitableGetImagesResult(GetImagesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetImagesResult(
filters=self.filters,
id=self.id,
images=self.images,
sorts=self.sorts)
def get_images(filters: Optional[Sequence[pulumi.InputType['GetImagesFilterArgs']]] = None,
sorts: Optional[Sequence[pulumi.InputType['GetImagesSortArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImagesResult:
"""
Get information on images for use in other resources (e.g. creating a Droplet
based on a snapshot), with the ability to filter and sort the results. If no filters are specified,
all images will be returned.
This data source is useful if the image in question is not managed by the provider or you need to utilize any
of the image's data.
Note: You can use the `getImage` data source to obtain metadata
about a single image if you already know the `slug`, unique `name`, or `id` to retrieve.
:param Sequence[pulumi.InputType['GetImagesFilterArgs']] filters: Filter the results.
The `filter` block is documented below.
:param Sequence[pulumi.InputType['GetImagesSortArgs']] sorts: Sort the results.
The `sort` block is documented below.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['sorts'] = sorts
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getImages:getImages', __args__, opts=opts, typ=GetImagesResult).value
return AwaitableGetImagesResult(
filters=__ret__.filters,
id=__ret__.id,
images=__ret__.images,
sorts=__ret__.sorts)
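# Example usage sketch (an illustrative assumption, not part of the generated module).
# Inside a Pulumi program this data source would typically be invoked along these lines:
#
#     import pulumi_digitalocean as digitalocean
#
#     ubuntu = digitalocean.get_images(
#         filters=[digitalocean.GetImagesFilterArgs(key="distribution", values=["Ubuntu"])],
#         sorts=[digitalocean.GetImagesSortArgs(key="created", direction="desc")],
#     )
#
# The filter/sort keys ("distribution", "created") are assumptions for illustration; consult
# the provider documentation for the keys actually supported by the images data source.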
| StarcoderdataPython |
108479 | # Copyright 2020 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Code to reconstruct the purity via Importance Sampling
import numpy as np
import math
import cmath
from qutip import *
import random
from scipy import linalg
from src.ObtainMeasurements import *
from src.AnalyzeMeasurements import *
from src.PreprocessingImportanceSampling import *
### This script estimates the purity of a noisy GHZ realized in the experiment using uniform sampling and importance sampling from an ideal pure GHZ state
### Capable of simulating noisy GHZ state till N = 25 qubits !!!
### Importance sampling provides best performances for Nu ~ O(N) and NM ~O(2^N) !!
## Parameters
N = 16 # Number of qubits to analyze
d = 2**N
Nu = 50 # Number of random unitaries to be used
NM = d*4 # Number of projective measurements (shots) per random unitary
mode = 'CUE'
burn_in = 1 # determines the number of samples to be rejected during metropolis: (nu*burn_in)
### Step 1:: Create a quantum state
# The quantum state qstate is stored as numpy.array of type numpy.complex_
# qstate can be
# - a pure state |psi> represented by a numpy array of shape (2**N,)
# - a mixed state rho reprresented by a numpy array of shape (2**N, 2**N)
# An additional parameter p can be specified to admix the identity
# - |psi><psi| -> (1-p)*|psi><psi| + p*1/2**N or
# - rho -> (1-p)*rho + p*1/2**N
## An ideal GHZ state
qstate = np.zeros(2**N,dtype=np.complex_)
qstate[0] = 1./np.sqrt(2)
qstate[-1] = 1./np.sqrt(2)
### A random mixed state
#import qutip
#qstate = qutip.rand_dm(2**N).full()
#p_depo = 0.1
# Consider realizing a noisy version of the GHZ state experimentally. Noise given by depolarization noise strength p_depo
p_depo = 0.2
## Theoretical estimations:
p2_exp = (1-p_depo)**2 + (1-(1-p_depo)**2)/d ## purity of the realized noisy GHZ state
p2_theory = 1 ## Purity of the ideal pure GHZ state
fid = (1-p_depo) + p_depo/d ## Fidelity between the ideal and the experimental GHZ state
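# Illustrative helper (not used by the script below): the identity admixture described in
# Step 1 can be written explicitly for a density matrix rho of shape (2**N, 2**N). In this
# script the depolarization is instead applied inside ObtainOutcomeProbabilities via p_depo.
def _admix_identity(rho, p):
    """Return (1 - p) * rho + p * I / dim (sketch of the admixture formula above)."""
    dim = rho.shape[0]
    return (1 - p) * rho + p * np.eye(dim) / dim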
### Initiate Random Generator
a = random.SystemRandom().randrange(2 ** 32 - 1) #Init Random Generator
random_gen = np.random.RandomState(a)
### Perform Randomized measurements
print('Randomized measurements using uniform sampling with Nu = '+str(Nu)+' and NM = '+str(NM))
### Generate Random Unitaries
unitaries=np.zeros((Nu,N,2,2),dtype=np.complex_)
for iu in range(Nu):
for i in range(N):
unitaries[iu,i]=SingleQubitRotation(random_gen,mode)
print('Random unitaries generated using uniform sampling')
### Simulate the randomized measurements
Meas_Data_uni = np.zeros((Nu,NM),dtype='int64') ## array to store the measurement results as integers representing the measured bitstrings
for iu in range(Nu):
print('Data acquisition {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
prob = ObtainOutcomeProbabilities(N, qstate, unitaries[iu] , p_depo)
Meas_Data_uni[iu,:] = Sampling_Meas(prob,N,NM)
print('Measurement data generated for uniform sampling \n')
## Estimate the uniform sampled purity
X_uni = np.zeros(Nu)
for iu in range(Nu):
print('Postprocessing {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
probe = get_prob(Meas_Data_uni[iu,:], N)
X_uni[iu] = get_X(probe,N)
p2_uni = 0 # purity given by uniform sampling
p2_uni = unbias(np.mean(X_uni),N,NM)
print('Randomized measurements using importance sampling with Nu = '+str(Nu)+' and NM = '+str(NM))
### Step 1: Preprocessing step for importance sampling. Sample Y and Z rotation angles (2N angles for each unitary u)
# Importance sampling of the angles (theta_is) and (phi_is) using metropolis algorithm from an ideal GHZ state
theta_is, phi_is, n_r, N_s, p_IS = MetropolisSampling_pure(N, qstate,Nu, burn_in)
### Step: Randomized measurements
## Step 2a: Perform the actual experiment on your quantum machine
# Store angles theta_is, phi_is on the hard drive
# np.savetxt('theta_is.txt',theta_is) ## text file with Nu rows and N columns containing angles
# np.savetxt('phi_is.txt',phi_is) ## text file with Nu rows and N columns containing angles
# >>>> Run your quantum machine <<<<
# Load measurement results from hard drive as an array of shape (Nu,NM) containing integers
#Meas_Data_IS = np.load('MeasurementResults.npy',dtype='int64')
## Step 2b: Simulate randomized measurements with the generated importance sampled unitaries
### Generate the local importance sampled Random Unitaries
unitaries=np.zeros((Nu,N,2,2),dtype=np.complex_)
for iu in range(Nu):
for i in range(N):
unitaries[iu,i]=SingleQubitRotationIS(theta_is[i,iu],phi_is[i,iu])
print('Importance sampled random unitaries generated')
### Simulate the randomized measurements
Meas_Data_IS = np.zeros((Nu,NM),dtype='int64') ## array to store the measurement results as integers representing the measured bitstrings
for iu in range(Nu):
print('Data acquisition {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
prob = ObtainOutcomeProbabilities(N, qstate, unitaries[iu] , p_depo)
Meas_Data_IS[iu,:] = Sampling_Meas(prob,N,NM)
print('Measurement data generated for importance sampling')
## Step 3: Estimation of the purity given by importance sampling
X_imp = np.zeros(Nu)
for iu in range(Nu):
print('Postprocessing {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
probe = get_prob(Meas_Data_IS[iu,:], N)
X_imp[iu] = unbias(get_X(probe,N),N,NM)
p2_IS = 0 # purity given by importance sampling
for iu in range(Nu):
p2_IS += X_imp[iu]*n_r[iu]/p_IS[iu,0]/N_s
### some performance illustrations
print('Fidelity of the importance sampler: ', np.round(100*fid,2), '%')
print('p2 (True value) = ', p2_exp)
print('p2 (uniform sampling) = ', p2_uni)
print('p2 (Importance sampling) = ', p2_IS)
print ('Error uniform: ', np.round(100*(np.abs(p2_uni-p2_exp)/p2_exp),2), '%')
print ('Error IS: ', np.round(100*(np.abs(p2_IS-p2_exp)/p2_exp),2), '%')
| StarcoderdataPython |
1773426 | from .BuiltTeam import BuiltTeam
from .BuiltUnit import BuiltUnit
from .BuiltClass import BuiltClass
from .BuiltWeapon import BuiltWeapon
from .BuiltItem import BuiltItem
from .RankedSupport import RankedSupport
| StarcoderdataPython |
3215115 | class Articles:
'''
    Represents a single news article instance and its attributes.
'''
def __init__(self,id,name,author,title,description,url,urlToImage,publishedAt,content):
self.id = id
self.name = name
self.author = author
self.title = title
self.description = description
self.url = url
self.urlToImage = urlToImage
self.publishedAt = publishedAt
self.content = content
class Sources:
'''
Class to define all news sources
'''
def __init__(self,id,name,description,url,category,language):
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
self.language = language | StarcoderdataPython |
84635 | class Solution:
def getSmallestString(self, n: int, k: int) -> str:
def toChar(n):
return chr(97 + n - 1)
ans = [''] * n
i = 0
while i < n:
# how many spaces left to fill?
left = n - i
if k < left:
# you got `left` spaces to fill,
# but k is not enough
# won't happen with a valid input
pass
if k - 26 < (left - 1):
# if you subtract 26 from k,
# there won't be enough k to
# fill the remaining spaces.
# subtract just enough so that
# the remaining spaces can be filled
# with just a's
# k - x = left - 1
ans[i] = toChar(k - (left - 1))
i += 1
while i < n:
ans[i] = 'a'
i += 1
break
ans[i] = 'z'
k -= 26
i += 1
return ''.join(ans)[::-1]
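# Worked example (illustration): Solution().getSmallestString(3, 27) keeps 27 - (3 - 1) = 25
# for the first slot ('y'), pads the remaining two slots with 'a', and reverses ['y', 'a', 'a']
# to "aay" -- the lexicographically smallest length-3 string whose letter values sum to 27.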
| StarcoderdataPython |
23432 | import os
from flask_unchained import AppConfig
class Config(AppConfig):
WEBPACK_MANIFEST_PATH = os.path.join(
AppConfig.STATIC_FOLDER, 'assets', 'manifest.json')
class ProdConfig:
# use relative paths by default, ie, the same host as the backend
WEBPACK_ASSETS_HOST = ''
class StagingConfig(ProdConfig):
pass
| StarcoderdataPython |
4828010 | # ----------------------------------------------------------------------------
# Copyright (c) 2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
import skbio
import qiime2
from functools import partial
def clean(metadata, y_coord, x_coord):
if y_coord not in metadata:
raise ValueError("Must have y_coord in metadata to use quadtrees")
if x_coord not in metadata:
raise ValueError("Must have x_coord in metadata to use quadtrees")
df = metadata[[x_coord, y_coord]]
# as global or local in function, define all null or this:
df[y_coord] = pd.to_numeric(df[y_coord], errors='coerce')
df[x_coord] = pd.to_numeric(df[x_coord], errors='coerce')
# drop nan values (formerly strings) from dataframe
df = df.dropna(subset=[x_coord, y_coord])
if df.empty is True:
raise ValueError("x coordinates and/or y coordinates have "
"no numeric values, please check your data.")
# resolve points shifted left or down
xmin = df[x_coord].min()
if xmin > 0:
df[x_coord] = df[x_coord] - xmin
xmin = 0
ymin = df[y_coord].min()
if ymin > 0:
df[y_coord] = df[y_coord] - ymin
ymin = 0
df[x_coord] = df[x_coord] - xmin
df[y_coord] = df[y_coord] - ymin
return df
class Point():
def __init__(self, x, y, sample_id):
self.x = float(x)
self.y = float(y)
self.sample_id = sample_id
class Node():
def __init__(self, x0, y0, w, h, points, _id):
self.x0 = x0
self.y0 = y0
self.width = float(w)
self.height = float(h)
self.points = points
self.children = []
self.id = _id
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_points(self):
return self.points
def get_id(self):
return self.id
def set_id(self, _id):
self.id = self.id+_id
class QTree():
def __init__(self, threshold, data):
self.threshold = threshold
self.points = [Point(x, y, sample_id) for sample_id, x, y in data]
x_max = max(x for _, x, _ in data)
y_max = max(y for _, _, y in data)
self.root = Node(0, 0, x_max, y_max, self.points, "0")
def add_point(self, x, y, sample_id):
self.points.append(Point(x, y, sample_id))
def get_points(self):
return self.points
def subdivide(self, threshold):
depth = 0
node_id = ""
bins = []
bins = recursive_subdivide(self.root, threshold, depth, node_id, bins)
return bins
def recursive_subdivide(node, k, depth, node_id, bins):
if len(node.points) < k:
return
elif len(node.points)/k >= len(node.points):
raise ValueError("The threshold for subdivision is less than "
"the amount of points, "
"please chose a larger threshold for division")
w_ = node.width/2
h_ = node.height/2
depth += 1
nodes = []
for i in range(4):
# northwest
if(i == 0):
quad = "1"
node.x0 = node.x0
node.y0 = node.y0+h_
# northeast
elif(i == 1):
quad = "2"
node.x0 = node.x0+w_
node.y0 = node.y0
# southwest
elif(i == 2):
quad = "3"
node.x0 = node.x0-w_
node.y0 = node.y0-h_
# southeast
elif(i == 3):
quad = "4"
node.x0 = node.x0+w_
node.y0 = node.y0
p = contains(node.x0, node.y0, w_, h_, node.points)
new_id = node_id + quad + "."
quad_node = Node(node.x0, node.y0, w_, h_, p, new_id)
nodes.append(quad_node)
for pt in p:
bins.append((pt.sample_id, depth, new_id))
recursive_subdivide(quad_node, k, depth, new_id, bins)
node.children = nodes
return bins
def contains(x, y, w, h, points):
pts = []
for point in points:
if point.x >= x and point.x <= x+w and point.y >= y and point.y <= y+h:
pts.append(point)
return pts
def create_tree_df(bins, index):
# create df for trees and df
df = pd.DataFrame(bins, columns=[index, 'depth', 'lineage'])
try:
max_depth = max([lineage.count('.') for lineage in df['lineage']])+1
except ValueError:
raise ValueError("The threshold for subdivision is greater than "
"the amount of samples, "
"please chose a smaller threshold for division")
# for df only
def lineage_chopper(depth, lineage):
lin = '.'.join(lineage.split('.', depth)[:depth])
if lineage.count('.') < depth:
lin = None
return lin
for depth in range(1, max_depth):
name = 'split-depth-%d' % depth
df[name] = df['lineage'].apply(partial(lineage_chopper, depth))
# tree and df
longest_lineages = []
for sample_id, sample_grp in df.groupby(index):
sample_grp_sorted = sample_grp.sort_values('depth', ascending=False)
longest_lineages.append(sample_grp_sorted.iloc[0])
longest_lineages = pd.DataFrame(longest_lineages)
# tree only
lineage_bit = longest_lineages['lineage'].apply(
lambda lin: lin.split('.')[:-1])
taxonomy = [(i, lin) for i, lin in zip(longest_lineages[index],
lineage_bit)]
# df formatting
longest_lineages = pd.DataFrame(longest_lineages).set_index(index)
longest_lineages.index.name = index
return skbio.TreeNode.from_taxonomy(taxonomy), longest_lineages
def get_results(cleaned_df, threshold, index):
cleaned_df = cleaned_df.reset_index()
xy = cleaned_df.to_numpy()
q = QTree(threshold, xy)
bins = q.subdivide(threshold)
tree, samples = create_tree_df(bins, index)
return tree, samples
def quadtree(metadata: qiime2.Metadata,
y_coord: str,
x_coord: str,
threshold: int) -> (skbio.TreeNode, pd.DataFrame):
metadata = metadata.to_dataframe()
index = metadata.index.name
cleaned_df = clean(metadata, y_coord, x_coord)
tree, samples = get_results(cleaned_df, threshold, index)
return tree, samples
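# Usage sketch (an assumption -- in practice this function is registered as a QIIME 2 plugin
# action and invoked through the framework rather than called directly):
#
#     import pandas as pd
#     import qiime2
#
#     df = pd.DataFrame(
#         {"latitude": [34.1, 34.2, 35.0], "longitude": [-118.1, -118.3, -117.9]},
#         index=pd.Index(["s1", "s2", "s3"], name="id"),
#     )
#     tree, samples = quadtree(qiime2.Metadata(df), "latitude", "longitude", threshold=2)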
| StarcoderdataPython |
4835255 | <reponame>chrisseto/pyjwe
class PyJWEException(Exception):
pass
class MalformedData(PyJWEException):
pass
class MalformedHeader(MalformedData):
pass
class UnsupportedOption(PyJWEException):
pass
class UnsupportedAlgorithm(UnsupportedOption):
pass
class UnsupportedEncryption(UnsupportedOption):
pass
| StarcoderdataPython |
3242851 | class Tarea:
def __init__(self, args = None, resultados = None):
if args is None:
args = {}
if resultados is None:
resultados = {}
self.args = args
self.resultados = resultados
| StarcoderdataPython |
1790881 | import pytest
from tape import Tape
def test_get_content_of_non_empty_tape():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
assert tape.get_content() == 'a'
def test_get_content_of_empty_tape():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
assert tape.get_content() == 'B'
def test_get_content_of_non_empty_tape_at_start_with_head_moved_to_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_left()
assert tape.get_content() == 'B'
assert tape.position == 0
def test_get_content_of_non_empty_tape_with_head_moved_to_right_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_right()
tape.move_left()
assert tape.get_content() == 'a'
def test_get_content_of_non_empty_tape_with_head_moved_to_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_right()
assert tape.get_content() == 'b'
def test_get_content_of_non_empty_tape_at_end_with_head_moved_to_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_right()
tape.move_right()
assert tape.get_content() == 'B'
def test_move_head_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('L')
assert tape.get_content() == 'B'
def test_move_head_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('R')
assert tape.get_content() == 'b'
def test_move_head_stay():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('S')
assert tape.get_content() == 'a'
def test_move_head_right_left():
tape = Tape('B', ['a', 'b', 'X', 'B'], ['a', 'b'])
tape.move_head('R')
tape.move_head('L')
assert tape.get_content() == 'a'
def test_set_content_empty_tape():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
tape.set_content('a')
assert tape.get_content() == 'a'
def test_set_content_empty_tape_left_left_right():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
tape.move_left()
tape.move_left()
tape.move_right()
tape.set_content('a')
assert tape.get_content() == 'a'
assert tape.position == 1
def test_set_string_empty_tape_left_left_right_a():
tape = Tape('B', ['a', 'b', 'X', 'B'], [])
tape.move_left()
tape.move_left()
tape.move_right()
tape.set_content('a')
assert "(['B', 'a'])@1" == str(tape)
| StarcoderdataPython |
4834191 | <gh_stars>0
try:
from typing import ClassVar
from tkinter import Toplevel, ttk
from time import sleep
except ImportError as err:
exit(err)
class SongMenu(Toplevel):
def __init__(self: ClassVar, parent: ClassVar) -> None:
super().__init__(parent)
# expose variables to this class
self.parent: ClassVar = parent
self.playlists: dict = parent.settings['playlists']
self.icons: dict = parent.icons
self.playlist_menu: ClassVar = parent.menu_playlist
# variables
self.playlist_panels: dict = {}
self.animation: ClassVar = None
self.song: str = ''
self.disabled_playlists: list = []
# hide window
self.withdraw()
# configure window
self.overrideredirect(True)
        self.protocol('WM_DELETE_WINDOW', self.hide)  # pass the bound method itself rather than calling it immediately
self.bind('<FocusOut>', lambda _:self.hide())
self.configure(background=parent['background'])
self.init_ui()
def init_ui(self: ClassVar) -> None:
# add playlist and append song
ttk.Button(self, image=self.icons['plus'], text='Add playlist', compound='left', command=self.add_playlist).pack(side='top', fill='x', padx=5, pady=(5, 5))
# playlist buttons
self.playlist_panel: ClassVar = ttk.Frame(self)
for playlist in self.playlists:
if playlist != 'Favorites':
                # bind the loop variable as a default argument so each button targets its own playlist
                self.playlist_panels[playlist]: ClassVar = ttk.Button(self.playlist_panel, image=self.icons['playlist'], text=self.playlists[playlist]['Name'], compound='left', command=lambda p=playlist: self.add_to_playlist(p))
self.playlist_panels[playlist].pack(side='top', fill='x', padx=5, pady=(0, 5))
self.playlist_panel.pack(side='top', fill='both')
# delete button
self.delete_button: ClassVar = ttk.Button(self, image=self.icons['delete'], text='Remove', compound='left', command=self.remove_from_playlist)
self.delete_button.pack(side='top', fill='x', padx=5, pady=(5, 5))
def show(self: ClassVar, song: str) -> None:
self.song = song
self.update_options()
self.set_position()
if not self.animation:
self.deiconify()
self.animation = self.after(0, self.animate)
self.focus_set()
def hide(self: ClassVar) -> None:
self.withdraw()
def update_options(self: ClassVar) -> None:
selected_playlist: str = self.playlist_menu.get()
# block playlists
for playlist in self.playlists:
if playlist != 'Favorites':
if self.song in self.playlists[playlist]['Songs']:
self.playlist_panels[playlist].state(['disabled'])
self.disabled_playlists.append(playlist)
elif playlist in self.disabled_playlists:
self.playlist_panels[playlist].state(['!disabled'])
self.disabled_playlists.remove(playlist)
# block buttons
if selected_playlist == 'Favorites':
self.delete_button.state(['disabled'])
else:
self.delete_button.state(['!disabled'])
def set_position(self: ClassVar) -> None:
# get mouse position
mouse_pos: tuple = self.parent.winfo_pointerxy()
# get window dimensions
dimensions: tuple = (self.winfo_width(), self.winfo_height())
# get button class
button: ClassVar = self.parent.winfo_containing(mouse_pos[0], mouse_pos[1])
if button:
button_position: tuple = (button.winfo_rootx(), button.winfo_rooty())
if button_position[0] >= self.winfo_screenwidth() - dimensions[0] - 45:
self.geometry(f'+{button_position[0] - dimensions[0] - 10}+{button_position[1] - 6}')
else:
self.geometry(f'+{button_position[0] + 45}+{button_position[1] - 6}')
else:
self.geometry(f'+{mouse_pos[0]}+{mouse_pos[1]}')
def animate(self: ClassVar) -> None:
# get window dimensions
dimensions: tuple = (self.winfo_width(), self.winfo_height())
num_of_panels: int = len(self.playlists) + 1
speed: int = 5
for step in range(int(dimensions[1] / num_of_panels / speed)):
sleep(.0001)
self.geometry(f'{dimensions[0]}x{step * num_of_panels * speed}')
self.update()
# reset geometry after animation
self.geometry('')
# ready
self.animation = None
def append(self: ClassVar, playlist: str) -> None:
self.playlist_panels[playlist]: ClassVar = ttk.Button(self.playlist_panel, image=self.icons['playlist'], text=self.playlists[playlist]['Name'], compound='left', command=lambda: self.add_to_playlist(playlist))
self.playlist_panels[playlist].pack(side='top', fill='x', padx=5, pady=(0, 5))
def remove(self: ClassVar, playlist: str) -> None:
if playlist in self.playlist_panels:
self.playlist_panels[playlist].destroy()
del self.playlist_panels[playlist]
if playlist in self.disabled_playlists:
self.disabled_playlists.remove(playlist)
def rename(self: ClassVar, playlist: str, name: str) -> None:
self.playlist_panels[playlist]['text'] = name
def remove_from_playlist(self: ClassVar) -> None:
selected_playlist: str = self.playlist_menu.get()
if selected_playlist == 'Library':
self.parent.remove_song(self.song)
elif selected_playlist != 'Favorites' and selected_playlist in self.playlists and self.song in self.playlists[selected_playlist]['Songs']:
self.playlists[selected_playlist]['Songs'].remove(self.song)
if self.song in self.parent.songs:
self.parent.songs.remove(self.song)
if self.song in self.parent.song_panels:
self.parent.song_panels[self.song].pack_forget()
if not self.playlists[selected_playlist]['Songs']:
self.parent.search_panel.pack(side='top', fill='x', pady=5, padx=10)
self.hide()
def add_to_playlist(self: ClassVar, playlist: str) -> None:
if playlist in self.playlists and self.song in self.parent.library and not self.song in self.playlists[playlist]['Songs']:
self.playlists[playlist]['Songs'].append(self.song)
self.hide()
def add_playlist(self: ClassVar) -> None:
self.parent.add_playlist()
self.add_to_playlist(list(self.playlists.keys())[-1])
| StarcoderdataPython |
30824 | <filename>nerddiary/user/user.py
""" User model """
from __future__ import annotations
from datetime import tzinfo
import pytz
from pydantic import BaseModel, PrivateAttr, validator
from pydantic.fields import Field
from ..poll.poll import Poll
from ..primitive.timezone import TimeZone
from ..report.report import Report
from typing import Dict, List, Optional
class User(BaseModel):
id: str = Field(description="This user id", regex=r"^\w{1,64}$")
username: str | None = Field(default=None, description="Optional user name")
lang_code: str = Field(
default="en", min_length=2, max_length=2, description="User preferred language (2 letter code)"
)
timezone: TimeZone = pytz.timezone("US/Eastern") # type: ignore
polls: Optional[List[Poll]] = Field(min_items=1)
reports: Optional[List[Report]] = Field(min_items=1)
_polls_dict: Dict[str, Poll] = PrivateAttr(default={})
""" Dictionary of polls for workflow convinience
"""
class Config:
title = "User Configuration"
extra = "forbid"
json_encoders = {tzinfo: lambda t: str(t)}
def __init__(self, **data) -> None:
super().__init__(**data)
# convert_reminder_times_to_local_if_set
if self.polls:
# Create help mappings for workflow processing
self._polls_dict = {}
for poll in self.polls:
self._polls_dict |= {poll.poll_name: poll}
if poll.reminder_time:
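                    # caveat: replace(tzinfo=...) with a pytz zone attaches the zone's default
                    # (LMT-based) offset; pytz recommends localize()/astimezone() for datetimes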
poll.reminder_time = poll.reminder_time.replace(tzinfo=self.timezone)
@validator("polls")
def poll_names_must_be_unique(cls, v: List[Poll]):
if v:
poll_names = [p.poll_name for p in v]
poll_names_set = set(poll_names)
if len(poll_names_set) != len(poll_names):
raise ValueError("Poll names must be unique")
return v
@validator("polls")
def poll_commands_must_be_unique(cls, v: List[Poll]):
if v:
poll_commands = [p.command for p in v if p.command is not None]
poll_commands_set = set(poll_commands)
if len(poll_commands_set) != len(poll_commands):
raise ValueError("Poll commands must be unique")
return v
| StarcoderdataPython |
3282473 | __author__ = 'chris'
"""
Package for two way communication between nodes.
Primarily used for buyer-vendor communication.
"""
| StarcoderdataPython |
3370526 | <gh_stars>1-10
"""Fetch official PATCO feed."""
import logging
import requests
from bs4 import BeautifulSoup
from FeedSource import FeedSource
DEVPAGE_URL = 'http://www.ridepatco.org/developers/'
FILE_NAME = 'PortAuthorityTransitCorporation.zip'
LOG = logging.getLogger(__name__)
class Patco(FeedSource):
"""Fetch official PATCO feed."""
def __init__(self):
super(Patco, self).__init__()
url = self.find_download_url()
if url:
self.urls = {'patco.zip': url}
else:
LOG.error('Could not scrape PATCO GTFS download URL from developer page')
self.urls = {}
def find_download_url(self):
"""Helper to scrape developer's page for the download URL, which changes"""
devpage = requests.get(DEVPAGE_URL)
soup = BeautifulSoup(devpage.text, 'html.parser')
rt = soup.find(id='rightcolumn')
anchors = rt.findAll('a')
for anchor in anchors:
href = anchor.attrs['href']
if href.endswith('.zip'):
return href
# if got this far, no GTFS download link found
return None
| StarcoderdataPython |
3233630 | <reponame>zjjott/html
def ensure_utf8(s):
"""
unicode to ascii
u'\u554a'->'\xe5\x95\x8a'
"""
if not isinstance(s, basestring):
return str(s)
if isinstance(s, unicode):
return s.encode("u8")
return s
def ensure_unicode(s):
"""
ascii to unicode
'\xe5\x95\x8a'->u'\u554a'
"""
if not isinstance(s, basestring):
return unicode(s)
if isinstance(s, str):
return s.decode("u8")
return s
| StarcoderdataPython |
157735 | #!/usr/bin/env python
import os
import shutil
import sys
src, dst = sys.argv[1:]
if os.path.exists(dst):
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
| StarcoderdataPython |
3268983 | import os
from setuptools import setup, find_packages
def read(file_name):
return open(os.path.join(os.path.dirname(__file__), file_name)).read()
setup(
name="fate-of-dice",
version_config=True,
setup_requires=['setuptools-git-versioning'],
author="<NAME>",
author_email="<EMAIL>",
description="Discord dice bot",
long_description=read('README.md'),
long_description_content_type="text/markdown",
url="https://github.com/bonczeq/FateOfDice",
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'': ['resources/icons/*.png']},
classifiers=[
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License"
],
python_requires='>=3.8',
entry_points={
'console_scripts': ['fate_of_dice = fate_of_dice.__main__:main'],
}
)
| StarcoderdataPython |
179425 | <reponame>zevaverbach/epcon
"""
This module takes care of a lot of user related things.
It's a good place to put validators and user management functions/classes
"""
import os
RANDOM_USERNAME_LENGTH = 10
def generate_random_username():
"""Returns random username of length set by RANDOM_USERNAME_LENGTH"""
return os.urandom(100).hex()[:RANDOM_USERNAME_LENGTH]
| StarcoderdataPython |
1680622 | # Generated by Django 3.2.4 on 2021-07-27 13:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('property', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='property',
options={'verbose_name': 'Property', 'verbose_name_plural': 'Properties'},
),
]
| StarcoderdataPython |
65363 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import socket
import mock
from neutron_lib import constants
from neutron_lib.tests import _base as base
from neutron_lib.utils import net
class TestGetHostname(base.BaseTestCase):
@mock.patch.object(socket, 'gethostname',
return_value='fake-host-name')
def test_get_hostname(self, mock_gethostname):
self.assertEqual('fake-host-name',
net.get_hostname())
mock_gethostname.assert_called_once_with()
class TestGetRandomMac(base.BaseTestCase):
@mock.patch.object(random, 'getrandbits', return_value=0xa2)
def test_first_4_octets_unchanged(self, mock_rnd):
mac = net.get_random_mac(['aa', 'bb', '00', 'dd', 'ee', 'ff'])
self.assertEqual('aa:bb:00:dd:a2:a2', mac)
mock_rnd.assert_called_with(8)
@mock.patch.object(random, 'getrandbits', return_value=0xa2)
def test_first_4th_octet_generated(self, mock_rnd):
mac = net.get_random_mac(['aa', 'bb', 'cc', '00', 'ee', 'ff'])
self.assertEqual('aa:bb:cc:a2:a2:a2', mac)
mock_rnd.assert_called_with(8)
class TestRandomMacGenerator(base.BaseTestCase):
def test_all_macs_generated(self):
mac = ['aa', 'bb', 'cc', 'dd', 'ee', 'ff']
generator = itertools.islice(net.random_mac_generator(mac), 70000)
self.assertEqual(2**16, len(list(generator)))
@mock.patch.object(random, 'getrandbits', return_value=0xa2)
def test_first_generated_mac(self, mock_rnd):
mac = ['aa', 'bb', 'cc', '00', 'ee', 'ff']
generator = itertools.islice(net.random_mac_generator(mac), 1)
self.assertEqual(['aa:bb:cc:a2:a2:a2'], list(generator))
mock_rnd.assert_called_with(8)
@mock.patch.object(random, 'getrandbits', return_value=0xa2)
def test_respected_early_zeroes_generated_mac(self, mock_rnd):
mac1 = ['00', 'bb', 'cc', '00', 'ee', 'ff']
generator = itertools.islice(net.random_mac_generator(mac1), 1)
self.assertEqual(['00:bb:cc:a2:a2:a2'], list(generator))
mac2 = ['aa', '00', 'cc', '00', 'ee', 'ff']
generator = itertools.islice(net.random_mac_generator(mac2), 1)
self.assertEqual(['aa:00:cc:a2:a2:a2'], list(generator))
mac3 = ['aa', 'bb', '00', '00', 'ee', 'ff']
generator = itertools.islice(net.random_mac_generator(mac3), 1)
self.assertEqual(['aa:bb:00:a2:a2:a2'], list(generator))
mock_rnd.assert_called_with(8)
@mock.patch.object(random, 'getrandbits', return_value=0xa2)
def test_short_supplied_mac(self, mock_rnd):
mac_base = '12:34:56:78'
mac = mac_base.split(':')
generator = itertools.islice(net.random_mac_generator(mac), 1)
self.assertEqual(['12:34:56:78:a2:a2'], list(generator))
mock_rnd.assert_called_with(8)
class TestPortDeviceOwner(base.BaseTestCase):
def test_is_port_trusted(self):
self.assertTrue(net.is_port_trusted(
{'device_owner':
constants.DEVICE_OWNER_NETWORK_PREFIX + 'dev'}))
def test_is_port_not_trusted(self):
self.assertFalse(net.is_port_trusted(
{'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'dev'}))
| StarcoderdataPython |
1709915 | <reponame>ktncktnc/SpaceNet_Off_Nadir_Solutions
import argparse
import os
import numpy as np
import pandas as pd
from numpy.random.mtrand import RandomState
from sklearn.model_selection import KFold
def get_id(f):
return "_".join(f.rstrip(".tif").split("_")[-2:])
def get_nadir(f):
return int(f.split("_")[2][5:])
if __name__ == '__main__':
parser = argparse.ArgumentParser("PyTorch Segmentation Pipeline")
arg = parser.add_argument
arg('--data-path', type=str, default='/media/selim/sota/datasets/spacenet/SpaceNet-Off-Nadir_Train', help='Path to dataset folder')
arg('--folds', type=int, default=16, help='Num folds')
arg('--seed', type=int, default=777, help='Seed')
args = parser.parse_args()
train_dir = args.data_path
nadir_ranges = [range(0, 11), range(11, 21), range(21, 31), range(31, 41), range(41, 55)]
dirs = sorted([d for d in os.listdir(train_dir) if d.startswith("Atlanta")])
folds = []
ids = None
data = {}
np.random.seed(args.seed)
for d in dirs:
if os.path.isdir(os.path.join(train_dir, d)):
dir = os.path.join(train_dir, d, "Pan-Sharpen")
files = sorted([f for f in os.listdir(dir) if f.endswith("tif")])
if ids is None:
ids = list(set([get_id(f) for f in files]))
kfold = KFold(n_splits=args.folds, shuffle=True, random_state=RandomState(args.seed))
for fold, splits in enumerate(kfold.split(files)):
for idx in splits[1]:
data[ids[idx]] = fold
for f in files:
id = get_id(f)
fold = data[id]
folds.append([f, fold])
frame = pd.DataFrame(folds, columns=["id", "fold"])
print(frame.groupby('fold').count())
frame.to_csv("folds_angles.csv", index=False)
| StarcoderdataPython |
3384591 | # DO NOT ERASE
from tethys_datasets.utilities import get_dataset_engine, get_spatial_dataset_engine
from tethys_wps.utilities import get_wps_service_engine, list_wps_service_engines | StarcoderdataPython |
1722282 | """Third party Python libraries.
Third party libraries are placed here when they are not available on the platform by normal means.
FlashAirMusic is a service and not a library. It is intended to be installed through the platform's packaging system
(e.g. RPM files). Because of this some dependencies of FlashAirMusic may not be available in all platforms.
This directory mitigates the problem by allowing packaging scripts to place third party libraries here. This directory
is added to `sys.path` which allows Python to find those libraries here since they won't be installed on the system.
"""
| StarcoderdataPython |
1605728 | <reponame>trowa88/commstr
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from rest_framework.relations import StringRelatedField
from building.serializers import BuildingSerializer
from building_post.models import BuildingPost, BuildingPostHistory
class BuildingPostSerializer(serializers.ModelSerializer):
creator = StringRelatedField()
class Meta:
model = BuildingPost
fields = (
'building',
'creator',
'title',
'content',
)
def update(self, instance, validated_data):
if self.context['request'].user != instance.creator:
raise PermissionDenied()
new_instance = super(BuildingPostSerializer, self).update(instance, validated_data)
BuildingPostHistory.objects.create(
building_post=instance,
building=instance.building,
creator=instance.creator,
title=instance.title,
content=instance.content
)
return new_instance
class BuildingPostReadSerializer(BuildingPostSerializer):
building = BuildingSerializer(many=False)
class Meta(BuildingPostSerializer.Meta):
fields = (
'id',
'building',
'creator',
'title',
'content',
'is_enabled',
'created',
'updated',
)
| StarcoderdataPython |
3367224 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""Handling datasets.
For the moment, it is initialized with a torch Tensor of size (n_cells, nb_genes)"""
import copy
import os
import urllib.request
from collections import defaultdict
import numpy as np
import scipy.sparse as sp_sparse
import torch
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
def rank(x):
uniq = np.unique(x)
lookup = dict(zip(uniq, np.arange(len(uniq))))
x = np.asarray([lookup[i] for i in x])
return (x)
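# Illustrative examples (added commentary, not in the original source):
# rank maps values to dense ranks in sorted order, e.g.
#   rank([7, 3, 7])  -> array([1, 0, 1])
#   rank(['b', 'a']) -> array([1, 0])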
class GeneExpressionDataset(Dataset):
"""Gene Expression dataset. It deals with:
- log_variational expression -> torch.log(1 + X)
- local library size normalization (mean, var) per batch
"""
def __init__(self, X, local_means, local_vars, batch_indices, labels,
gene_names=None, cell_types=None, x_coord=None, y_coord=None):
# Args:
# Xs: a list of numpy tensors with .shape[1] identical (total_size*nb_genes)
# or a list of scipy CSR sparse matrix,
# or transposed CSC sparse matrix (the argument sparse must then be set to true)
self.dense = type(X) is np.ndarray
self._X = np.ascontiguousarray(X, dtype=np.float32) if self.dense else X
self.nb_genes = self.X.shape[1]
self.local_means = local_means
self.local_vars = local_vars
self.batch_indices, self.n_batches = arrange_categories(batch_indices)
self.labels, self.n_labels = arrange_categories(labels)
self.x_coord, self.y_coord = x_coord, y_coord
if gene_names is not None:
assert self.nb_genes == len(gene_names)
self.gene_names = np.array(gene_names, dtype=np.str)
if cell_types is not None:
assert self.n_labels == len(cell_types)
self.cell_types = np.array(cell_types, dtype=np.str)
@property
def X(self):
return self._X
@X.setter
def X(self, X):
self._X = X
self.library_size_batch()
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
return idx
def download_and_preprocess(self):
self.download()
return self.preprocess()
def collate_fn(self, batch):
indexes = np.array(batch)
X = self.X[indexes]
return self.collate_fn_end(X, indexes)
def collate_fn_corrupted(self, batch):
'''On the fly corruption is slow, but might be optimized in pytorch. Numpy code left here.'''
indexes = np.array(batch)
# i, j, corrupted = [], [], []
# for k, i_idx in enumerate(indexes):
# j += [self.corrupted[i_idx]['j']]
# corrupted += [self.corrupted[i_idx]['corrupted']]
# i += [np.ones_like(j[-1]) * k]
# i, j, corrupted = np.concatenate(i), np.concatenate(j), np.concatenate(corrupted)
# X = self.X[indexes]
# X[i, j] = corrupted
X = self.corrupted_X[indexes]
return self.collate_fn_end(X, indexes)
def corrupt(self, rate=0.1, corruption="uniform"):
'''On the fly corruption is slow, but might be optimized in pytorch. Numpy code left here.'''
self.corrupted_X = copy.deepcopy(self.X)
self.corrupted = defaultdict(lambda: {'j': [], 'corrupted': []})
if corruption == "uniform": # multiply the entry n with a Ber(0.9) random variable.
i, j = np.nonzero(self.X)
ix = np.random.choice(range(len(i)), int(np.floor(rate * len(i))), replace=False)
i, j = i[ix], j[ix]
corrupted = self.X[i, j] * np.random.binomial(n=np.ones(len(ix), dtype=np.int32), p=0.9) # maybe rate
elif corruption == "binomial": # multiply the entry n with a Bin(n, 0.9) random variable.
i, j = (k.ravel() for k in np.indices(self.X.shape))
ix = np.random.choice(range(len(i)), int(np.floor(rate * len(i))), replace=False)
i, j = i[ix], j[ix]
corrupted = np.random.binomial(n=(self.X[i, j]).astype(np.int32), p=0.2)
self.corrupted_X[i, j] = corrupted
# for idx_i, idx_j, corrupted in zip(i, j, corrupted):
# self.corrupted[idx_i]['j'] += [idx_j]
# self.corrupted[idx_i]['corrupted'] += [corrupted]
# for k, v in self.corrupted.items():
# v['j'] = np.array(v['j'])
# v['corrupted'] = np.array(v['corrupted'])
def collate_fn_end(self, X, indexes):
if self.dense:
X = torch.from_numpy(X)
else:
X = torch.FloatTensor(X.toarray())
if self.x_coord is None or self.y_coord is None:
return X, torch.FloatTensor(self.local_means[indexes]), \
torch.FloatTensor(self.local_vars[indexes]), \
torch.LongTensor(self.batch_indices[indexes]), \
torch.LongTensor(self.labels[indexes])
else:
return X, torch.FloatTensor(self.local_means[indexes]), \
torch.FloatTensor(self.local_vars[indexes]), \
torch.LongTensor(self.batch_indices[indexes]), \
torch.LongTensor(self.labels[indexes]), \
torch.FloatTensor(self.x_coord[indexes]), \
torch.FloatTensor(self.y_coord[indexes])
def update_genes(self, subset_genes):
new_n_genes = len(subset_genes) if subset_genes.dtype is not np.dtype('bool') else subset_genes.sum()
print("Downsampling from %i to %i genes" % (self.nb_genes, new_n_genes))
if hasattr(self, 'gene_names'):
self.gene_names = self.gene_names[subset_genes]
if hasattr(self, 'gene_symbols'):
self.gene_symbols = self.gene_symbols[subset_genes]
self.nb_genes = self.X.shape[1]
self.update_cells(np.array(self.X.sum(axis=1) > 0).ravel())
def update_cells(self, subset_cells):
new_n_cells = len(subset_cells) if subset_cells.dtype is not np.dtype('bool') else subset_cells.sum()
print("Downsampling from %i to %i cells" % (len(self), new_n_cells))
for attr_name in ['_X', 'labels', 'batch_indices', 'local_means', 'local_vars']:
setattr(self, attr_name, getattr(self, attr_name)[subset_cells])
self.library_size_batch()
if hasattr(self,'cell_types') and len(self.cell_types)>1:
self.cell_types = self.cell_types[np.unique(self.labels.ravel())]
self.labels = rank(self.labels.ravel()).reshape(len(self.labels),1)
self.n_labels = len(self.cell_types)
self.n_batches = len(np.unique(self.batch_indices.ravel()))
def subsample_genes(self, new_n_genes=None, subset_genes=None):
nonzero = (np.asarray(np.mean(self.X,axis=0)).ravel()>0)
self.X = self.X[:, nonzero]
self.update_genes(nonzero)
n_cells, n_genes = self.X.shape
n_batches = self.n_batches
if subset_genes is not None:
new_n_genes = len(subset_genes) if subset_genes.dtype is not np.dtype('bool') else subset_genes.sum()
print("Downsampling from %i to %i genes" % (n_genes, new_n_genes))
if subset_genes is None and \
(new_n_genes is False or new_n_genes >= n_genes):
return None # Do nothing if subsample more genes than total number of genes
elif subset_genes is None:
if n_batches==1:
print("Downsampling from %i to %i genes" % (n_genes, new_n_genes))
std_scaler = StandardScaler(with_mean=False)
std_scaler.fit(self.X.astype(np.float64))
self.X = self.X[:, std_scaler.mean_>0]
self.update_genes(std_scaler.mean_>0)
std_scaler.fit(self.X.astype(np.float64))
subset_genes = np.argsort(std_scaler.var_ / std_scaler.mean_) [::-1][:new_n_genes]
else:
subsets = []
# self.X = self.X.tocsr()
for i in np.arange(n_batches):
std_scaler = StandardScaler(with_mean=False)
std_scaler.fit(self.X.astype(np.float64)[self.batch_indices.ravel()==i,:])
subset_genes = np.argsort(std_scaler.var_ / std_scaler.mean_)[::-1][:new_n_genes]
subsets.append(set(subset_genes))
subset_genes = set.union(*subsets)
subset_genes = np.asarray(list(subset_genes))
new_n_genes = len(subset_genes)
print("Downsampling from %i to %i genes" % (n_genes, new_n_genes))
self.X = self.X[:, subset_genes]
self.update_genes(subset_genes)
def filter_genes(self, gene_names_ref, on='gene_names'):
"""
        Same as _filter_genes but overwrites the current dataset instead of returning data,
        and updates gene names and symbols
"""
self.X, subset_genes = GeneExpressionDataset._filter_genes(self, gene_names_ref, on=on)
self.update_genes(subset_genes)
def subsample_cells(self, size=1.):
n_cells, n_genes = self.X.shape
new_n_cells = int(size * n_genes) if type(size) is not int else size
indices = np.argsort(np.array(self.X.sum(axis=1)).ravel())[::-1][:new_n_cells]
self.update_cells(indices)
def _cell_type_idx(self, cell_types):
if type(cell_types[0]) is not int:
cell_types_idx = [np.where(cell_type == self.cell_types)[0][0] for cell_type in cell_types]
else:
cell_types_idx = cell_types
return np.array(cell_types_idx, dtype=np.int64)
def _gene_idx(self, genes):
if type(genes[0]) is not int:
genes_idx = [np.where(gene == self.gene_names)[0][0] for gene in genes]
else:
genes_idx = genes
return np.array(genes_idx, dtype=np.int64)
def filter_cell_types(self, cell_types):
"""
:param cell_types: numpy array of type np.int (indices) or np.str (cell-types names)
:return:
"""
cell_types_idx = self._cell_type_idx(cell_types)
# if hasattr(self, 'cell_types'):
# self.cell_types = self.cell_types[cell_types_idx]
# print("Only keeping cell types: \n" + '\n'.join(list(self.cell_types)))
idx_to_keep = []
for idx in cell_types_idx:
idx_to_keep += [np.where(self.labels == idx)[0]]
self.update_cells(np.concatenate(idx_to_keep))
# self.labels, self.n_labels = arrange_categories(self.labels, mapping_from=cell_types_idx)
def merge_cell_types(self, cell_types, new_cell_type_name):
"""
Merge some cell types into a new one, a change the labels accordingly.
:param merge_cell_types: numpy array of type np.int (indices) or np.str (cell-types names)
:return:
"""
cell_types_idx = self._cell_type_idx(cell_types)
        for idx_from in cell_types_idx:
self.labels[self.labels == idx_from] = len(self.labels) # Put at the end the new merged cell-type
self.labels, self.n_labels = arrange_categories(self.labels)
if hasattr(self, 'cell_types') and type(cell_types[0]) is not int:
new_cell_types = list(self.cell_types)
for cell_type in cell_types:
new_cell_types.remove(cell_type)
new_cell_types.append(new_cell_type_name)
self.cell_types = np.array(new_cell_types)
def map_cell_types(self, cell_types_dict):
"""
A map for the cell types to keep, and optionally merge together under a new name (value in the dict)
:param cell_types_dict: a dictionary with tuples (str or int) as input and value (str or int) as output
"""
keys = [(key,) if type(key) is not tuple else key for key in cell_types_dict.keys()]
cell_types = [cell_type for cell_types in keys for cell_type in cell_types]
self.filter_cell_types(cell_types)
for cell_types, new_cell_type_name in cell_types_dict.items():
self.merge_cell_types(cell_types, new_cell_type_name)
def download(self):
if hasattr(self, 'urls') and hasattr(self, 'download_names'):
for url, download_name in zip(self.urls, self.download_names):
GeneExpressionDataset._download(url, self.save_path, download_name)
elif hasattr(self, 'url') and hasattr(self, 'download_name'):
GeneExpressionDataset._download(self.url, self.save_path, self.download_name)
@staticmethod
def _download(url, save_path, download_name):
if os.path.exists(save_path + download_name):
print("File %s already downloaded" % (save_path + download_name))
return
r = urllib.request.urlopen(url)
print("Downloading file at %s" % save_path + download_name)
def readIter(f, blocksize=1000):
"""Given a file 'f', returns an iterator that returns bytes of
size 'blocksize' from the file, using read()."""
while True:
data = f.read(blocksize)
if not data:
break
yield data
# Create the path to save the data
if not os.path.exists(save_path):
os.makedirs(save_path)
with open(save_path + download_name, 'wb') as f:
for data in readIter(r):
f.write(data)
def library_size_batch(self):
for i_batch in range(self.n_batches):
idx_batch = (self.batch_indices == i_batch).ravel()
self.local_means[idx_batch], self.local_vars[idx_batch] = self.library_size(self.X[idx_batch])
@staticmethod
def library_size(X):
log_counts = np.log(X.sum(axis=1))
local_mean = (np.mean(log_counts) * np.ones((X.shape[0], 1))).astype(np.float32)
local_var = (np.var(log_counts) * np.ones((X.shape[0], 1))).astype(np.float32)
return local_mean, local_var
@staticmethod
def get_attributes_from_matrix(X, batch_indices=0, labels=None):
to_keep = np.array((X.sum(axis=1) > 0)).ravel()
X = X[to_keep]
local_mean, local_var = GeneExpressionDataset.library_size(X)
batch_indices = batch_indices * np.ones((X.shape[0], 1)) if type(batch_indices) is int \
else batch_indices[to_keep]
labels = labels[to_keep].reshape(-1, 1) if labels is not None else np.zeros_like(batch_indices)
return X, local_mean, local_var, batch_indices, labels
@staticmethod
def get_attributes_from_list(Xs, list_batches=None, list_labels=None):
nb_genes = Xs[0].shape[1]
assert all(X.shape[1] == nb_genes for X in Xs), "All tensors must have same size"
new_Xs = []
local_means = []
local_vars = []
batch_indices = []
labels = []
for i, X in enumerate(Xs):
new_Xs += [X]
local_mean, local_var = GeneExpressionDataset.library_size(X)
local_means += [local_mean]
local_vars += [local_var]
batch_indices += [list_batches[i] if list_batches is not None else i * np.ones((X.shape[0], 1))]
labels += [list_labels[i] if list_labels is not None else np.zeros((X.shape[0], 1))]
X = np.concatenate(new_Xs) if type(new_Xs[0]) is np.ndarray else sp_sparse.vstack(new_Xs)
batch_indices = np.concatenate(batch_indices)
local_means = np.concatenate(local_means)
local_vars = np.concatenate(local_vars)
labels = np.concatenate(labels)
return X, local_means, local_vars, batch_indices, labels
@staticmethod
def concat_datasets(*gene_datasets, on='gene_names', shared_labels=True, shared_batches=False):
"""
        Combines multiple unlabelled gene_datasets based on the intersection of their gene names.
Datasets should all have gene_dataset.n_labels=0.
Batch indices are generated in the same order as datasets are given.
:param gene_datasets: a sequence of gene_datasets object
:return: a GeneExpressionDataset instance of the concatenated datasets
"""
assert all([hasattr(gene_dataset, on) for gene_dataset in gene_datasets])
gene_names_ref = set.intersection(*[set(getattr(gene_dataset, on)) for gene_dataset in gene_datasets])
# keep gene order of the first dataset
gene_names_ref = [gene_name for gene_name in getattr(gene_datasets[0], on) if gene_name in gene_names_ref]
print("Keeping %d genes" % len(gene_names_ref))
Xs = [GeneExpressionDataset._filter_genes(dataset, gene_names_ref, on=on)[0] for dataset in gene_datasets]
if gene_datasets[0].dense:
X = np.concatenate([X if type(X) is np.ndarray else X.A for X in Xs])
else:
X = sp_sparse.vstack([X if type(X) is not np.ndarray else sp_sparse.csr_matrix(X) for X in Xs])
batch_indices = np.zeros((X.shape[0], 1))
n_batch_offset = 0
current_index = 0
for gene_dataset in gene_datasets:
next_index = current_index + len(gene_dataset)
batch_indices[current_index:next_index] = gene_dataset.batch_indices + n_batch_offset
n_batch_offset += (gene_dataset.n_batches if not shared_batches else 0)
current_index = next_index
cell_types = None
if shared_labels:
if all([hasattr(gene_dataset, "cell_types") for gene_dataset in gene_datasets]):
cell_types = list(
set([cell_type for gene_dataset in gene_datasets for cell_type in gene_dataset.cell_types])
)
labels = []
for gene_dataset in gene_datasets:
mapping = [cell_types.index(cell_type) for cell_type in gene_dataset.cell_types]
labels += [arrange_categories(gene_dataset.labels, mapping_to=mapping)[0]]
labels = np.concatenate(labels)
else:
labels = np.concatenate([gene_dataset.labels for gene_dataset in gene_datasets])
else:
labels = np.zeros((X.shape[0], 1))
n_labels_offset = 0
current_index = 0
for gene_dataset in gene_datasets:
next_index = current_index + len(gene_dataset)
labels[current_index:next_index] = gene_dataset.labels + n_labels_offset
n_labels_offset += gene_dataset.n_labels
current_index = next_index
local_means = np.concatenate([gene_dataset.local_means for gene_dataset in gene_datasets])
local_vars = np.concatenate([gene_dataset.local_vars for gene_dataset in gene_datasets])
result = GeneExpressionDataset(X, local_means, local_vars, batch_indices, labels,
gene_names=gene_names_ref, cell_types=cell_types)
result.barcodes = [gene_dataset.barcodes if hasattr(gene_dataset, 'barcodes') else None
for gene_dataset in gene_datasets]
return result
@staticmethod
def _filter_genes(gene_dataset, gene_names_ref, on='gene_names'):
"""
        :return: gene_dataset.X filtered by the corresponding genes (columns / features), idx_genes
"""
gene_names = list(getattr(gene_dataset, on))
subset_genes = np.array([gene_names.index(gene_name) for gene_name in gene_names_ref], dtype=np.int64)
return gene_dataset.X[:, subset_genes], subset_genes
def arrange_categories(original_categories, mapping_from=None, mapping_to=None):
unique_categories = np.unique(original_categories)
n_categories = len(unique_categories)
if mapping_to is None:
mapping_to = range(n_categories)
if mapping_from is None:
mapping_from = unique_categories
assert n_categories <= len(mapping_from) # one cell_type can have no instance in dataset
assert len(mapping_to) == len(mapping_from)
new_categories = np.copy(original_categories)
for idx_from, idx_to in zip(mapping_from, mapping_to):
new_categories[original_categories == idx_from] = idx_to
return new_categories, n_categories
# todo the following two functions are only used for consistency in gene use when using both Seurat and scVI
def subsetByGenenames(dataset, subsetnames):
genenames = dataset.gene_names
filter = np.asarray([x in subsetnames for x in genenames])
dataset.X = dataset.X[:, filter]
dataset.update_genes(np.arange(len(filter))[filter])
return dataset
def SubsetGenes(dataset1,dataset2,gene_dataset,plotname,ngenes=1000):
import pandas as pd
genes1 = pd.read_csv('../harmonization/Seurat_data/' + plotname + '.1.hvg_info.csv')
geneid1 = np.asarray([x.replace('gene_', '') for x in genes1[genes1.keys()[0]]]).astype('int')
genenames1 = genes1['genename']
genes2 = pd.read_csv('../harmonization/Seurat_data/' + plotname + '.2.hvg_info.csv')
geneid2 = np.asarray([x.replace('gene_', '') for x in genes2[genes2.keys()[0]]]).astype('int')
genenames2 = genes2['genename']
assert np.sum(np.asarray(genenames1) == gene_dataset.gene_names) == len(gene_dataset.gene_names)
assert np.sum(np.asarray(genenames2) == gene_dataset.gene_names) == len(gene_dataset.gene_names)
geneid = np.union1d(geneid1[:ngenes], geneid2[:ngenes]) - 1
genes = gene_dataset.gene_names[geneid]
dataset1 = subsetByGenenames(dataset1,genes)
dataset2 = subsetByGenenames(dataset2,genes)
gene_dataset = subsetByGenenames(gene_dataset,genes)
return dataset1,dataset2,gene_dataset
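# Minimal usage sketch (not part of the original file), illustrating the
# library-size statistics described in the GeneExpressionDataset docstring.
# The Poisson count matrix below is purely hypothetical.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    counts = rng.poisson(lam=2.0, size=(5, 8)).astype(np.float32)
    X, local_mean, local_var, batch_indices, labels = \
        GeneExpressionDataset.get_attributes_from_matrix(counts)
    dataset = GeneExpressionDataset(X, local_mean, local_var, batch_indices, labels)
    # local_mean / local_var hold the per-batch mean and variance of the
    # log library sizes, broadcast to one row per cell.
    print(len(dataset), dataset.nb_genes, local_mean[0], local_var[0])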
| StarcoderdataPython |
1736819 | import django.forms as forms
from .models import Subject, Comment, Answer
class SubjectForm(forms.ModelForm):
class Meta:
model = Subject
fields = [
'title',
'subtitle',
'content'
]
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = [
'content'
]
class AnswerForm(forms.ModelForm):
class Meta:
model = Answer
fields = [
'content'
] | StarcoderdataPython |
55949 | from gstat_classroom.index import app
app.run_server(debug=True) | StarcoderdataPython |
3316472 | <filename>calm/dsl/builtins/models/ref.py
from .entity import EntityType, Entity
from .validator import PropertyValidator
from calm.dsl.store import Cache
# Ref
class RefType(EntityType):
__schema_name__ = "Ref"
__openapi_type__ = "app_ref"
class RefValidator(PropertyValidator, openapi_type="app_ref"):
__default__ = None
__kind__ = RefType
def _ref(**kwargs):
name = kwargs.get("name", None)
bases = (Entity,)
return RefType(name, bases, kwargs)
def ref(cls):
if isinstance(cls, RefType):
return cls
return cls.get_ref()
class Ref:
def __new__(cls, *args, **kwargs):
raise TypeError("'{}' is not callable".format(cls.__name__))
class Subnet:
def __new__(cls, name, **kwargs):
cluster = kwargs.get("cluster")
account_uuid = kwargs.get("account_uuid")
subnet_cache_data = Cache.get_entity_data(
entity_type="ahv_subnet",
name=name,
cluster=cluster,
account_uuid=account_uuid,
)
if not subnet_cache_data:
raise Exception(
"AHV Subnet {} not found. Please run: calm update cache".format(
name
)
)
return {"kind": "subnet", "name": name, "uuid": subnet_cache_data["uuid"]}
class User:
def __new__(cls, name, **kwargs):
directory = kwargs.get("directory") or ""
display_name = kwargs.get("display_name") or ""
user_cache_data = Cache.get_entity_data(
entity_type="user",
name=name,
directory=directory,
display_name=display_name,
)
if not user_cache_data:
raise Exception(
"User {} not found. Please run: calm update cache".format(name)
)
return {"kind": "user", "name": name, "uuid": user_cache_data["uuid"]}
class Group:
def __new__(cls, name, **kwargs):
directory = kwargs.get("directory") or ""
display_name = kwargs.get("display_name") or ""
user_group_cache_data = Cache.get_entity_data(
entity_type="user_group",
name=name,
directory=directory,
display_name=display_name,
)
if not user_group_cache_data:
raise Exception(
"User Group {} not found. Please run: calm update cache".format(
name
)
)
return {
"kind": "user_group",
"name": name,
"uuid": user_group_cache_data["uuid"],
}
class Account:
def __new__(cls, name, **kwargs):
provider_type = kwargs.get("provider_type") or ""
account_cache_data = Cache.get_entity_data(
entity_type="account", name=name, provider_type=provider_type
)
if not account_cache_data:
raise Exception(
"Account {} not found. Please run: calm update cache".format(name)
)
return {"kind": "account", "name": name, "uuid": account_cache_data["uuid"]}
class Role:
def __new__(cls, name, **kwargs):
role_cache_data = Cache.get_entity_data(entity_type="role", name=name)
if not role_cache_data:
raise Exception(
"Role {} not found. Please run: calm update cache".format(name)
)
return {"kind": "role", "name": name, "uuid": role_cache_data["uuid"]}
class Project:
def __new__(cls, name, **kwargs):
project_cache_data = Cache.get_entity_data(entity_type="project", name=name)
if not project_cache_data:
raise Exception(
"Project {} not found. Please run: calm update cache".format(name)
)
return {"kind": "project", "name": name, "uuid": project_cache_data["uuid"]}
class DirectoryService:
def __new__(cls, name, **kwargs):
ds_cache_data = Cache.get_entity_data(
entity_type="directory_service", name=name
)
if not ds_cache_data:
raise Exception(
"Directory Service {} not found. Please run: calm update cache".format(
name
)
)
return {
"kind": "directory_service",
"name": name,
"uuid": ds_cache_data["uuid"],
}
| StarcoderdataPython |
4838245 | #!/usr/bin/env python3
"""
Hydrogen molecule in Ground State (sto, 3g)
Originally used by River Lane Research for testing Rigetti VQE.
"""
label_to_hamiltonian_coeff = {
"ZZ": 0.011236585210827765,
"II": -0.3399536172489041,
"ZI": 0.39398367743432866,
"IZ": 0.39398367743432866,
"XX": 0.18128880839426165,
}
classical_energy = -1.137270174625326
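# Illustrative sketch (not part of the original module), assuming `expectations`
# maps each Pauli label above to a measured expectation value; the energy is
# just the coefficient-weighted sum, and with ideal ground-state expectations
# it should approach `classical_energy`.
def estimate_energy(expectations):
    """Return sum_i c_i * <P_i> for the two-qubit Hamiltonian above."""
    return sum(coeff * expectations[label]
               for label, coeff in label_to_hamiltonian_coeff.items())
# Hypothetical usage: estimate_energy({"II": 1.0, "ZI": z0, "IZ": z1, "ZZ": zz, "XX": xx})
# where z0, z1, zz, xx are expectation values measured on the prepared state.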
| StarcoderdataPython |
181525 | # visualize.py - convert lattice to graphviz dot
import os
import glob
import graphviz
__all__ = ['lattice', 'render_all']
SORTKEYS = [lambda c: c.index]
NAME_GETTERS = [lambda c: 'c%d' % c.index]
def lattice(lattice, filename, directory, render, view):
"""Return graphviz source for visualizing the lattice graph."""
dot = graphviz.Digraph(
name=lattice.__class__.__name__,
comment=repr(lattice),
filename=filename,
directory=directory,
node_attr=dict(shape='circle', width='.25', style='filled', label=''),
edge_attr=dict(dir='none', labeldistance='1.5', minlen='2')
)
sortkey = SORTKEYS[0]
node_name = NAME_GETTERS[0]
for concept in lattice._concepts:
name = node_name(concept)
dot.node(name)
if concept.objects:
dot.edge(name, name,
headlabel=' '.join(concept.objects),
labelangle='270', color='transparent')
if concept.properties:
dot.edge(name, name,
taillabel=' '.join(concept.properties),
labelangle='90', color='transparent')
dot.edges((name, node_name(c))
for c in sorted(concept.lower_neighbors, key=sortkey))
if render or view:
dot.render(view=view) # pragma: no cover
return dot
def render_all(filepattern='*.cxt', frmat=None, directory=None, out_format=None):
from concepts import Context
if directory is not None:
get_name = lambda filename: os.path.basename(filename)
else:
get_name = lambda filename: filename
if frmat is None:
from concepts.formats import Format
get_frmat = Format.by_extension.get
else:
get_frmat = lambda filename: frmat
for cxtfile in glob.glob(filepattern):
name, ext = os.path.splitext(cxtfile)
filename = '%s.gv' % get_name(name)
c = Context.fromfile(cxtfile, get_frmat(ext))
l = c.lattice
dot = l.graphviz(filename, directory)
if out_format is not None:
dot.format = out_format
dot.render()
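# Usage sketch (not part of the original module), assuming a formal context
# built with the `concepts` package; the toy context below follows that
# package's documented cross-table format.
if __name__ == '__main__':
    from concepts import Context
    c = Context.fromstring('''
           |human|knight|king |mysterious|
King Arthur|  X  |  X   |  X  |          |
Sir Robin  |  X  |  X   |     |          |
holy grail |     |      |     |    X     |
''')
    # Build the Graphviz source without rendering or opening a viewer.
    dot = lattice(c.lattice, filename='example.gv', directory=None,
                  render=False, view=False)
    print(dot.source)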
| StarcoderdataPython |
3397598 | <reponame>GokulDas027/Chrome-dino-auto-run
import cv2
import numpy as np
from PIL import ImageGrab # windows and mac
# import pyscreenshot as ImageGrab # linux
import ctypes
# import os # for linux key press
# bbox = (450, 230, 500, 265) fix
# bbox = (Left, Top, Right, Bottom)
bbox = (865, 240, 960, 275)
shape = (bbox[3]-bbox[1], bbox[2]-bbox[0], 3)
bg = 255
ref_frame = np.full(shape, bg)
i = 1
while True:
# capturing the frame.
img = ImageGrab.grab(bbox)
frame = np.array(img)
cv2.imshow("frame", frame)
# updating the reference frame with the background change.
# toggling between white frame and black frame.
if bg != frame[0][0][0]:
bg = frame[0][0][0]
ref_frame = np.full(shape, bg)
i += 1
# comparing the captured frame and reference frame.
frame_diff = np.subtract(ref_frame, frame).sum()
# if frames aren't the same, obstacle detected and jump.
if frame_diff != 0:
ctypes.windll.user32.keybd_event(0x20, 0, 0, 0) # Space is down
# os.system('xdotool key space') # for linux
# updating the frame capture region to adapt with the increasing speed.
if i % 4 == 0:
bbox = (bbox[0]+1, bbox[1], bbox[2]+1, bbox[3])
shape = (bbox[3]-bbox[1], bbox[2]-bbox[0], 3)
ref_frame = np.full(shape, bg)
print(f"update {i}")
i += 1
# listen for ESC key to exit.
if cv2.waitKey(1) == 27: # when ESC is pressed
break
cv2.destroyAllWindows()
| StarcoderdataPython |
93659 | <filename>gears/listentomysong.py
from . import geffects
from pbge import effects
import pbge
import random
from . import aitargeters
from . import enchantments
SONG_REACH = 12
MENTAL_COST = 9
class _Info(object):
def __init__(self, att_stat, att_skill, def_stat, def_skill):
self.att_stat = att_stat
self.att_skill = att_skill
self.def_stat = def_stat
self.def_skill = def_skill
# This is the entry point to this module.
class Invocation( effects.Invocation ):
def __init__(self, att_stat, att_skill, def_stat, def_skill):
info = _Info(att_stat, att_skill, def_stat, def_skill)
super().__init__( name = 'Captivate Audience'
, fx = _top_fx(info)
, area = pbge.scenes.targetarea.SelfCentered(radius = SONG_REACH)
, used_in_combat = True
, used_in_exploration = False
, ai_tar = aitargeters.GenericTargeter( targetable_types = (pbge.scenes.PlaceableThing,)
, conditions = [ aitargeters.CasterIsSurrounded(reach = SONG_REACH) ]
)
, shot_anim = geffects.OriginSpotShotFactory(geffects.ListenToMySongAnim)
, data = _attack_data()
, price = [geffects.MentalPrice(MENTAL_COST)]
, targets = 1
)
def _attack_data():
'''Defines the icon for this action.'''
# TODO: Something more appropriate.
return geffects.AttackData(pbge.image.Image('sys_skillicons.png', 32, 32), 0)
def _top_fx(info):
'''Primary effect when cast.'''
# The invocation has the entire area, so the effect will apply to
# the entire area.
# However, we want the song effect to be triggered once only,
# at the originator.
return geffects.CheckConditions( [aitargeters.TargetIsOriginator()]
, on_success = [_sing_fx(info)]
)
def _sing_fx(info):
return geffects.SkillRoll( info.att_stat, info.att_skill
, on_success = [_positive_fx(info)]
, on_failure = [_fail_fx(info)]
)
def _fail_fx(info):
# On failing, do another skill roll.
# If they fail again, trigger a negaive effect.
return geffects.SkillRoll( info.att_stat, info.att_skill
, on_success = [effects.NoEffect(anim = geffects.FailAnim)]
, on_failure = [_negative_fx(info)]
)
###############################################################################
# Base classes for positive and negative invocations.
# These are invoked at the originator only, so has to redo the area.
class _SongInvocation(effects.Invocation):
'''Common class for all invocations'''
def __init__(self, fx):
# The name is not actually seen.
super().__init__( name = "Song Effect"
, fx = fx
, area = pbge.scenes.targetarea.SelfCentered( radius = SONG_REACH
, exclude_middle = True
, delay_from = -1
)
)
# Actual base classes for positive and negative song results.
# Derived classes must accept a single argument, info, in
# their constructors.
class _PositiveSongInvocation(_SongInvocation):
def __init__(self, fx):
wrapfx = effects.NoEffect( anim = geffects.MusicAnim
, children = [fx]
)
super().__init__(wrapfx)
class _NegativeSongInvocation(_SongInvocation):
def __init__(self, fx):
wrapfx = effects.NoEffect( anim = geffects.BadMusicAnim
, children = [fx]
)
super().__init__(wrapfx)
###############################################################################
# Utility functions for creating song effects.
def _affect_enemies(fx, ally_anim = None):
"""Give an effect that only affects enemies"""
if ally_anim:
on_failure = [geffects.CheckConditions( [ aitargeters.TargetIsOperational()
, aitargeters.TargetIsAlly()
]
, on_success = [effects.NoEffect(anim = ally_anim)]
)]
else:
on_failure = []
return geffects.CheckConditions( [ aitargeters.TargetIsOperational()
, aitargeters.TargetIsEnemy()
]
, on_success = [fx]
, on_failure = on_failure
)
def _affect_allies(fx, enemy_anim = None):
"""Give an effect that only affects allies"""
if enemy_anim:
on_failure = [geffects.CheckConditions( [ aitargeters.TargetIsOperational()
, aitargeters.TargetIsEnemy()
]
, on_success = [effects.NoEffect(anim = enemy_anim)]
)]
else:
on_failure = []
return geffects.CheckConditions( [ aitargeters.TargetIsOperational()
, aitargeters.TargetIsAlly()
]
, on_success = [fx]
, on_failure = on_failure
)
def _double_skill_roll(info, fx, roll_mod = 35, min_chance = 25):
"""Do an opposed skill roll, if it fails do another skill roll."""
return geffects.OpposedSkillRoll( info.att_stat, info.att_skill
, info.def_stat, info.def_skill
, roll_mod = roll_mod
, min_chance = min_chance
, on_success = [fx]
, on_failure = [ geffects.OpposedSkillRoll( info.att_stat, info.att_skill
, info.def_stat, info.def_skill
, roll_mod = 25
, min_chance = 25
, on_success = [fx]
, on_failure = [effects.NoEffect(anim = geffects.ResistAnim)]
)
]
)
########################
### Positive Effects ###
########################
class _HaywireEnemies(_PositiveSongInvocation):
    # This is mostly a placeholder for now.
def __init__(self, info):
super().__init__(_affect_enemies( self._get_fx(info)
, geffects.CheerAnim
))
def _get_fx(self, info):
return _double_skill_roll(info, geffects.AddEnchantment( geffects.HaywireStatus
, anim = geffects.InflictHaywireAnim
))
class _InspireAllies(_PositiveSongInvocation):
def __init__(self, info):
super().__init__(_affect_allies(self._get_fx(info)))
def _get_fx(self, info):
return geffects.AddEnchantment( geffects.Inspired
, anim = geffects.BurnAnim
)
class _DemoralizeEnemies(_PositiveSongInvocation):
def __init__(self, info):
super().__init__(_affect_enemies( self._get_fx(info)
, geffects.CheerAnim
))
def _get_fx(self, info):
return _double_skill_roll(info, geffects.AddEnchantment( geffects.Demoralized
, anim = geffects.SuperBoom
))
class _EnergizeAllies(_PositiveSongInvocation):
def __init__(self, info):
super().__init__(_affect_allies(self._get_fx(info)))
def _get_fx(self, info):
children = [ geffects.DoEncourage(info.att_stat, info.att_skill)
, geffects.DispelEnchantments(enchantments.ON_DISPEL_NEGATIVE)
]
return effects.NoEffect( children = children
, anim = geffects.OverloadAnim
)
########################
### Negative Effects ###
########################
class _DemoralizeAllies(_NegativeSongInvocation):
    # This is mostly a placeholder for now.
def __init__(self, info):
super().__init__(_affect_allies( self._get_fx(info)
, geffects.HeckleAnim
))
def _get_fx(self, info):
return geffects.AddEnchantment( geffects.Demoralized
, anim = geffects.SuperBoom
)
###############################################################################
def _random_fx(BaseSongInvocation, info):
'''Returns a RandomEffect that randomly chooses the subclasses of the given
BaseSongInvocation.
'''
possible_fx = list()
for cls in BaseSongInvocation.__subclasses__():
possible_fx.append(effects.InvokeEffect(invocation = cls(info)))
return geffects.RandomEffect(possible_fx = possible_fx)
def _positive_fx(info):
return _random_fx(_PositiveSongInvocation, info)
def _negative_fx(info):
return _random_fx(_NegativeSongInvocation, info)
| StarcoderdataPython |
185571 | <gh_stars>10-100
# @sp: newly created
import os
import sys
sys.path.append(os.getcwd()) # puts all uploaded python modules into the python path
sys.path.append('/input/src/')
import params.polyaxon_parsing_iitnet_cnn_lstm as pp3
import random
import shutil
from data.process_data import process_data
from params.Params_Winslow import Params, winslow_params
from data.DataInterface import DataInterface as DataInt
def preprocess_physionet_data():
"""
Only do preprocessing for all data to save storage space when training the model on
large datasets (e.g. physionet). Save the data to the local disk (set as save_path in params).
"""
print("Setup parameters ... ", end=" ")
# get parameters
params = Params()
# get additional parameters for iitnet
if params.plx.get('mdl_architecture') == "iitnet_cnn_bilstm":
plx: dict = pp3.get_parameters()
params.plx.update(plx)
# adjust winslow parameters
is_winslow = False
if 'WINSLOW_PIPELINE_NAME' in os.environ:
is_winslow = True
winslow_params(params)
    params.plx['subject_batch'] = 1 # ! important for IITNet
print("done")
print("\nBuild Data interpreter object: \n")
# Get data
data_int = DataInt(save_path=params.plx["save_path"],
perform_save_raw=params.plx["save_raw_data"],
key_labels=params.plx["key_labels"],
uuid=params.plx["experiment_uuid"])
total_subjects = params.plx.get('train_count') + params.plx.get('val_count') + params.plx.get('test_count')
print("\nProcessing Data from", str(total_subjects), "subjects.")
print("\nStart Data Processing ... ")
# Process Data
process_data(params, data_int, params.plx["data_count"])
print("\n All Data processed.")
def preprocess_sleepedf_data():
"""
Only do preprocessing for all data to save storage space when training the model on
large datasets (e.g. physionet). Save the data to the local disk (set as save_path in params).
"""
print("Setup parameters ... ", end=" ")
# get parameters
params = Params()
# get additional parameters for iitnet
if params.plx.get('mdl_architecture') == "iitnet_cnn_bilstm":
plx: dict = pp3.get_parameters()
params.plx.update(plx)
is_winslow = False
if 'WINSLOW_PIPELINE_NAME' in os.environ:
is_winslow = True
winslow_params(params)
params.plx['subject_batch'] = 1 # ! wichtig für IITNet
print("done")
print("\nBuild Data interpreter object: \n")
# Set in polyaxon-params: load=0, experiment-uuid=iitnet_0, get_raw_data_from_local_path=1,
# data_already_processed=False, dataset_name=deep_sleep,
# channel-types, channel-names, frequency, ch_idx_list
# Set in preprocess_data_task_ssc: line 106 --> 7
# input_path_utils: base_path = get_src_parent_dir() + "src/data/" (only local)
if is_winslow:
params.plx['save_path'] = '/output/sleep-edf-v1/sleep-cassette/processed/training/'
else:
params.plx['save_path'] = "D:/sleep-edf-v1/sleep-cassette/processed/training/"
# Get data
data_int = DataInt(save_path=params.plx["save_path"],
perform_save_raw=params.plx["save_raw_data"],
key_labels=params.plx["key_labels"],
uuid=params.plx["experiment_uuid"])
total_subjects = params.plx.get('train_count') + params.plx.get('val_count') + params.plx.get('test_count')
print("\nProcessing Data from", str(total_subjects), "subjects.")
print("\nStart Data Processing ... ")
# Process Data
process_data(params, data_int, params.plx["data_count"])
print("\n All Data processed.")
# Delete unnecessary files and separate test data
cleanup_data(params=params, is_winslow=is_winslow)
def preprocess_shhs_data():
"""
Only do preprocessing for all data to save storage space when training the model on
large datasets (e.g. physionet). Save the data to the local disk (set as save_path in params).
"""
print("Setup parameters ... ", end=" ")
# get parameters
params = Params()
# get additional parameters for iitnet
if params.plx.get('mdl_architecture') == "iitnet_cnn_bilstm":
plx: dict = pp3.get_parameters()
params.plx.update(plx)
is_winslow = False
if 'WINSLOW_PIPELINE_NAME' in os.environ:
is_winslow = True
winslow_params(params)
    params.plx['subject_batch'] = 1 # ! important for IITNet
print("done")
print("\nBuild Data interpreter object: \n")
# Set in polyaxon-params: load=0, experiment-uuid=iitnet_0, get_raw_data_from_local_path=1,
# data_already_processed=False, dataset_name=shhs1,
# channel-types, channel-names, frequency, ch_idx_list
# Set in preprocess_data_task_ssc: line 106 --> 14/15/16
# input_path_utils: base_path = "Z:/"
# Get data
data_int = DataInt(save_path=params.plx["save_path"],
perform_save_raw=params.plx["save_raw_data"],
key_labels=params.plx["key_labels"],
uuid=params.plx["experiment_uuid"])
total_subjects = params.plx.get('train_count') + params.plx.get('val_count') + params.plx.get('test_count')
print("\nProcessing Data from", str(total_subjects), "subjects.")
print("\nStart Data Processing ... ")
# Process Data
process_data(params, data_int, params.plx["data_count"])
print("\n All Data processed.")
# Delete unnecessary files and separate test data
cleanup_data(params=params, is_winslow=is_winslow)
def cleanup_data(params, is_winslow):
"""
Delete unnecessary large files, that are not needed for the model training.
Separate Training and Testing Data.
"""
# Delete unnecessary files
folder = params.plx.get('save_path') + params.plx.get('experiment_uuid')
subfolders = [f.path for f in os.scandir(folder) if f.is_dir()]
print("found", str(len(subfolders)), "folders in directory.")
for folder in subfolders:
x_file = folder + "/0_data_x_preprocessed.npy"
print(x_file)
if os.path.exists(x_file):
os.remove(x_file)
print("deleted")
else:
print("does not exist!")
y_file = folder + "/0_data_y_preprocessed.npy"
print(y_file)
if os.path.exists(y_file):
os.remove(y_file)
print("deleted")
else:
print("does not exist!")
# put randomly selected samples from training to test folder
num_test = params.plx.get('test_count')
test_subjects = random.sample(subfolders, num_test)
print("Selected", str(len(test_subjects)), "test subjects.")
    if params.plx.get('dataset_name') == 'shhs1': # only works with this dataset offline!
dest_folder = 'D:/shhs1/processed/test/' + params.plx.get('experiment_uuid')
elif params.plx.get('dataset_name') == 'deep_sleep':
dest_folder = 'D:/sleep-edf-v1/sleep-cassette/processed/test/' + params.plx.get('experiment_uuid')
elif params.plx.get('dataset_name') == 'physionet_challenge':
dest_folder = 'D:/physionet_challenge/test/' + params.plx.get('experiment_uuid')
for test_subject in test_subjects:
print("Moving", test_subject, "to", dest_folder)
shutil.move(test_subject, dest_folder)
print("done.")
print("Moved", str(len(test_subjects)), "subjects.")
if __name__ == "__main__":
preprocess_shhs_data()
# preprocess_sleepedf_data()
# preprocess_physionet_data()
| StarcoderdataPython |
1730551 | <gh_stars>10-100
from builtins import str
from django.test import TestCase
from measure_mate.tests.factories import TemplateFactory
class TemplateTestCases(TestCase):
def test_has_up_to_five_in_running_set(self):
template = TemplateFactory()
template.clean()
self.assertEqual(template.name, str(template))
| StarcoderdataPython |
173595 | <reponame>lccasagrande/Hashtag-Monitor<filename>hashtag_monitor/apps/monitor/migrations/0020_auto_20191227_1803.py
# Generated by Django 3.0 on 2019-12-27 18:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitor', '0019_auto_20191226_2112'),
]
operations = [
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, default='', max_length=500, verbose_name='Username'),
),
migrations.AlterField(
model_name='user',
name='screen_name',
field=models.CharField(blank=True, default='', max_length=500, verbose_name='Username'),
),
]
| StarcoderdataPython |
12051 | import datetime
import logging
import os
import re
from collections import OrderedDict
from html import escape
from html.parser import HTMLParser
from io import StringIO
import docutils
import docutils.core
import docutils.io
from docutils.parsers.rst.languages import get_language as get_docutils_lang
from docutils.writers.html4css1 import HTMLTranslator, Writer
from pelican import rstdirectives # NOQA
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, Tag
from pelican.plugins import signals
from pelican.utils import get_date, pelican_open, posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
'tags': False,
'date': False,
'modified': False,
'status': False,
'category': False,
'author': False,
'save_as': False,
'url': False,
'authors': False,
'slug': False
}
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "<NAME>, <NAME>" or "<NAME>; <NAME>"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, str):
if ';' in text:
text = text.split(';')
else:
text = text.split(',')
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
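# Illustrative examples (added commentary, not in the original source; the
# names are hypothetical):
#   ensure_metadata_list('Alice, Bob')              -> ['Alice', 'Bob']
#   ensure_metadata_list('Doe, Jane; Doe, John')    -> ['Doe, Jane', 'Doe, John']
#   ensure_metadata_list(['  Alice ', '', 'Alice']) -> ['Alice']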
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
class BaseReader:
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
super().__init__(document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLWriter(Writer):
def __init__(self):
super().__init__()
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files
By default the output HTML is written using
docutils.writers.html4css1.Writer and translated using a subclass of
docutils.writers.html4css1.HTMLTranslator. If you want to override it with
your own writer/translator (e.g. a HTML5-based one), pass your classes to
these two attributes. Look in the source code for details.
writer_class Used for writing contents
field_body_translator_class Used for translating metadata such
as article summary
"""
enabled = bool(docutils)
file_extensions = ['rst']
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lang_code = self.settings.get('DEFAULT_LANG', 'en')
if get_docutils_lang(lang_code):
self._language_code = lang_code
else:
logger.warning("Docutils has no localization for '%s'."
" Using 'en' instead.", lang_code)
self._language_code = 'en'
def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning(
'Document title missing in file %s: '
'Ensure exactly one top level section',
source_path)
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name.lower() in formatted_fields:
value = render_node_to_html(
document, body_elem,
self.field_body_translator_class)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'language_code': self._language_code,
'halt_level': 2,
'traceback': True,
'warning_stream': StringIO(),
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(),
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish()
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document, source_path)
metadata.setdefault('title', parts.get('title'))
return content, metadata
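# Illustrative sketch (added commentary, not in the original source): as the
# RstReader docstring notes, a plugin could swap in its own writer/translator,
# e.g. with hypothetical HTML5 classes:
#
#   class Html5RstReader(RstReader):
#       writer_class = Html5Writer                     # hypothetical
#       field_body_translator_class = Html5Translator  # hypothetical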
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
settings = self.settings['MARKDOWN']
settings.setdefault('extension_configs', {})
settings.setdefault('extensions', [])
for extension in settings['extension_configs'].keys():
if extension not in settings['extensions']:
settings['extensions'].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']:
settings['extensions'].append('markdown.extensions.meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
# prevent metadata extraction in fields
self._md.preprocessors.deregister('meta')
output = {}
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
# formatted metadata is special case and join all list values
formatted_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
formatted = self._md.convert(formatted_values)
output[name] = self.process_metadata(name, formatted)
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1:
logger.warning(
'Duplicate definition of `%s` '
'for %s. Using first one.',
name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN'])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, 'Meta'):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
return content, metadata
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
super().__init__(convert_charrefs=False)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif self._in_head and tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag))
for k, v in attrs:
result += ' ' + escape(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
if '"' in v:
result += "='{}'".format(escape(v, quote=False))
else:
result += '="{}"'.format(escape(v, quote=False))
if close_tag:
return result + ' />'
return result + '>'
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"})
if name == 'keywords':
name = 'tags'
if name in self.metadata:
# if this metadata already exists (i.e. a previous tag with the
# same name has already been specified then either convert to
# list or append to list
if isinstance(self.metadata[name], list):
self.metadata[name].append(contents)
else:
self.metadata[name] = [self.metadata[name], contents]
else:
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
This is customizable both with the 'READERS' setting, and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super().__init__(settings, cache_name, caching_policy, load_policy)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug(
'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
if preread_signal:
logger.debug(
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata(
settings=self.settings, process=reader.process_metadata))
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(_filter_discardable_metadata(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata)))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
reader_metadata = _filter_discardable_metadata(reader_metadata)
self.cache_data(path, (content, reader_metadata))
metadata.update(reader_metadata)
if content:
# find images with empty alt
find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
import smartypants
typogrify_dashes = self.settings['TYPOGRIFY_DASHES']
if typogrify_dashes == 'oldschool':
smartypants.Attr.default = smartypants.Attr.set2
elif typogrify_dashes == 'oldschool_inverted':
smartypants.Attr.default = smartypants.Attr.set3
else:
smartypants.Attr.default = smartypants.Attr.set1
# Tell `smartypants` to also replace " HTML entities with
# smart quotes. This is necessary because Docutils has already
# replaced double quotes with said entities by the time we run
# this filter.
smartypants.Attr.default |= smartypants.Attr.w
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if 'title' in metadata:
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug(
'Signal %s.send(%s, <metadata>)',
context_signal.name,
context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
if process:
value = process(name, value)
metadata[name] = value
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and \
settings['DEFAULT_DATE'] != 'fs':
if isinstance(settings['DEFAULT_DATE'], str):
metadata['date'] = get_date(settings['DEFAULT_DATE'])
else:
metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime)
metadata['modified'] = metadata['date']
# Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts.
epm = settings.get('EXTRA_PATH_METADATA', {})
for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name
# is a prefix of another's.
dirpath = posixize_path(os.path.join(path, ''))
if source_path == path or source_path.startswith(dirpath):
metadata.update(meta)
return metadata
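# Hypothetical example of how EXTRA_PATH_METADATA interacts with path_metadata():
# entries are matched against the source path and its parent directories, and the
# sort above makes the most specific directory win conflicts.
#
#   settings = {'EXTRA_PATH_METADATA': {
#       'misc': {'status': 'hidden'},
#       'misc/posts': {'status': 'published'},
#   }}
#   path_metadata(full_path='/site/content/misc/posts/note.md',
#                 source_path='misc/posts/note.md',
#                 settings=settings)
#   # -> {'status': 'published'}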
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': datetime.datetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
| StarcoderdataPython |
3358905 | <reponame>B-C-WANG/ReinforcementLearningInAutoPilot<filename>src/ReinforcementLearning/train/archive_bad/ddpg_train_waypoints_GAL_v1.py
# coding:utf-8
# Type: Private Author: <NAME>
'''
FIXME: DDPG needs rewards that are all negative, which the waypoints environment does not provide yet!
Description:
    Like the local model, training does not reach a good result.
'''
import numpy as np
from ReinforcementLearning.Modules.Agents.DDPG_Agent import DDPG_Agent_GAL_v1
from ReinforcementLearning.Modules.Environments.Environments_waypointTarget import CarlaConsecutiveWaypointsTargetEnv_v1
from ReinforcementLearning.Modules.Environments.Actions import ContinuousSteeringVelocityBrakeAction_v1
# For the settings below, see the A3C "train waypoints global and local" script
server_config = {
"10.10.9.128": [2000],
}
n_workers_in_each_port = 1
spawn_index_for_each_car_in_worker = (0, 10, 20, 30, 40, 50, 60, 70, 80)
# Use random indices to widen the search
spawn_index_for_each_car_in_worker = np.random.randint(0, 100, size=100)
import tensorflow as tf
tf.random.set_random_seed(123)
np.random.seed(123)
env_dict = {}
worker_kwargs = {}
model_kwargs = {}
for ip in server_config:
for port in server_config[ip]:
for i in range(n_workers_in_each_port):
name = 'W_%s' % (str(ip) + "_" + str(port) + "_" + str(i)) # worker name
env = CarlaConsecutiveWaypointsTargetEnv_v1(
carla_egg_path="/home/wang/Desktop/carla/PythonAPI/carla/dist/carla-0.9.5-py2.7-linux-x86_64.egg",
carla_pythonAPI_path="/home/wang/Desktop/carla/PythonAPI/carla",
carla_UE_ip=ip,
carla_UE_port=port,
n_waypoint=100,
                # DDPG has no IL and is comparatively hard to train, so use a smaller waypoint spacing!
waypoint_spacing=3,
vehicle_start_point_index=spawn_index_for_each_car_in_worker[i],
wait_time_after_apply_action=0.1,
ratio_of_reaching=0.3,
add_center_lane_state=True,
                # This is where DDPG differs from the A3C setup: use a continuous action space
action_replace=ContinuousSteeringVelocityBrakeAction_v1(),
                # In practice DDPG and A3C perform very differently, so maybe design a separate reward for DDPG?
#reward_replace=
)
env_dict[name] = env
            worker_kwargs[name] = {
                "start_variance": 0.0,  # keep the variance small while debugging so the trend is easier to observe
"variance_decay": 0.99,
"debug":True
}
# model_kwargs[name] = {
# }
DDPG_Agent_GAL_v1(env_prototype_dict_for_workers=env_dict, save_dir="./a3c_gal_ckpt/",
kwargs_for_worker_dict=worker_kwargs,
).start()
| StarcoderdataPython |
1618627 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright 2019 Ricequant, Inc
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import bisect_right
import numpy as np
from rqalpha.utils.datetime_func import convert_date_to_int
PRICE_FIELDS = {
'open', 'close', 'high', 'low', 'limit_up', 'limit_down', 'acc_net_value', 'unit_net_value'
}
FIELDS_REQUIRE_ADJUSTMENT = set(list(PRICE_FIELDS) + ['volume'])
def _factor_for_date(dates, factors, d):
if d < dates[0]:
return 1
if d > dates[-1]:
return factors[-1]
pos = bisect_right(dates, d)
return factors[pos-1]
def adjust_bars(bars, ex_factors, fields, adjust_type, adjust_orig):
if ex_factors is None or len(bars) == 0:
return bars if fields is None else bars[fields]
dates = ex_factors['start_date']
ex_cum_factors = ex_factors['ex_cum_factor']
if adjust_type == 'pre':
adjust_orig_dt = np.uint64(convert_date_to_int(adjust_orig))
base_adjust_rate = _factor_for_date(dates, ex_cum_factors, adjust_orig_dt)
else:
base_adjust_rate = 1.0
start_date = bars['datetime'][0]
end_date = bars['datetime'][-1]
if (_factor_for_date(dates, ex_cum_factors, start_date) == base_adjust_rate and
_factor_for_date(dates, ex_cum_factors, end_date) == base_adjust_rate):
return bars if fields is None else bars[fields]
factors = ex_cum_factors.take(dates.searchsorted(bars['datetime'], side='right') - 1)
    # apply the ex-rights (dividend/split) adjustment
factors /= base_adjust_rate
if isinstance(fields, str):
if fields in PRICE_FIELDS:
return bars[fields] * factors
elif fields == 'volume':
return bars[fields] * (1 / factors)
# should not got here
return bars[fields]
result = np.copy(bars if fields is None else bars[fields])
for f in result.dtype.names:
if f in PRICE_FIELDS:
result[f] *= factors
elif f == 'volume':
result[f] *= (1 / factors)
return result
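# Usage sketch (assumptions: `bars` is a structured numpy array with a 'datetime'
# field of ints such as 20190104000000 plus price/volume fields, and `ex_factors`
# is a structured array with 'start_date' and 'ex_cum_factor' fields):
#
#   close_post = adjust_bars(bars, ex_factors, fields='close',
#                            adjust_type='post', adjust_orig=None)
#
# 'post' adjustment normalises against a factor of 1.0, so adjust_orig is unused;
# 'pre' adjustment additionally needs adjust_orig (a date) whose cumulative factor
# becomes the reference.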
| StarcoderdataPython |
139236 | <reponame>DCorredorM/MarkovDecisionProcess<gh_stars>0
from discrete_world.space import finiteTimeSpace
from discrete_world.Reward import finiteTimeReward
from discrete_world.mdp import finiteTime
import networkx as nx
class SSP_space(finiteTimeSpace):
def __init__(self, actions, states, time_horizon, G):
super(SSP_space, self).__init__(actions, states, time_horizon)
self.G = G
def build_admisible_actions(self):
"""
Builds the admisible actions function for the put MDP
"""
def adm_A(s):
return list(self.G.successors(s))
self.adm_A = adm_A
def build_kernel(self):
"""
Builds the stochastic kernel function for the put MDP
"""
def Q(s, a):
sons = self.adm_A(s)
print(sons)
if len(sons) == 3:
density = {ai: 0.6 if ai == a else 0.2 for ai in sons}
elif len(sons) == 2:
density = {ai: 0.7 if ai == a else 0.3 for ai in sons}
else:
density = {ai: 1 if ai == a else 0 for ai in sons}
return density
self.Q = Q
class SSP_reward(finiteTimeReward):
def __init__(self, space):
super().__init__(space)
def reward(self, t, state, action=None):
"""
        Reward function for the stochastic shortest path problem: the expected
        arc cost under the transition kernel induced by the chosen action.
Parameters
----------
t: int
time
state:
state
action:
Action
Returns
-------
float
the reward for the given (time, state, action) triple.
"""
probs = self.Q(state, action)
G = self.space.G
if t != self.T:
r = sum(p * G[state][j]['c'] for j, p in probs.items())
else:
r = 0
return r
if __name__ == '__main__':
# We create the graph
G = nx.DiGraph()
G.add_nodes_from(range(1, 9))
edges = [(1, 2, 2), (1, 3, 4), (1, 4, 3), (2, 5, 4), (2, 6, 5), (3, 5, 5),
(3, 6, 6), (3, 7, 1), (4, 7, 2), (5, 8, 1), (6, 8, 2), (7, 8, 6)]
G.add_weighted_edges_from(edges, weight='c')
# Create the space object
actions, states, time_horizon = G.nodes(), G.nodes(), 3
ssp_space = SSP_space(actions, states, time_horizon, G)
# Create the reward object
ssp_reward = SSP_reward(ssp_space)
mdp = finiteTime(ssp_space, ssp_reward)
# Solves the MDP and stores its solution
S0 = 1
pol, v = mdp.solve(S0)
    # Print the optimal value and the optimal policy
print(f'The optimal value is {v}')
print('The policy is:', pol, sep='\n')
print(mdp.policy.policy)
| StarcoderdataPython |
3326095 | # Simple NEC remote decode-and-print example
# Prints out the 4-byte code transmitted by NEC remotes
import pulseio
import board
import adafruit_dotstar
import adafruit_irremote
led = adafruit_dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1)
pulsein = pulseio.PulseIn(board.REMOTEIN, maxlen=120, idle_state=True)
decoder = adafruit_irremote.GenericDecode()
# size must match what you are decoding! for NEC use 4
received_code = bytearray(4)
print("Ready for NEC remote input!")
while True:
led[0] = (0, 0, 0) # LED off
pulses = decoder.read_pulses(pulsein)
print("\tHeard", len(pulses), "Pulses:", pulses)
try:
code = decoder.decode_bits(pulses, debug=False)
led[0] = (0, 100, 0) # flash green
print("Decoded:", code)
except adafruit_irremote.IRNECRepeatException: # unusual short code!
led[0] = (100, 100, 0) # flash yellow
print("NEC repeat!")
except adafruit_irremote.IRDecodeException as e: # failed to decode
led[0] = (100, 0, 0) # flash red
print("Failed to decode: ", e.args)
print("----------------------------")
| StarcoderdataPython |
1618816 | import axelrod as axl
from rich import print
from timeit import default_timer as timer
from axelrod import tournament
from axelrod import result_set
TURNS = 200
REPETITIONS = 100
RUN_TYPE = "dev"
player_set = {
"dev_tour": [axl.Cooperator(), axl.Defector(), axl.TitForTat()],
"first_tour": [s() for s in axl.axelrod_first_strategies],
"second_tour": [
s() for s in axl.axelrod_second_strategies
    ],  # axelrod_second_strategies is defined by me in the axl package's __init__ file; check there if this raises an error
"all": [s() for s in axl.all_strategies],
}
def step_run(players, deviation):
tournament_mc = axl.Tournament(
players, turns=TURNS, uniform=True, deviation=deviation, repetitions=REPETITIONS
)
result_set_mc = tournament_mc.play(filename="tournament_mc.csv", processes=4) #
end = timer()
matrix_mc = axl.ResultMatrix(
filename="tournament_mc.csv",
players=players,
repetitions=REPETITIONS,
deviation=deviation,
run_type="montecarlo"
)
winner_matrix = matrix_mc.create()
def main():
### INIT ###
# players = player_set["first_tour"] + player_set["second_tour"]
players = player_set["dev_tour"]
### run basic tour with no dev
tournament_default = axl.Tournament(players, turns=TURNS, repetitions=REPETITIONS)
result_set_default = tournament_default.play(
filename="tournament_default.csv", processes=4
)
matrix_default = axl.ResultMatrix(
filename="tournament_default.csv",
players=players,
repetitions=REPETITIONS,
deviation=None,
run_type="default"
)
winner_matrix = matrix_default.create()
### run basic tour with no dev ###############################
deviation = 100
step = 5
while deviation >= 80:
step_run(players, deviation)
deviation = deviation - step
if __name__ == "__main__":
main()
| StarcoderdataPython |
3315263 | <reponame>bell-bot/audio_adversarial_examples<filename>datasets.py
# -*- coding: future_fstrings -*-
import os
from re import L
from typing import Dict, Tuple
import sys
import logging
import numpy
import regex as re
# import torchaudio.datasets.tedlium as tedlium
import librosa
from Data import tedlium_local as tedlium
import torchaudio
from torch import Tensor
import pandas as pd
from utils import get_git_root
from Preprocessing.pre_processing import resample_audio
######### ------------------ PATHING ------------- ############
"""Specify path to TEDLIUM directory"""
data_paths = os.path.join(get_git_root(os.getcwd()) ,'Data')
DATASET_TEDLIUM_PATH = data_paths
DATASET_MLCOMMONS_PATH = data_paths
KEYWORDS_LINK_CSV_PATH = os.path.join(data_paths, "KeywordPerSample", "keywords.csv")
KEYPHRASES_LINK_CSV_PATH = os.path.join(data_paths, "Keyphrases" , "keyphrases.csv")
LABELS_KEYPHRASES_CSV_PATH = os.path.join(data_paths, "Keyphrases" , "labels.csv")
############# ---------CSV HEADERS --------------################
#TODO! Might be better to have a header called keyword_id, in order to take into account the different variations of keywords and phrases inside the same sample
class KeywordsCSVHeaders:
"""
Represents the fields keywords.csv file
KEYWORD: The keyword linking the two audio files (sample of a TED audio file and an MSWC recording of that keyword)
TED_SAMPLE_ID: Represents the sample id of an audio. In other words, it is a unique id that maps to a segment of a TED audio file.
Hence, this is NOT the same as "talk_id", which represents the id of an entire audio file
TED_DATASET_TYPE: The type of dataset the sample exists in (Train vs Dev vs Test set)
MSWC_ID: The id of the keyword recording
"""
KEYWORD = "Keyword"
TED_SAMPLE_ID= "TEDLIUM_SampleID"
TED_DATASET_TYPE = "TEDLIUM_SET"
MSWC_ID = "MSWC_AudioID"
CSV_header = [KEYWORD, TED_SAMPLE_ID, TED_DATASET_TYPE, MSWC_ID]
class KeyphrasesCSVHeaders:
KEYWORD = "Keyword"
TED_SAMPLE_ID= "TEDLIUM_SampleID"
TED_DATASET_TYPE = "TEDLIUM_SET"
MSWC_ID = "MSWC_AudioID"
KEYWORD_ID = "Word_ID"
CSV_header = [KEYWORD, TED_SAMPLE_ID, TED_DATASET_TYPE, MSWC_ID, KEYWORD_ID]
class LabelsCSVHeaders:
"""
Represents the fields labels.csv file
KEYWORD: The keyword linking the two audio files (sample of a TED audio file and an MSWC recording of that keyword)
TED_SAMPLE_ID: Represents the sample id of an audio. In other words, it is a unique id that maps to a segment of a TED audio file.
Hence, this is NOT the same as "talk_id", which represents the id of an entire audio file
TED_DATASET_TYPE: The type of dataset the sample exists in (Train vs Dev vs Test set)
MSWC_ID: The id of the keyword recording
"""
KEYWORD = "Keyword"
# Keyword_id = "Keyword_id"
TED_SAMPLE_ID= "TEDLIUM_SampleID"
TED_DATASET_TYPE = "TEDLIUM_SET"
TED_TALK_ID = "TED_TALK_ID"
MSWC_ID = "MSWC_AudioID"
START_TIMESTAMP = "start_time"
END_TIMESTAMP = "end_time"
CONFIDENCE = "confidence"
CSV_header = [KEYWORD, TED_SAMPLE_ID,TED_TALK_ID, TED_DATASET_TYPE, MSWC_ID, START_TIMESTAMP, END_TIMESTAMP, CONFIDENCE]
############# --------- DATASETS --------------################
#TODO! Customise for each subset, in speaker-adaptation. Might require changing the metadata
class TEDLIUMCustom(tedlium.TEDLIUM):
"""
Please have a directory with the TEDLIUM dataset downloaded (release-3).
Instance Variables:
self._path:
self._filelist:
self._dict_path:
self._phoneme_dict:
Additional Instance Variables:
self.train_audio_sets
self.dev_audio_sets
self.test_audio_sets
"""
def __init__(self, root=DATASET_TEDLIUM_PATH, release= "release3", subset=None):
super().__init__(root, release=release)
path_to_speaker_adaptation = os.path.join(root, tedlium._RELEASE_CONFIGS[release]["folder_in_archive"], "speaker-adaptation")
train_audio_sets = set(line.strip() for line in open(os.path.join(path_to_speaker_adaptation, "train.lst")))
dev_audio_sets = set(line.strip() for line in open(os.path.join(path_to_speaker_adaptation, "dev.lst")))
test_audio_sets = set(line.strip() for line in open(os.path.join(path_to_speaker_adaptation, "test.lst")))
self.recordings_set_dict = {
"train": train_audio_sets,
"dev": dev_audio_sets,
"test": test_audio_sets
}
def __len__(self) -> int:
"""Get number of items.
Returns:
int: TEDLIUM Dataset Length
"""
return super().__len__()
def _load_audio(self, path: str, start_time: float, end_time: float, sample_rate: int = 16000, to_numpy=True) -> [Tensor, int]:
"""
        Returns audio data for the requested segment of the file.
        Args:
            path, start_time, end_time, sample_rate: forwarded to the parent loader
            to_numpy: if True, return the waveform as a numpy array instead of a Tensor
        Returns:
            (waveform, sample_rate) tuple
"""
waveform, sample_rate = super()._load_audio(path, start_time, end_time, sample_rate)
return (waveform.numpy(), sample_rate) if to_numpy else (waveform , sample_rate)
def __getitem__(self, sampleID: int) -> Dict:
"""Load the n-th sample from the dataset, where n is the audioFileID/fileSampleId
Please note that filesampleID is different from talk_id returned by the function, which denotes the entire recording instead
Args:
AudioFileID (int): The index of the sample to be loaded, which is also termed as the unique ID
Returns:
Dictionary: ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier, start_time, end_time)``
"""
fileid, line = self._filelist[sampleID]
return self._load_tedlium_item(fileid, line, self._path)
def get_audio_file(self, sampleID:int):
fileid, line = self._filelist[sampleID]
return os.path.join(self._path, "sph", fileid)
def _load_tedlium_item(self, fileid: str, line: int, path: str) -> Dict:
"""Loads a TEDLIUM dataset sample given a file name and corresponding sentence name. Functionality taken from original source code.
----> Custom function returns start time and end time as well
Args:
fileid (str): File id to identify both text and audio files corresponding to the sample
line (int): Line identifier for the sample inside the text file
path (str): Dataset root path
Returns:
Dictionary
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier, start_time, end_time)``
"""
transcript_path = os.path.join(path, "stm", fileid)
with open(transcript_path + ".stm") as f:
transcript = f.readlines()[line]
talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split(" ", 6)
wave_path = os.path.join(path, "sph", fileid)
waveform, sample_rate = self._load_audio(wave_path + self._ext_audio, start_time=start_time, end_time=end_time)
results_dict = {
"waveform": waveform,
"sample_rate": sample_rate,
"transcript": transcript,
"talk_id": talk_id,
"speaker_id":speaker_id ,
"identifier": identifier ,
"start_time": float(start_time),
"end_time": float(end_time),
}
return results_dict
class MultiLingualSpokenWordsEnglish():
MLCOMMONS_FOLDER_NAME = "Multilingual_Spoken_Words"
AUDIO_DIR_NAME="audio"
SPLITS_DIR_NAME="splits"
ALIGNMENTS_DIR_NAME="alignments"
def raise_directory_error(self):
raise RuntimeError(
"Please configure the path to the Spoken Keywords Dataset, with the directory name \"{}\", containing the three subfolders:".format(self.MLCOMMONS_FOLDER_NAME) \
+ "\n" + \
"\"{}\" for audio, \"{}\" for splits directory, and \"{}\" for alignemnts directory".format(self.AUDIO_DIR_NAME,self.SPLITS_DIR_NAME,self.ALIGNMENTS_DIR_NAME)
)
#TODO! Accept 4 kinds of values: Train vs test vs Dev vs "all"
def __init__(self, root=DATASET_MLCOMMONS_PATH, read_splits_file=False, subset="train") -> None:
"""
Loads the MLCommons MultiLingual dataset (English version).
read_splits_file is used to generate the keywords csv file
"""
if self.MLCOMMONS_FOLDER_NAME not in os.listdir(root):
self.raise_directory_error()
self._path = os.path.join(root, self.MLCOMMONS_FOLDER_NAME)
#Initialise the folder names into dictionaries
self._subfolder_names_dict = {
"audio" : self.AUDIO_DIR_NAME,
"splits" : self.SPLITS_DIR_NAME,
"alignments": self.ALIGNMENTS_DIR_NAME,
}
#Check if all three subfolders are in the directory. Exit if they are not all there
current_subfolders = os.listdir(self._path)
if not all([subfolder_name in current_subfolders for subfolder_name in self._subfolder_names_dict.values()]):
self.raise_directory_error()
#Retrieve the splits csv file from MSWC folder
if read_splits_file:
self._path_to_splits = os.path.join(self._path, self._subfolder_names_dict["splits"])
self.splits_df = pd.read_csv(os.path.join(self._path_to_splits, "en_splits.csv"))
if subset == "train":
self.splits_df = self.splits_df[self.splits_df["SET"] == "TRAIN"]
elif subset == "dev":
self.splits_df = self.splits_df[self.splits_df["SET"] == "VALID"]
else:
self.splits_df = self.splits_df[self.splits_df["SET"] == "TEST"]
            #Extra step to preprocess words to one form of apostrophe
self.splits_df["WORD"].replace("`|’", "'", regex=True, inplace=True)
#Retrieve the words that have been validated as True, affirming that the spoken audio matches the transcription
self.splits_df = self.splits_df[self.splits_df["VALID"] == True]
#Retrieve the keywords in the dataset
self.keywords = set(self.splits_df["WORD"].unique())
def _load_audio(self, path_to_audio, to_numpy=True):
"""Loads audio data from file given file path
Returns:
waveform: Tensor / np.array
sample_rate: int
"""
# waveform, sample_rate = torchaudio.load(path_to_audio)
# return (waveform.numpy(), sample_rate) if to_numpy else (waveform , sample_rate)
waveform, sample_rate = librosa.load(path_to_audio)
return (waveform, sample_rate) if to_numpy else (waveform , sample_rate)
def __getitem__(self, MSWC_AudioID) -> Dict:
"""Retrieves sample data from file given Audio ID
"""
path_to_audio = os.path.join(self._path,self.AUDIO_DIR_NAME ,"en", "clips", MSWC_AudioID)
waveform, sample_rate= self._load_audio(path_to_audio)
results_dict = {
"waveform": waveform,
"sample_rate": sample_rate ,
"MSWC_AudioID": MSWC_AudioID
}
return results_dict
#TODO! Create mapping between talk ids and datatype set (i.e not just sample mapping). Use the defined train_audio_sets, dev_audio_sets, test_audio_sets to help. Might be better to implement this in the TEDLIUMCustom instead of here.
class CTRLF_DatasetWrapper:
COLS_OUTPUT= ['TED_waveform', 'TED_sample_rate', 'TED_transcript', 'TED_talk_id', 'TED_start_time', 'TED_end_time', 'MSWC_audio_waveform', 'MSWC_sample_rate', 'MSWC_ID', 'keyword', 'keyword_start_time', 'keyword_end_time', 'confidence']
"""
Main class wrapper for both TEDLIUM dataset and MSWC dataset. Using the labels csv file, use the functions to retrieve audio samples and their corresponding keywords that was linked to.
Args:
single_keywords_label: Represents a toggle which defines what types of labels we are dealing with.
------------> NOTE: This was added for the time being as handling of multiple keywords may require some changes in the implementation of the code here and elsewhere
"""
def __init__(self,path_to_labels_csv=LABELS_KEYPHRASES_CSV_PATH, path_to_TED=DATASET_TEDLIUM_PATH, path_to_MSWC=DATASET_MLCOMMONS_PATH, single_keywords_labels=True):
self._path_to_TED = path_to_TED
self._path_to_MSWC = path_to_MSWC
self.single_keywords_labels = single_keywords_labels
#Initialise keyword dataframe
self.labels_df = pd.read_csv(path_to_labels_csv)
#Initialise Ted talk dataset
self.TED = TEDLIUMCustom(root=path_to_TED,release="release3")
#Initialise Keyword dataset
self.MSWC = MultiLingualSpokenWordsEnglish(root=path_to_MSWC)
def get(self, TEDSample_id: int, sampling_rate=16000):
"""
Given Ted Sample ID and the dataset type, return three separate corresponding dictionaries.
Returns: DataFrame
Headers:
['TED_waveform', 'TED_sample_rate', 'TED_transcript', 'TED_talk_id', 'TED_start_time', 'TED_end_time', 'MSWC_audio_waveform', 'MSWC_sample_rate', 'MSWC_ID', 'keyword', 'keyword_start_time', 'keyword_end_time', 'confidence']
"""
output_df = pd.DataFrame(columns=self.COLS_OUTPUT)
TED_results_dict = self.TED.__getitem__(TEDSample_id)
TEDSample_id = str(TEDSample_id) #TODO: Return pandas in appropriate form
label_rows = self.labels_df[self.labels_df[LabelsCSVHeaders.TED_SAMPLE_ID] == int(TEDSample_id)].reset_index()
if len(label_rows) == 0:
print("*" * 80)
print("NOT FOUND: \nSample TED Audio ID {} does not exist in the csv file".format(TEDSample_id))
print("If you think it should exist, please check the data types you are comparing with (i.e str vs int) and the csv file itself")
print("*" * 80)
output_rows = []
for i in range(0,len(label_rows)):
MSWC_results_dict = self.MSWC.__getitem__(label_rows[LabelsCSVHeaders.MSWC_ID].iloc[i])
#Resample Audio files into same sampling rate
TED_results_dict, MSWC_results_dict = self.resample_both_audio_files(TED_results_dict, MSWC_results_dict)
#Create new row
new_row = [ \
TED_results_dict["waveform"], \
TED_results_dict["sample_rate"],\
TED_results_dict["transcript"],\
TED_results_dict["talk_id"],\
TED_results_dict["start_time"],\
TED_results_dict["end_time"],\
MSWC_results_dict["waveform"],\
MSWC_results_dict["sample_rate"],\
label_rows[LabelsCSVHeaders.MSWC_ID].iloc[i],\
label_rows[LabelsCSVHeaders.KEYWORD].iloc[i],\
label_rows[LabelsCSVHeaders.START_TIMESTAMP].iloc[i],\
label_rows[LabelsCSVHeaders.END_TIMESTAMP].iloc[i], \
label_rows[LabelsCSVHeaders.CONFIDENCE].iloc[i], \
]
output_rows.append(new_row)
output_df = pd.DataFrame(data=output_rows, columns=self.COLS_OUTPUT)
return output_df
#TODO: Return more results like speaker_id, etc..
def get_verbose(TEDSample_id: int, sampling_rate = 16000):
pass
# Helper function: Preprocessing step to ensure both audio files are on the same sampling rate
def resample_both_audio_files(self, TED_results_dict, MSWC_results_dict, target_rate=16000):
TED_results_dict["waveform"] = resample_audio(TED_results_dict["waveform"], TED_results_dict["sample_rate"], target_rate=target_rate)
TED_results_dict["sample_rate"] = target_rate
MSWC_results_dict["waveform"] = resample_audio(MSWC_results_dict["waveform"], MSWC_results_dict["sample_rate"], target_rate=target_rate)
MSWC_results_dict["sample_rate"] = target_rate
return TED_results_dict, MSWC_results_dict
#Retrieve all the available "samples" of one specific audio file
def get_samples_given_talk_id(self, TED_talk_id, sort=False):
samples_df = self.labels_df[self.labels_df[LabelsCSVHeaders.TED_TALK_ID] == int(TED_talk_id)]
if sort:
            # sort the talk's samples chronologically (assumed intent of the sort flag)
            samples_df.sort_values(by=[LabelsCSVHeaders.START_TIMESTAMP], inplace=True)
return samples_df
if __name__== "__main__":
####### Testing CTRLF_DatasetWrapper
print("-"*20)
print("CTRL_F Wrapper")
x= CTRLF_DatasetWrapper(path_to_labels_csv = './Data/KeywordPerSample/labels_keywords.csv')
output_df = x.get(4)
print(output_df)
####### Testing TEDLIUM
print("-"*20)
print("Tedlium")
y = TEDLIUMCustom()
print(y.__len__())
print(y.__getitem__(1))
####### Testing MultiLingualSpokenWordsEnglish
print("-"*20)
print("Keyword Dataset")
# z = MultiLingualSpokenWordsEnglish(read_splits_file=True)
z= MultiLingualSpokenWordsEnglish(read_splits_file=False)
print(z.__getitem__("aachen/common_voice_en_20127845.opus"))
| StarcoderdataPython |
3330423 | <reponame>berdosi/Workflow-Analyzer
"""
Generate documentation for a UiPath project based on the annotations
within the files.
Create a folder deliverables/documentation, renders the contents in HTML.
"""
import logging
import os
from html import escape as e
from itertools import tee
from typing import Iterable, Optional
from analyzer.analyze.project import Project
from analyzer.analyze.workflow import Workflow, WorkflowArgument
TARGET_DIR = './deliverables/documentation'
TARGET_FILE = 'Documentation.html'
OUTPUT_TEMPLATE = """<!doctype html>
<html>
<head>
<meta charset='utf-8'>
<meta name="viewport" content="width=device-width">
<title>{0} - Documentation</title>
<style>
.annotation {{
white-space: pre-wrap;
}}
</style>
</head>
<body>
<header>
<h1>{0}</h1>
<p>Version: {1}</p>
<p class='description'>{2}</p>
<nav>
<dl>{5}
</dl>
</nav>
</header>
<main>
{3}
{4}
</main>
</body>
</html>
"""
OUTPUT_TEMPLATE_TOC_ITEM = """
<dt><a href="#{0}">{1}</a></dt>
<dd>{2}</dd>
"""
OUTPUT_TEMPLATE_WF = """<h2 id="{4}">{0}</h2>
<p>File name: {1}</p>
<p class='annotation'>{2}</p>
<h3>Arguments</h3>
<table>
<thead>
<tr>
<th>Name</th>
<th>Direction</th>
<th>Type</th>
<th>Annotation</th>
</tr>
</thead>
<tbody>
{3}
</tbody>
</table>
"""
OUTPUT_TEMPLATE_ARG = """
<tr>
<td>{0}</td>
<td>{1}</td>
<td>{2}</td>
<td class='annotation'>{3}</td>
</tr>
"""
OUTPUT_TEMPLATE_LINE = """<p>{0}</p>
"""
class Documentation():
"""Build HTML documentation from a Project and its workflows."""
@staticmethod
def make_directory() -> None:
"""If doesn't yet exist, create an output directory."""
if not os.path.isdir(TARGET_DIR):
os.mkdir(TARGET_DIR)
@staticmethod
def render_arguments(arguments: Iterable[WorkflowArgument]) -> str:
"""Generate a document fragment for the arguments of the workflow."""
render_argument = lambda arg: OUTPUT_TEMPLATE_ARG.format(
e(arg.name),
e(arg.direction),
e(arg.type),
e(arg.annotation if arg.annotation is not None else ""))
return str.join('', map(render_argument, arguments))
def get_workflow_relative_path(self, workflow: Workflow) -> str:
"""Get the path of the workflow file relative to project.json."""
if workflow is not None:
return workflow.file_path[len(self.project.project_directory):]
return ""
def get_workflow_id(self, workflow: Workflow) -> str:
"""Get an ID that can be used for links in the table of contents."""
relative_path = self.get_workflow_relative_path(workflow)
return relative_path.replace('-','--').replace('/','-').replace('.','_')
def render_workflow_documentation(self, workflow: Workflow) -> str:
"""Generate a document fragment containing documentation for a single workflow."""
if workflow is not None:
annotation = workflow.get_annotation() if workflow.get_annotation() is not None else ""
return OUTPUT_TEMPLATE_WF.format(
e(workflow.display_name or ""),
e(self.get_workflow_relative_path(workflow)),
e(annotation),
self.render_arguments(workflow.get_arguments()),
self.get_workflow_id(workflow))
return ""
def render_documentation_toc(self) -> str:
"""Render table of contents with links to sections."""
return self.render_documentation_toc_item(self.get_main_workflow()) + str.join(
'',
[ self.render_documentation_toc_item(wf)
for wf in self.__workflow_toc])
def render_documentation_toc_item(self, workflow: Workflow) -> str:
"""Render one line of the table of contents."""
if workflow is None:
return ""
display_name = workflow.display_name
logging.info(workflow.file_path)
return OUTPUT_TEMPLATE_TOC_ITEM.format(
self.get_workflow_id(workflow),
display_name or "",
self.get_workflow_relative_path(workflow))
def get_main_workflow(self) -> Optional[Workflow]:
"""Find the full path of the main workflow from the relative path found in project.json.
Return None in case there is none - e.g. in case of a library."""
if self.project.main is not None:
main_workflow_path = os.path.normpath(os.path.join(
self.project.project_directory, self.project.main))
for workflow_file in self.project.workflow_files:
if main_workflow_path == os.path.normpath(workflow_file.file_path):
return workflow_file
return None
def build_documentation(self):
"""Generate the documentation from the project."""
self.__workflow_toc, self.__workflow_doc = tee(self.project.workflow_files)
with open(self.target_path, 'w') as documentation_file:
main_workfow: Workflow = self.get_main_workflow()
documentation_file.write(OUTPUT_TEMPLATE.format(
e(self.project.name),
e(self.project.version),
e(self.project.description),
self.render_workflow_documentation(main_workfow),
str.join(
'',
[ self.render_workflow_documentation(wf)
for wf in self.__workflow_doc
if wf.file_path != main_workfow.file_path]),
self.render_documentation_toc()
))
logging.info(self.render_documentation_toc())
def __init__(self, project: Project):
logging.info(project.name)
self.project = project
self.make_directory()
self.target_path = os.path.join(TARGET_DIR, TARGET_FILE)
self.build_documentation()
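# Usage sketch -- the Project constructor signature here is an assumption, shown
# only to illustrate how this class is driven:
#
#   if __name__ == "__main__":
#       logging.basicConfig(level=logging.INFO)
#       project = Project("./MyProcess")   # hypothetical: path to a UiPath project
#       Documentation(project)             # writes deliverables/documentation/Documentation.html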
| StarcoderdataPython |
151482 | import os
import torch
import numpy as np
import json
import dgl
import constants
def read_partitions_file(part_file):
"""
Utility method to read metis partitions, which is the output of
pm_dglpart2
Parameters:
-----------
part_file : string
file name which is the output of metis partitioning
algorithm (pm_dglpart2, in the METIS installation).
This function expects each line in `part_file` to be formatted as
<global_nid> <part_id>
and the contents of this file are sorted by <global_nid>.
Returns:
--------
numpy array
array of part_ids and the idx is the <global_nid>
"""
partitions_map = np.loadtxt(part_file, delimiter=' ', dtype=np.int64)
#as a precaution sort the lines based on the <global_nid>
partitions_map = partitions_map[partitions_map[:,0].argsort()]
return partitions_map[:,1]
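# Illustration of the expected input: a metis partitions file holds one
# "<global_nid> <part_id>" pair per line, e.g.
#
#   0 1
#   1 0
#   2 1
#
# for which read_partitions_file(...) returns array([1, 0, 1]), i.e. part_ids
# indexed by global_nid.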
def read_json(json_file):
"""
Utility method to read a json file schema
Parameters:
-----------
json_file : string
file name for the json schema
Returns:
--------
dictionary, as serialized in the json_file
"""
with open(json_file) as schema:
val = json.load(schema)
return val
def get_node_types(schema):
"""
Utility method to extract node_typename -> node_type mappings
as defined by the input schema
Parameters:
-----------
schema : dictionary
Input schema from which the node_typename -> node_type
dictionary is created.
Returns:
--------
dictionary, list
dictionary with ntype <-> type_nid mappings
list of ntype strings
"""
#Get the node_id ranges from the schema
global_nid_ranges = schema['nid']
global_nid_ranges = {key: np.array(global_nid_ranges[key]).reshape(1,2)
for key in global_nid_ranges}
#Create an array with the starting id for each node_type and sort
ntypes = [(key, global_nid_ranges[key][0,0]) for key in global_nid_ranges]
ntypes.sort(key=lambda e: e[1])
#Create node_typename -> node_type dictionary
ntypes = [e[0] for e in ntypes]
ntypes_map = {e: i for i, e in enumerate(ntypes)}
return ntypes_map, ntypes
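# Illustration: for a schema whose 'nid' entry looks like
#
#   {"nid": {"paper": [0, 1000], "author": [1000, 5000]}}
#
# get_node_types(schema) returns ({'paper': 0, 'author': 1}, ['paper', 'author']),
# i.e. node type names ordered by the start of their global node-id ranges.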
def write_metadata_json(metadata_list, output_dir, graph_name):
"""
    Merge the json schemas from each rank on rank-0.
    This utility function is meant to be used on rank-0 to create the aggregated json file.
Parameters:
-----------
metadata_list : list of json (dictionaries)
a list of json dictionaries to merge on rank-0
output_dir : string
output directory path in which results are stored (as a json file)
graph-name : string
a string specifying the graph name
"""
#Initialize global metadata
graph_metadata = {}
#Merge global_edge_ids from each json object in the input list
edge_map = {}
x = metadata_list[0]["edge_map"]
for k in x:
edge_map[k] = []
for idx in range(len(metadata_list)):
edge_map[k].append(metadata_list[idx]["edge_map"][k][0])
graph_metadata["edge_map"] = edge_map
graph_metadata["etypes"] = metadata_list[0]["etypes"]
graph_metadata["graph_name"] = metadata_list[0]["graph_name"]
graph_metadata["halo_hops"] = metadata_list[0]["halo_hops"]
#Merge global_nodeids from each of json object in the input list
node_map = {}
x = metadata_list[0]["node_map"]
for k in x:
node_map[k] = []
for idx in range(len(metadata_list)):
node_map[k].append(metadata_list[idx]["node_map"][k][0])
graph_metadata["node_map"] = node_map
graph_metadata["ntypes"] = metadata_list[0]["ntypes"]
graph_metadata["num_edges"] = sum([metadata_list[i]["num_edges"] for i in range(len(metadata_list))])
graph_metadata["num_nodes"] = sum([metadata_list[i]["num_nodes"] for i in range(len(metadata_list))])
graph_metadata["num_parts"] = metadata_list[0]["num_parts"]
graph_metadata["part_method"] = metadata_list[0]["part_method"]
for i in range(len(metadata_list)):
graph_metadata["part-{}".format(i)] = metadata_list[i]["part-{}".format(i)]
with open('{}/{}.json'.format(output_dir, graph_name), 'w') as outfile:
json.dump(graph_metadata, outfile, sort_keys=True, indent=4)
def augment_edge_data(edge_data, part_ids):
"""
Add partition-id (rank which owns an edge) column to the edge_data.
Parameters:
-----------
edge_data : numpy ndarray
Edge information as read from the xxx_edges.txt file
part_ids : numpy array
array of part_ids indexed by global_nid
"""
edge_data[constants.OWNER_PROCESS] = part_ids[edge_data[constants.GLOBAL_DST_ID]]
def augment_node_data(node_data, part_ids):
"""
    Utility function to add auxiliary columns to the node_data numpy ndarray.
Parameters:
-----------
node_data : numpy ndarray
Node information as read from xxx_nodes.txt file
part_ids : numpy array
array of part_ids indexed by global_nid
"""
#add global_nids to the node_data
global_nids = np.arange(len(node_data[constants.GLOBAL_TYPE_NID]), dtype=np.int64)
node_data[constants.GLOBAL_NID] = global_nids
#add owner proc_ids to the node_data
proc_ids = part_ids[node_data[constants.GLOBAL_NID]]
node_data[constants.OWNER_PROCESS] = proc_ids
def read_nodes_file(nodes_file):
"""
Utility function to read xxx_nodes.txt file
Parameters:
-----------
nodesfile : string
Graph file for nodes in the input graph
Returns:
--------
dictionary
Nodes data stored in dictionary where keys are column names
and values are the columns from the numpy ndarray as read from the
xxx_nodes.txt file
"""
if nodes_file == "" or nodes_file == None:
return None
# Read the file from here.
# Assuming the nodes file is a numpy file
# nodes.txt file is of the following format
# <node_type> <weight1> <weight2> <weight3> <weight4> <global_type_nid> <attributes>
# For the ogb-mag dataset, nodes.txt is of the above format.
nodes_data = np.loadtxt(nodes_file, delimiter=' ', dtype='int64')
nodes_datadict = {}
nodes_datadict[constants.NTYPE_ID] = nodes_data[:,0]
nodes_datadict[constants.GLOBAL_TYPE_NID] = nodes_data[:,5]
return nodes_datadict
def read_edges_file(edge_file, edge_data_dict):
"""
Utility function to read xxx_edges.txt file
Parameters:
-----------
edge_file : string
Graph file for edges in the input graph
Returns:
--------
dictionary
edge data as read from xxx_edges.txt file and columns are stored
in a dictionary with key-value pairs as column-names and column-data.
"""
if edge_file == "" or edge_file == None:
return None
#Read the file from here.
#<global_src_id> <global_dst_id> <type_eid> <etype> <attributes>
# global_src_id -- global idx for the source node ... line # in the graph_nodes.txt
# global_dst_id -- global idx for the destination id node ... line # in the graph_nodes.txt
edge_data = np.loadtxt(edge_file , delimiter=' ', dtype = 'int64')
if (edge_data_dict == None):
edge_data_dict = {}
edge_data_dict[constants.GLOBAL_SRC_ID] = edge_data[:,0]
edge_data_dict[constants.GLOBAL_DST_ID] = edge_data[:,1]
edge_data_dict[constants.GLOBAL_TYPE_EID] = edge_data[:,2]
edge_data_dict[constants.ETYPE_ID] = edge_data[:,3]
else:
edge_data_dict[constants.GLOBAL_SRC_ID] = \
np.concatenate((edge_data_dict[constants.GLOBAL_SRC_ID], edge_data[:,0]))
edge_data_dict[constants.GLOBAL_DST_ID] = \
np.concatenate((edge_data_dict[constants.GLOBAL_DST_ID], edge_data[:,1]))
edge_data_dict[constants.GLOBAL_TYPE_EID] = \
np.concatenate((edge_data_dict[constants.GLOBAL_TYPE_EID], edge_data[:,2]))
edge_data_dict[constants.ETYPE_ID] = \
np.concatenate((edge_data_dict[constants.ETYPE_ID], edge_data[:,3]))
return edge_data_dict
def read_node_features_file(nodes_features_file):
"""
Utility function to load tensors from a file
Parameters:
-----------
nodes_features_file : string
Features file for nodes in the graph
Returns:
--------
dictionary
mappings between ntype and list of features
"""
node_features = dgl.data.utils.load_tensors(nodes_features_file, False)
return node_features
def read_edge_features_file(edge_features_file):
"""
Utility function to load tensors from a file
Parameters:
-----------
edge_features_file : string
Features file for edges in the graph
Returns:
--------
dictionary
mappings between etype and list of features
"""
edge_features = dgl.data.utils.load_tensors(edge_features_file, True)
return edge_features
def write_node_features(node_features, node_file):
"""
Utility function to serialize node_features in node_file file
Parameters:
-----------
node_features : dictionary
dictionary storing ntype <-> list of features
node_file : string
File in which the node information is serialized
"""
dgl.data.utils.save_tensors(node_file, node_features)
def write_edge_features(edge_features, edge_file):
"""
Utility function to serialize edge_features in edge_file file
Parameters:
-----------
edge_features : dictionary
dictionary storing etype <-> list of features
edge_file : string
File in which the edge information is serialized
"""
dgl.data.utils.save_tensors(edge_file, edge_features)
def write_graph_dgl(graph_file, graph_obj):
"""
Utility function to serialize graph dgl objects
Parameters:
-----------
graph_obj : dgl graph object
graph dgl object, as created in convert_partition.py, which is to be serialized
graph_file : string
File name in which graph object is serialized
"""
dgl.save_graphs(graph_file, [graph_obj])
def write_dgl_objects(graph_obj, node_features, edge_features, output_dir, part_id):
"""
Wrapper function to create dgl objects for graph, node-features and edge-features
graph_obj : dgl object
graph dgl object as created in convert_partition.py file
node_features : dgl object
Tensor data for node features
edge_features : dgl object
Tensor data for edge features
"""
part_dir = output_dir + '/part' + str(part_id)
os.makedirs(part_dir, exist_ok=True)
write_graph_dgl(os.path.join(part_dir ,'part'+str(part_id)), graph_obj)
if node_features != None:
write_node_features(node_features, os.path.join(part_dir, "node_feat.dgl"))
if (edge_features != None):
write_edge_features(edge_features, os.path.join(part_dir, "edge_feat.dgl"))
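# End-to-end sketch for a single partition, tying the helpers above together; the
# file names are placeholders rather than outputs of any particular tool run.
#
#   part_ids = read_partitions_file('graph.part') # global_nid -> part_id
#   node_data = read_nodes_file('graph_nodes.txt')
#   augment_node_data(node_data, part_ids) # adds GLOBAL_NID and OWNER_PROCESS
#   edge_data = read_edges_file('graph_edges.txt', None)
#   augment_edge_data(edge_data, part_ids) # adds OWNER_PROCESS per edge
#   # ... build the per-partition DGL graph object elsewhere, then persist it:
#   # write_dgl_objects(graph_obj, node_feats, edge_feats, 'out_dir', part_id=0)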
| StarcoderdataPython |
4816873 | n1 = int(input('Um valor:'))
n2 = int(input('Outro valor:'))
soma = n1 + n2
multiplicacao = n1 * n2
divisao = n1 / n2
divisaoint = n1 // n2
potencia = n1 ** n2
print('A soma é {} , \no produto é {} e a divisão é {:.3f}'.format(soma, multiplicacao, divisao), end=' ')
print('Divisão inteira {} e potencia {}'.format(divisaoint, potencia)) | StarcoderdataPython |
3383555 | import contextlib
import ctypes
import sys
import time
import sdl2
import sdl2.ext
import sdl2.examples.opengl
import skia
from OpenGL import GL
WIDTH, HEIGHT = 1280, 720
fps_font = None
def main_loop(state):
global fps_font
if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != 0:
print(sdl2.SDL_GetError())
return -1
window = sdl2.SDL_CreateWindow(b"WindowServer",
sdl2.SDL_WINDOWPOS_CENTERED,
sdl2.SDL_WINDOWPOS_CENTERED, WIDTH, HEIGHT,
sdl2.SDL_WINDOW_OPENGL | sdl2.SDL_WINDOW_RESIZABLE)
if not window:
print(sdl2.SDL_GetError())
return -1
context = sdl2.SDL_GL_CreateContext(window)
# sdl2.SDL_GL_SetSwapInterval(0)
event = sdl2.SDL_Event()
running = True
last_measurement = time.time()
fps_counter = 0
compute_frame_times = 0.0
draw_frame_times = 0.0
frame_counter = 0
fps_str = ''
gl_context = skia.GrDirectContext.MakeGL()
assert context is not None
while running:
with skia_surface(window, gl_context) as surface: # type: skia.Surface
resized = False
state.resize(surface.width(), surface.height())
while running and not resized:
with surface as canvas: # type: skia.Canvas
while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
if event.type == sdl2.SDL_QUIT:
running = False
break
elif event.type == sdl2.SDL_MOUSEBUTTONDOWN:
state.handle_mouse_down(event.button.x, event.button.y)
elif event.type == sdl2.SDL_MOUSEWHEEL:
x = ctypes.c_int()
y = ctypes.c_int()
sdl2.SDL_GetMouseState(x, y)
state.handle_scroll(x.value, y.value, event.wheel.x, event.wheel.y)
elif event.type == sdl2.SDL_WINDOWEVENT:
if event.window.event == sdl2.SDL_WINDOWEVENT_RESIZED:
# w, h = event.window.data1, event.window.data2
# print(f"Resized to {w} x {h}")
# state.resize(event.window.data1, event.window.data2)
resized = True
start = time.time()
state.draw(canvas)
compute_frame_times += time.time() - start
# Draw FPS meter
fps_paint = skia.Paint(skia.Color(255, 255, 255, 60))
if fps_font is None:
fps_font = skia.Font(skia.Typeface('Cantarell'), 12)
canvas.drawString(fps_str, 6, 16, fps_font, fps_paint)
canvas.flush()
draw_frame_times += time.time() - start
frame_counter += 1
fps_counter += 1
elapsed = time.time() - last_measurement
if elapsed > 0.5:
fps = fps_counter / elapsed
c_ft = compute_frame_times / frame_counter * 1000
d_ft = draw_frame_times / frame_counter * 1000
fps_str = f'FPS: {fps:.01f} C.FT: {c_ft:.02f}ms D.FT: {d_ft:.02f}ms'
fps_counter = 0
last_measurement = time.time()
sdl2.SDL_GL_SwapWindow(window)
sdl2.SDL_Delay(1)
gl_context.abandonContext()
sdl2.SDL_GL_DeleteContext(context)
sdl2.SDL_DestroyWindow(window)
sdl2.SDL_Quit()
return 0
@contextlib.contextmanager
def skia_surface(window: sdl2.SDL_Window, gl_context):
w = ctypes.c_int()
h = ctypes.c_int()
sdl2.SDL_GetWindowSize(window, w, h)
backend_render_target = skia.GrBackendRenderTarget(
w.value,
h.value,
0, # sampleCnt
0, # stencilBits
skia.GrGLFramebufferInfo(0, GL.GL_RGBA8))
surface = skia.Surface.MakeFromBackendRenderTarget(
gl_context, backend_render_target, skia.kBottomLeft_GrSurfaceOrigin,
skia.kRGBA_8888_ColorType, skia.ColorSpace.MakeSRGB())
assert surface is not None
yield surface
| StarcoderdataPython |
3297714 | <reponame>BrianLusina/PyCharm
from .enums import VehicleSize
from .parking_spot import ParkingSpot
from .vehicle import Vehicle
class Car(Vehicle):
def __init__(self, license_plate: str):
super().__init__(
vehicle_size=VehicleSize.COMPACT, license_plate=license_plate, spot_size=1
)
def can_fit_in_spot(self, spot: ParkingSpot) -> bool:
        return (
            spot.spot_size == VehicleSize.LARGE
            or spot.spot_size == VehicleSize.COMPACT
        )
| StarcoderdataPython |
115579 | <reponame>rpmoseley/clubadmin
'''
This module provides the support for the Options table, and enables the application
to work with a simple mapping given the option text or number without having to access
the underlying database information directly.
'''
import apsw
from ...config import configdb
# Define the default set of options for the program
_default_options = {
'gender' : ('Male', 'Female', 'Other'),
'member' : ('Normal', 'Life', 'New', 'Honourable', 'Banned', 'In-arrears', 'Inactive'),
}
def load_options():
'''Load the Options table and create a mapping for each of the areas found
within the table enabling the translation of internal integers into the
external textual strings that are used by the application'''
mapping = dict()
try:
        conn = apsw.Connection(configdb.options)
rowset = conn.cursor().execute('select * from Options')
print(rowset.getdescription())
#TODO: Complete this function
    except Exception:  # fall back to the built-in defaults if the database is unavailable
mapping = _default_options
return mapping
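# Until the Options table handling above is completed, load_options() falls back to
# _default_options, so callers can expect a mapping shaped like
#
#   {'gender': ('Male', 'Female', 'Other'),
#    'member': ('Normal', 'Life', ...)}
#
# where (per the module docstring) each entry's index doubles as the option number
# for its option text.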
| StarcoderdataPython |
1601414 | #!/usr/bin/python3
import requests
import math
import os
import json
import threading
import sys
from mutagen.id3 import ID3, APIC, TIT2, TPE1, COMM
from tenacity import retry, stop_after_attempt
from requests import get, head
import platform
media_id = input('media_id:')
ng_str = r'\/:*?"<>|' # characters that are illegal in Windows file names
translate_str = r"¦¦:x?'《》¦" # replacement characters; change these if you dislike the renaming
trantab = str.maketrans(ng_str,translate_str)
# This snippet is likewise adapted from a Baidu/CSDN post
# Link: https://blog.csdn.net/weixin_38587484/article/details/97802917
def SetMp3Info(path, info):
songFile = ID3(path)
    songFile['APIC'] = APIC( # embed the cover image
encoding=3,
mime='image/png',
type=3,
desc=u'Cover',
data=info['picData']
)
    songFile['TIT2'] = TIT2( # set the song title
encoding=3,
text=info['title']
)
    songFile['TXXX'] = COMM( # store the detailed description
encoding=3,
text=info['desc']
)
    songFile['TPE1'] = TPE1( # set the lead performer/artist
encoding=3,
text=info['artist']
)
songFile.save()
print('开始处理')
def get_video_list(media_id):
media_id = str(media_id)
print('获取收藏夹数据')
like_list_info = requests.get(f'https://api.bilibili.com/x/v3/fav/resource/list?media_id={media_id}&pn=1&ps=20&jsonp=jsonp').json()
video_count = int(like_list_info.get('data').get('info').get('media_count'))
page_count = math.ceil(video_count/20)
page = 1
like_list = []
while True:
medias = requests.get(f'https://api.bilibili.com/x/v3/fav/resource/list?media_id={media_id}&pn={page}&ps=20&jsonp=jsonp').json().get('data').get('medias')
for i in medias:
like_list.append(i.get('bvid'))
if page == page_count:
break
else:
page = page + 1
return like_list
def get_like_list_title(media_id):
media_id = str(media_id)
print('获取收藏夹标题')
like_list_info = requests.get(f'https://api.bilibili.com/x/v3/fav/resource/list?media_id={media_id}&pn=1&ps=20&jsonp=jsonp').json()
title = like_list_info.get('data').get('info').get('title')
return title
def get_video_info(bvid):
video_info = requests.get(f'http://api.bilibili.com/x/web-interface/view?bvid={bvid}').json()
title = video_info.get('data').get('title')
pic = video_info.get('data').get('pic')
owner = video_info.get('data').get('owner').get('name')
desc = video_info.get('data').get('desc')
pages_cid = []
pages_title = {}
for i in video_info.get('data').get('pages'):
pages_cid.append(i.get('cid'))
if i.get('part') != '':
pages_title[i.get('cid')] = i.get('part').translate(trantab)
else:
pages_title[i.get('cid')] = title.translate(trantab)
return {'title':title,'pic':pic,'pages_cid':pages_cid,'pages_title':pages_title,'owner':owner,'desc':desc}
@retry(stop=stop_after_attempt(1))
def download_video(bvid,cid,like_list_title,mthead):
cid = str(cid)
info = get_video_info(bvid)
print(f'获取视频数据({bvid})')
video_download_info = requests.get(f'http://api.bilibili.com/x/player/playurl?bvid={bvid}&cid={cid}').json()
video_download_url = []
for i in video_download_info.get('data').get('durl'):
video_download_url.append(i.get('url'))
n = 1
title = info.get('title').translate(trantab)
page_title = info.get('pages_title').get(int(cid))
page_num = int(info.get('pages_cid').index(int(cid)))+1
for i in video_download_url:
print(f'正在下载:{title}-{page_title}-{page_num}')
if mthead == True:
if platform.system() == 'Windows':
os.system(f'aria2c.exe "{i}" -d "tmp" -s16 -x16 -k1M -j16 -o "tmp_{n}.flv" --referer "https://www.bilibili.com" -U "my-app/0.0.1" --file-allocation=none')
else:
os.system(f'aria2c "{i}" -d "tmp" -s16 -x16 -k1M -j16 -o "tmp_{n}.flv" --referer "https://www.bilibili.com" -U "my-app/0.0.1" --file-allocation=none')
n = n + 1
else:
video = requests.get(i,headers={'user-agent': 'my-app/0.0.1', 'referer': 'https://www.bilibili.com'}).content
video_file = open(f'tmp/tmp_{n}.flv','wb')
video_file.write(video)
video_file.close()
n = n + 1
video_part_list = os.listdir('tmp')
video_part_list_str = ''
for i in video_part_list:
video_part_list_str = video_part_list_str + "file '" + i +"'\n"
open('tmp/filename.txt','w').write(video_part_list_str)
print('转换中...')
if platform.system() == 'Windows':
os.system('ffmpeg.exe -f concat -i tmp/filename.txt -c copy tmp/output.aac')
else:
os.system('ffmpeg -f concat -i tmp/filename.txt -c copy tmp/output.aac')
path = f'download/{like_list_title}'
try:
os.makedirs(f'download/{like_list_title}')
except:
pass
if platform.system() == 'Windows':
os.system(f'ffmpeg.exe -i tmp/output.aac {path}/output.mp3')
else:
os.system(f'ffmpeg -i tmp/output.aac {path}/output.mp3')
pic_data = requests.get(info.get('pic')).content
artist = info.get('owner')
desc = info.get('desc')
media_info ={'picData': pic_data, 'title': title, 'artist': artist, 'desc': desc}
try:
if len(info.get('pages_cid')) != 1:
os.rename(f'{path}/output.mp3',f'{path}/{title}-{page_title}-{page_num}.mp3')
songPath = f'{path}/{title}-{page_title}-{page_num}.mp3'
else:
os.rename(f'{path}/output.mp3', f'{path}/{title}.mp3')
songPath = f'{path}/{title}.mp3'
SetMp3Info(songPath, media_info)
print('写入ID3Tag...')
except:
os.remove(f'{path}/output.mp3')
already_list = json.loads(open(f'download/{like_list_title_get}/info.json', 'r').read())
already_list.get('info').append(bvid)
for i in os.listdir('tmp'):
os.remove(f'tmp/{i}')
open(f'{path}/info.json','w').write(json.dumps(already_list))
like_list = get_video_list(media_id)
like_list_title_get = get_like_list_title(media_id)
try:
os.makedirs('download')
os.makedirs('tmp')
except:
pass
try:
for i in os.listdir('tmp'):
os.remove(f'tmp/{i}')
except:
pass
try:
info = open(f'download/{like_list_title_get}/info.json','r').read()
already_list_file = json.loads(info)
for i in already_list_file.get('info'):
try:
like_list.remove(str(i))
except:
pass
except:
try:
os.makedirs(f'download/{like_list_title_get}')
except:
pass
error = input('在获取下载记录时出现错误,是否创建新的下载记录?(Y/N):').upper()
if error == 'Y':
init_json = json.dumps({'info': []})
open(f'download/{like_list_title_get}/info.json','w').write(init_json)
print('创建成功')
elif error == 'N':
raise
else:
print('wdnmd你选的什么鬼东西')
sys.exit()
thead = input('多线程(实验性)?(Y/N):').upper()
if thead == 'Y':
mthead = True
elif thead == 'N':
mthead = False
else:
print('wdnmd你选的什么鬼东西')
sys.exit()
for bvid in like_list:
info = get_video_info(bvid)
for cid in info.get('pages_cid'):
download_video(bvid,cid,like_list_title_get,mthead)
print('处理完成')
| StarcoderdataPython |
3330627 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 29 16:24:16 2016
@author: DanielleT
"""
import pandas as pd
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import PIL
from PIL import Image
import os
import math
from pandas import *
import numpy as np
from sklearn.svm import SVC
from sklearn.preprocessing import MultiLabelBinarizer
import time
from sklearn import datasets
test_photos = pd.read_csv('../../downloads/input/test_photo_to_biz.csv',sep=',')
y = pd.read_csv('../Features_data/labels_train_y.csv', header=None)
x = pd.read_csv('../Features_data/feats_train_x.csv',header=None)
tr = pd.read_csv('../../downloads/input/train.csv')
#del(x['Unnamed: 0'])
#a = tr['labels'][pd.isnull(tr['labels'])==True]
#a.index[:]
#xx = x.drop(a.index)
#del(xx['Unnamed: 0'])
S = OneVsRestClassifier(LinearSVC(random_state=0)).fit(x, y)
print S.score(x,y)
newx = pd.read_csv('../Features_data/feats_test_x.csv',header=None)
#del(newx[0])
predictions = S.predict(newx) #newx[1:]
np.savetxt("pred.csv", predictions, delimiter=",")
this = pd.DataFrame(data=predictions,columns=None) # 1st row as the column names
this.to_csv('pred.csv',index=False)
#print outputfile
bizz = test_photos.business_id.unique()
a=0
arr = [0]*2
while True:
strlabels = ""
for i in range(0,9):
pred = predictions[a]
lab = pred[i]
if lab == 1:
strlabels = strlabels + " " + str(i)
currarr = [bizz[a],strlabels.lstrip()]
arr = np.vstack((np.array(arr),np.array(currarr)))
if bizz[a]==bizz[-1]:
break
a = a+1
arr[0]=['business_id','labels']
this = pd.DataFrame(data=arr[1:,:],columns=arr[0,:]) # 1st row as the column names
this.to_csv('feats_sub_rs1002.csv',index=False) | StarcoderdataPython |
28672 | import json
import time
import requests
import re
from flask import Flask, render_template, jsonify
from pyecharts.charts import Map, Timeline,Kline,Line,Bar,WordCloud
from pyecharts import options as opts
from pyecharts.globals import SymbolType
app = Flask(__name__)
# Hard-coded CN -> EN country-name map (works around the Google Translate API call limit)
cn_to_en = {'安哥拉': 'Angola', '阿富汗': 'Afghanistan', '阿尔巴尼亚': 'Albania', '阿尔及利亚': 'Algeria', '安道尔共和国': 'Andorra', '安圭拉岛': 'Anguilla', '安提瓜和巴布达': 'Antigua and Barbuda',
'阿根廷': 'Argentina', '亚美尼亚': 'Armenia', '阿森松': 'Ascension', '澳大利亚': 'Australia', '奥地利': 'Austria', '阿塞拜疆': 'Azerbaijan', '巴哈马': 'Bahamas', '巴林': 'Bahrain',
'孟加拉国': 'Bangladesh', '巴巴多斯': 'Barbados', '白俄罗斯': 'Belarus', '比利时': 'Belgium', '伯利兹': 'Belize', '贝宁': 'Benin', '百慕大群岛': 'Bermuda Is', '玻利维亚': 'Bolivia',
'博茨瓦纳': 'Botswana', '巴西': 'Brazil', '文莱': 'Brunei', '保加利亚': 'Bulgaria', '布基纳法索': 'Burkina Faso', '缅甸': 'Burma', '布隆迪': 'Burundi', '喀麦隆': 'Cameroon',
'加拿大': 'Canada', '开曼群岛': 'Cayman Is', '中非共和国': 'Central African Republic', '乍得': 'Chad', '智利': 'Chile', '中国': 'China', '哥伦比亚': 'Colombia', '刚果': 'Congo',
'库克群岛': 'Cook Is', '哥斯达黎加': 'Costa Rica', '古巴': 'Cuba', '塞浦路斯': 'Cyprus', '捷克': 'Czech Republic', '丹麦': 'Denmark', '吉布提': 'Djibouti', '多米尼加共和国': 'Dominica Rep',
'厄瓜多尔': 'Ecuador', '埃及': 'Egypt', '萨尔瓦多': 'EI Salvador', '爱沙尼亚': 'Estonia', '埃塞俄比亚': 'Ethiopia', '斐济': 'Fiji', '芬兰': 'Finland', '法国': 'France', '法属圭亚那': 'French Guiana',
'法属玻利尼西亚': 'French Polynesia', '加蓬': 'Gabon', '冈比亚': 'Gambia', '格鲁吉亚': 'Georgia', '德国': 'Germany', '加纳': 'Ghana', '直布罗陀': 'Gibraltar', '希腊': 'Greece', '格林纳达': 'Grenada',
'关岛': 'Guam', '危地马拉': 'Guatemala', '几内亚': 'Guinea', '圭亚那': 'Guyana', '海地': 'Haiti', '洪都拉斯': 'Honduras', '香港': 'Hongkong', '匈牙利': 'Hungary', '冰岛': 'Iceland', '印度': 'India',
'印度尼西亚': 'Indonesia', '伊朗': 'Iran', '伊拉克': 'Iraq', '爱尔兰':'Ireland', '以色列': 'Israel', '意大利': 'Italy', '科特迪瓦': 'Ivory Coast', '牙买加': 'Jamaica', '日本': 'Japan', '约旦': 'Jordan',
'柬埔寨': 'Kampuchea (Cambodia )', '哈萨克斯坦': 'Kazakstan', '肯尼亚': 'Kenya', '韩国': 'Korea', '科威特': 'Kuwait', '吉尔吉斯坦': 'Kyrgyzstan', '老挝': 'Laos', '拉脱维亚': 'Latvia', '黎巴嫩': 'Lebanon',
'莱索托': 'Lesotho', '利比里亚': 'Liberia', '利比亚': 'Libya', '列支敦士登': 'Liechtenstein', '立陶宛': 'Lithuania', '卢森堡': 'Luxembourg', '澳门': 'Macao', '马达加斯加': 'Madagascar',
'马拉维': 'Malawi', '马来西亚': 'Malaysia', '马尔代夫': 'Maldives', '马里': 'Mali', '马耳他': 'Malta', '马里亚那群岛': 'Mariana Is', '马提尼克': 'Martinique', '毛里求斯': 'Mauritius', '墨西哥': 'Mexico',
'摩尔多瓦': 'Moldova', '摩纳哥': 'Monaco', '蒙古': 'Mongolia', '蒙特塞拉特岛': 'Montserrat Is', '摩洛哥': 'Morocco', '莫桑比克': 'Mozambique', '纳米比亚': 'Namibia', '瑙鲁': 'Nauru', '尼泊尔': 'Nepal',
'荷属安的列斯': 'Netheriands Antilles', '荷兰': 'Netherlands', '新西兰': 'New Zealand', '尼加拉瓜': 'Nicaragua', '尼日尔': 'Niger', '尼日利亚': 'Nigeria', '朝鲜': 'North Korea', '挪威': 'Norway',
'阿曼': 'Oman', '巴基斯坦': 'Pakistan', '巴拿马':'Panama', '巴布亚新几内亚': 'Papua New Cuinea', '巴拉圭': 'Paraguay', '秘鲁': 'Peru', '菲律宾': 'Philippines', '波兰': 'Poland', '葡萄牙': 'Portugal',
'波多黎各': 'Puerto Rico', '卡塔尔': 'Qatar', '留尼旺': 'Reunion', '罗马尼亚': 'Romania', '俄罗斯': 'Russia', '圣卢西亚': 'St.Lucia', '圣文森特岛': 'Saint Vincent', '东萨摩亚(美)': 'Samoa Eastern',
'西萨摩亚': 'Samoa Western', '圣马力诺': 'San Marino', '圣多美和普林西比': 'Sao Tome and Principe', '沙特阿拉伯': 'Saudi Arabia', '塞内加尔': 'Senegal', '塞舌尔': 'Seychelles', '塞拉利昂': 'Sierra Leone',
'新加坡': 'Singapore', '斯洛伐克': 'Slovakia', '斯洛文尼亚': 'Slovenia', '所罗门群岛': 'Solomon Is', '索马里': 'Somali', '南非': 'South Africa', '西班牙': 'Spain', '斯里兰卡': 'SriLanka',
'圣文森特': 'St.Vincent', '苏丹': 'Sudan', '苏里南': 'Suriname', '斯威士兰': 'Swaziland', '瑞典': 'Sweden', '瑞士': 'Switzerland', '叙利亚': 'Syria', '台湾省': 'Taiwan', '塔吉克斯坦': 'Tajikstan',
'坦桑尼亚': 'Tanzania', '泰国': 'Thailand', '多哥': 'Togo', '汤加': 'Tonga', '特立尼达和多巴哥': 'Trinidad and Tobago', '突尼斯': 'Tunisia', '土耳其': 'Turkey', '土库曼斯坦': 'Turkmenistan',
'乌干达': 'Uganda', '乌克兰': 'Ukraine', '阿联酋': 'United Arab Emirates', '英国': 'United Kiongdom', '美国': 'United States', '乌拉圭': 'Uruguay', '乌兹别克斯坦': 'Uzbekistan',
'委内瑞拉': 'Venezuela', '越南': 'Vietnam', '也门': 'Yemen', '南斯拉夫': 'Yugoslavia', '津巴布韦': 'Zimbabwe', '扎伊尔': 'Zaire', '赞比亚': 'Zambia','克罗地亚':'Croatia','北马其顿':'North Macedonia'}
def update_news():
url = 'https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E8%82%BA%E7%82%8E'
r = json.loads(requests.get(url).text)
top10 = r['Result'][0]['items_v2'][0]['aladdin_res']['DisplayData']['result']['items'][:5] #list
news_data = []
for r in top10:
news_data.append({
'title': r['eventDescription'],
'sourceUrl': r['eventUrl'],
            'infoSource': time.strftime('%m-%d %H:%M:%S', time.localtime(int(r['eventTime']))) + ' ' + r['siteName']  # event time + news source
        })  # build the new list of news items
return news_data
def update_overall():
url = 'http://lab.isaaclin.cn/nCoV/api/overall'
    overall_data = json.loads(requests.get(url).text)  # parse the standard JSON response
    overall_data['time'] = time.strftime("%m-%d %H:%M", time.localtime(time.time()))  # current time
# time.time() --> '1580232854.7124019'
## time.localtime(time.time()) --> 'time.struct_time(tm_year=2020, tm_mon=1, tm_mday=29, tm_hour=1, tm_min=34, tm_sec=36, tm_wday=2, tm_yday=29, tm_isdst=0)'
    ### time.strftime("%m-%d %H:%M", time.localtime(time.time())) ---> '01-29 01:37' gets the current month, day, hour and minute
return overall_data
#
def update_hotnews():
url = 'https://i-lq.snssdk.com/api/feed/hotboard_online/v1/?is_in_channel=1&count=5&fe_source=news_hot&tab_name=stream&is_web_refresh=1&client_extra_params={%22hot_board_source%22:%22news_hot%22,%22fe_version%22:%22v10%22}&extra={%22CardStyle%22:0,%22JumpToWebList%22:true}&category=hotboard_online&update_version_code=75717'
    r = requests.get(url).text  # raw response text; titles are extracted with a regex below
data = re.findall(r'title\\":\\"(.*?)\\',r)[:-1]
# time.time() --> '1580232854.7124019'
## time.localtime(time.time()) --> 'time.struct_time(tm_year=2020, tm_mon=1, tm_mday=29, tm_hour=1, tm_min=34, tm_sec=36, tm_wday=2, tm_yday=29, tm_isdst=0)'
    ### time.strftime("%m-%d %H:%M", time.localtime(time.time())) ---> '01-29 01:37' gets the current month, day, hour and minute
return data #list
def word_cloud() -> WordCloud:
url = 'https://i-lq.snssdk.com/api/feed/hotboard_online/v1/?is_in_channel=1&count=10&fe_source=news_hot&tab_name=stream&is_web_refresh=1&client_extra_params={%22hot_board_source%22:%22news_hot%22,%22fe_version%22:%22v10%22}&extra={%22CardStyle%22:0,%22JumpToWebList%22:true}&category=hotboard_online&update_version_code=75717'
r = requests.get(url).text #标准的json数据格式化
data = re.findall(r'title\\":\\"(.*?)\\',r)[:-1]
datanum = [8,7,6,5,5,4,4,2,1,1]
words = [w for w in zip(data,datanum)]
c = (
WordCloud()
.add("", words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
.set_global_opts(title_opts=opts.TitleOpts(title="WordCloud-shape-diamond"))
)
return c
def update_china_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
    data = json.loads(r_data['data'])  # parse the nested JSON payload into a dict
p_data = {}
#print(data['areaTree'][0]['children'][0])
    for i in data['areaTree'][0]['children']:  # each province
p_data[i['name']] = i['total']['confirm']
    # sort by confirmed count, descending
p_data= sorted(p_data.items(), key=lambda x: x[1], reverse=True)
#print(p_data)
return p_data
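# Note: the sorted list of (province, confirmed) pairs returned above is the
# shape that china_map() below expects, and is what the /map route passes to it.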
def update_china_heal_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
    data = json.loads(r_data['data'])  # parse the nested JSON payload into a dict
p_data = {}
#print(data['areaTree'][0]['children'][0])
    for i in data['areaTree'][0]['children']:  # each province
p_data[i['name']] = i['total']['confirm'] - i['total']['dead'] - i['total']['heal']
    # sort by confirmed count, descending
p_data= sorted(p_data.items(), key=lambda x: x[1], reverse=True)
#print(p_data)
return p_data
def china_map(data)-> Map:
opt= [
{"min":1001,"color":'#731919'},
{"min":500,"max":1000,"color":'red'},
{"min":100,"max":499,"color":'#e26061'},
{"min":10,"max":99,"color":'#f08f7f'},
{"min":1,"max":9,"color":'#ffb86a'},
{"value":0,"color":'#ffffff'}
]
c = (
Map()
.add(
"确诊人数", data, "china", is_map_symbol_show=False,
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False,font_size=8))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(max_=1000,is_piecewise=True,pieces=opt),
legend_opts=opts.LegendOpts(is_show=False),
#title_opts=opts.TitleOpts(title="全国疫情(2019-nCov)")
)
)
return c
# Fetch worldwide data
def update_world_data(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
    data = json.loads(r_data['data'])  # parse the nested JSON payload into a dict
#print(data['areaTree'][0]['children'][0])
countryEN = []
total_confirm = []
for i in data['areaTree']:
if i['name'] != '钻石号邮轮':
if i['name'] == '日本本土':
countryEN.append('Japan')
total_confirm.append(i['total']['confirm'])
else:
countryEN.append(cn_to_en[i['name']])
total_confirm.append(i['total']['confirm'])
data = [list(z) for z in zip(countryEN, total_confirm)]
return data
def update_world_data1(unit=3600 * 2):
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
    data = json.loads(r_data['data'])  # parse the nested JSON payload into a dict
#print(data['areaTree'][0]['children'][0])
    from googletrans import Translator  # assumed dependency: Translator is not imported at module level in the original
    translate = Translator()
    country = []  # Chinese country names extracted from the API
total_confirm = []
for i in data['areaTree']:
country.append(i['name'])
total_confirm.append(i['total']['confirm'])
    countryEN = []  # translated country names
for i in country:
countryEN.append(translate.translate(i).text)
    # today's data
data = [list(z) for z in zip(countryEN, total_confirm)]
return data
def world_map(data)-> Map:
opt= [
{"min":1001,"color":'#731919'},
{"min":51,"max":1000,"color":'red'},
{"min":11,"max":50,"color":'#e26061'},
{"min":6,"max":10,"color":'#f08f7f'},
{"min":1,"max":5,"color":'#ffb86a'},
{"value":0,"color":'#ffffff'}
]
c = (
Map()
.add("确诊人数", data, "world",is_map_symbol_show=False)
#.add("商家A", [list(z) for z in zip(countryEN, total_confirm)], "world")
.set_series_opts(label_opts=opts.LabelOpts(is_show=False,font_size=8),)
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(max_=1000,is_piecewise=True,pieces=opt),
legend_opts=opts.LegendOpts(is_show=False),
#title_opts=opts.TitleOpts(title="全球疫情(2019-nCov)")
)
)
return c
def kline()-> Kline:
    data = get_origin_data()  # raw epidemic time-series data
    # daily increase in confirmed cases
a = []
c = [x['confirm'] for x in data['chinaDayList']]
for i in range(len(c)):
if i == 0:
a.append(0)
else:
a.append(int(c[i]) - int(c[i-1]))
b = []
for i in range(len(a)):
if i == 0:
b.append([0,0,0,a[i]])
elif i == 1:
b.append([0,0,a[i-1],a[i]])
elif i == 2:
b.append([0,a[i-2],a[i-1],a[i]])
else:
b.append([a[i-3],a[i-2],a[i-1],a[i]])
c = (
Kline()
.add_xaxis([x['date'] for x in data['chinaDayList']])
.add_yaxis("kline", b)
.set_global_opts(
yaxis_opts=opts.AxisOpts(
is_scale=True,
splitarea_opts=opts.SplitAreaOpts(
is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
),
),
xaxis_opts=opts.AxisOpts(is_scale=True),
#title_opts=opts.TitleOpts(title="2019-nCov K线图"),
datazoom_opts=[opts.DataZoomOpts(pos_bottom="-2%",range_end=100)],
)
)
return c
def get_origin_data():
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other'
r = requests.get(url)
data = json.loads(json.loads(r.text)['data'])
return data
def line_connect_null() -> Line:
    data = get_origin_data()  # raw epidemic time-series data
    # daily increase in confirmed cases
Dailyincrease = []
a = [x['confirm'] for x in data['chinaDayList']]
for i in range(len(a)):
if i == 0:
Dailyincrease.append(0)
else:
Dailyincrease.append(int(a[i]) - int(a[i-1]))
c = (
Line()
        .add_xaxis([x['date'] for x in data['chinaDayList']])  # plain list of dates
        .add_yaxis('确诊',[x['confirm'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False))  # (series name, values)
.add_yaxis('疑似',[x['suspect'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False))
.add_yaxis('治愈',[x['heal'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False))
.add_yaxis('死亡',[x['dead'] for x in data['chinaDayList']],label_opts=opts.LabelOpts(is_show=False))
        .add_yaxis('每日确诊增加数',Dailyincrease,areastyle_opts=opts.AreaStyleOpts(opacity=0.5),label_opts=opts.LabelOpts(is_show=False))  # areastyle_opts draws a shaded area under the line
.set_global_opts(
#title_opts=opts.TitleOpts(title="2019-nCov"),
datazoom_opts=opts.DataZoomOpts(range_end=100),
)
)
return c
def line_heal() -> Line:
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
    data = json.loads(r_data['data'])  # parse the nested JSON payload into a dict
    # daily increase in confirmed cases
Dailyincrease = []
a = [x['confirm'] for x in data['chinaDayList']]
for i in range(len(a)):
if i == 0:
Dailyincrease.append(0)
else:
Dailyincrease.append(int(a[i]) - int(a[i-1]))
    # daily increase in suspected cases
Dailysuspect = []
a = [x['suspect'] for x in data['chinaDayList']]
for i in range(len(a)):
if i == 0:
Dailysuspect.append(0)
else:
Dailysuspect.append(int(a[i]) - int(a[i-1]))
c = (
Line()
        .add_xaxis([x['date'] for x in data['chinaDayList']])  # plain list of dates
.add_yaxis('治愈',[x['heal'] for x in data['chinaDayList']])
.add_yaxis('死亡',[x['dead'] for x in data['chinaDayList']])
.set_global_opts(
#title_opts=opts.TitleOpts(title="2019-nCov"),
datazoom_opts=opts.DataZoomOpts(range_end=100),
)
)
return c
# Statistics for countries outside China
def world_bar() -> Bar:
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
    data = json.loads(r_data['data'])  # parse the nested JSON payload into a dict
country = []
numbers = []
for i in data['areaTree']:
country.append(i['name'])
numbers.append(i['total']['confirm'])
country.reverse()
numbers.reverse()
c = (
Bar()
.add_xaxis(country[:-1])
.add_yaxis("确诊人数", numbers[:-1])
.reversal_axis()
.set_series_opts(label_opts=opts.LabelOpts(position="right",color="black"))
.set_global_opts(
#title_opts=opts.TitleOpts(title="海外国家统计数据"),
yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-45,font_size=11)),
)
)
return c
# Trend for countries outside China
def other_line() -> Line:
url = 'https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/cases_time_v3/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Report_Date_String%20asc&resultOffset=0&resultRecordCount=2000&cacheHint=true'
r_data = json.loads(requests.get(url).text)
    data = r_data['features']  # list of feature records from the ArcGIS time-series API
dates = []
numbers = []
for i in data:
date = time.strftime("%m.%d", time.localtime(i['attributes']['Report_Date'] / 1000))
dates.append(date)
numbers.append(i['attributes']['Other_Locations'])
c = (
Line()
.add_xaxis(dates) #直接列表
.add_yaxis('确诊',numbers)
.set_global_opts(
#title_opts=opts.TitleOpts(title="海外国家疫情趋势", subtitle=""),
)
)
return c
def china_online():
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
r_data = json.loads(requests.get(url).text)
data = json.loads(r_data['data']) #初始化json数据,为dict ['chinaTotal']
#每日确诊增加数
# chinaTotal = data['chinaTotal'] #结果为列表
# chinaAdd = data['chinaAdd']
# lastUpdateTime = data['lastUpdateTime']
return data
@app.route("/")
def index():
other_data = get_origin_data()
return render_template("index.html")
# China map data
@app.route("/map")
def get_map():
    data = update_china_data()
    return china_map(data).dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart served to the front end
# China map data: cases still being treated
@app.route("/map2")
def get_map2():
    data = update_china_heal_data()
    return china_map(data).dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart served to the front end
# World map
@app.route("/maps")
def get_maps():
    #countryEN,total_confirm = update_world_data()
    data = update_world_data()
    return world_map(data).dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart served to the front end
# Epidemic news feed
@app.route("/news")
def get_news():
news = update_news()
return jsonify(news)
# Nationwide totals
@app.route("/online")
def get_online():
    onlines = china_online()
    return jsonify(onlines)
# Real-time hot news list
@app.route("/hotnews")
def get_hotnews():
hotnews = update_hotnews()
return jsonify(hotnews)
@app.route("/wordcloud")
def get_word_cloud():
word = word_cloud()
return word.dump_options_with_quotes()
# Candlestick (K-line) chart
@app.route("/kline")
def get_kline():
    c = kline()
    return c.dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart
@app.route("/line")
def get_line():
    c = line_connect_null()
    return c.dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart
@app.route("/worldbar")
def get_worldbar():
    c = world_bar()
    return c.dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart
@app.route("/worldline")
def get_worldline():
    c = other_line()
    return c.dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart
@app.route("/heal")
def get_heal():
    c = line_heal()
    return c.dump_options_with_quotes()  # dump_options_with_quotes() is required for any pyecharts chart
# @app.route("/overall")
# def get_overall():
# overall = update_overall()
# return jsonify(overall)
if __name__ == "__main__":
#app.run(debug=True)
app.run(host="0.0.0.0",port=5000,debug=True)
| StarcoderdataPython |
74393 | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
#
# This file is part of the example classes of the Qt Toolkit.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file LICENSE.GPL included in the packaging of
# this file. Please review the following information to ensure GNU
# General Public Licensing requirements will be met:
# http://www.trolltech.com/products/qt/opensource.html
#
# If you are unsure which license is appropriate for your use, please
# review the following information:
# http://www.trolltech.com/products/qt/licensing.html or contact the
# sales department at <EMAIL>.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
############################################################################
import random
from PySide import QtCore, QtGui
import puzzle_rc
class PuzzleWidget(QtGui.QWidget):
puzzleCompleted = QtCore.Signal()
def __init__(self, parent=None):
super(PuzzleWidget, self).__init__(parent)
self.piecePixmaps = []
self.pieceRects = []
self.pieceLocations = []
self.highlightedRect = QtCore.QRect()
self.inPlace = 0
self.setAcceptDrops(True)
self.setMinimumSize(400, 400)
self.setMaximumSize(400, 400)
def clear(self):
self.pieceLocations = []
self.piecePixmaps = []
self.pieceRects = []
self.highlightedRect = QtCore.QRect()
self.inPlace = 0
self.update()
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('image/x-puzzle-piece'):
event.accept()
else:
event.ignore()
def dragLeaveEvent(self, event):
updateRect = self.highlightedRect
self.highlightedRect = QtCore.QRect()
self.update(updateRect)
event.accept()
def dragMoveEvent(self, event):
updateRect = self.highlightedRect.unite(self.targetSquare(event.pos()))
if event.mimeData().hasFormat('image/x-puzzle-piece') and self.findPiece(self.targetSquare(event.pos())) == -1:
self.highlightedRect = self.targetSquare(event.pos())
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
else:
self.highlightedRect = QtCore.QRect()
event.ignore()
self.update(updateRect)
def dropEvent(self, event):
if event.mimeData().hasFormat('image/x-puzzle-piece') and self.findPiece(self.targetSquare(event.pos())) == -1:
pieceData = event.mimeData().data('image/x-puzzle-piece')
stream = QtCore.QDataStream(pieceData, QtCore.QIODevice.ReadOnly)
square = self.targetSquare(event.pos())
pixmap = QtGui.QPixmap()
location = QtCore.QPoint()
stream >> pixmap >> location
self.pieceLocations.append(location)
self.piecePixmaps.append(pixmap)
self.pieceRects.append(square)
            self.highlightedRect = QtCore.QRect()
self.update(square)
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
if location == QtCore.QPoint(square.x() / 80, square.y() / 80):
self.inPlace += 1
if self.inPlace == 25:
self.puzzleCompleted.emit()
else:
self.highlightedRect = QtCore.QRect()
event.ignore()
def findPiece(self, pieceRect):
try:
return self.pieceRects.index(pieceRect)
except ValueError:
return -1
def mousePressEvent(self, event):
square = self.targetSquare(event.pos())
found = self.findPiece(square)
if found == -1:
return
location = self.pieceLocations[found]
pixmap = self.piecePixmaps[found]
del self.pieceLocations[found]
del self.piecePixmaps[found]
del self.pieceRects[found]
        if location == QtCore.QPoint(square.x() / 80, square.y() / 80):
self.inPlace -= 1
self.update(square)
itemData = QtCore.QByteArray()
dataStream = QtCore.QDataStream(itemData, QtCore.QIODevice.WriteOnly)
dataStream << pixmap << location
mimeData = QtCore.QMimeData()
mimeData.setData('image/x-puzzle-piece', itemData)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setHotSpot(event.pos() - square.topLeft())
drag.setPixmap(pixmap)
if drag.start(QtCore.Qt.MoveAction) == 0:
self.pieceLocations.insert(found, location)
self.piecePixmaps.insert(found, pixmap)
self.pieceRects.insert(found, square)
self.update(self.targetSquare(event.pos()))
if location == QtCore.QPoint(square.x() / 80, square.y() / 80):
self.inPlace += 1
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
painter.fillRect(event.rect(), QtCore.Qt.white)
if self.highlightedRect.isValid():
painter.setBrush(QtGui.QColor("#ffcccc"))
painter.setPen(QtCore.Qt.NoPen)
painter.drawRect(self.highlightedRect.adjusted(0, 0, -1, -1))
for i, pieceRect in enumerate(self.pieceRects):
painter.drawPixmap(pieceRect, self.piecePixmaps[i])
painter.end()
def targetSquare(self, position):
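        # Snap the point to its enclosing 80x80 cell: the 400x400 board is a
        # 5x5 grid, so integer-dividing by 80 gives the cell's top-left corner.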
return QtCore.QRect(position.x() // 80 * 80, position.y() // 80 * 80, 80, 80)
class PiecesModel(QtCore.QAbstractListModel):
def __init__(self, parent=None):
super(PiecesModel, self).__init__(parent)
self.locations = []
self.pixmaps = []
def data(self, index, role=QtCore.Qt.DisplayRole):
if not index.isValid():
return None
if role == QtCore.Qt.DecorationRole:
return QtGui.QIcon(self.pixmaps[index.row()].scaled(
60, 60, QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation))
if role == QtCore.Qt.UserRole:
return self.pixmaps[index.row()]
if role == QtCore.Qt.UserRole + 1:
return self.locations[index.row()]
return None
def addPiece(self, pixmap, location):
if random.random() < 0.5:
row = 0
else:
row = len(self.pixmaps)
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self.pixmaps.insert(row, pixmap)
self.locations.insert(row, location)
self.endInsertRows()
def flags(self,index):
if index.isValid():
return (QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsDragEnabled)
return QtCore.Qt.ItemIsDropEnabled
def removeRows(self,row, count, parent):
if parent.isValid():
return False
if row >= len(self.pixmaps) or row + count <= 0:
return False
beginRow = max(0, row)
endRow = min(row + count - 1, len(self.pixmaps) - 1)
self.beginRemoveRows(parent, beginRow, endRow)
del self.pixmaps[beginRow:endRow + 1]
del self.locations[beginRow:endRow + 1]
self.endRemoveRows()
return True
def mimeTypes(self):
return ['image/x-puzzle-piece']
def mimeData(self, indexes):
mimeData = QtCore.QMimeData()
encodedData = QtCore.QByteArray()
stream = QtCore.QDataStream(encodedData, QtCore.QIODevice.WriteOnly)
for index in indexes:
if index.isValid():
pixmap = QtGui.QPixmap(self.data(index, QtCore.Qt.UserRole))
location = self.data(index, QtCore.Qt.UserRole + 1)
stream << pixmap << location
mimeData.setData('image/x-puzzle-piece', encodedData)
return mimeData
def dropMimeData(self, data, action, row, column, parent):
if not data.hasFormat('image/x-puzzle-piece'):
return False
if action == QtCore.Qt.IgnoreAction:
return True
if column > 0:
return False
if not parent.isValid():
if row < 0:
endRow = len(self.pixmaps)
else:
endRow = min(row, len(self.pixmaps))
else:
endRow = parent.row()
encodedData = data.data('image/x-puzzle-piece')
stream = QtCore.QDataStream(encodedData, QtCore.QIODevice.ReadOnly)
while not stream.atEnd():
pixmap = QtGui.QPixmap()
            location = QtCore.QPoint()
stream >> pixmap >> location
self.beginInsertRows(QtCore.QModelIndex(), endRow, endRow)
self.pixmaps.insert(endRow, pixmap)
self.locations.insert(endRow, location)
self.endInsertRows()
endRow += 1
return True
def rowCount(self, parent):
if parent.isValid():
return 0
else:
return len(self.pixmaps)
def supportedDropActions(self):
return QtCore.Qt.CopyAction | QtCore.Qt.MoveAction
def addPieces(self, pixmap):
self.beginRemoveRows(QtCore.QModelIndex(), 0, 24)
self.pixmaps = []
self.locations = []
self.endRemoveRows()
for y in range(5):
for x in range(5):
pieceImage = pixmap.copy(x*80, y*80, 80, 80)
self.addPiece(pieceImage, QtCore.QPoint(x, y))
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.puzzleImage = QtGui.QPixmap()
self.setupMenus()
self.setupWidgets()
self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed))
self.setWindowTitle("Puzzle")
def openImage(self, path=None):
if not path:
path = QtGui.QFileDialog.getOpenFileName(self, "Open Image", '',
"Image Files (*.png *.jpg *.bmp)")
if path:
newImage = QtGui.QPixmap()
if not newImage.load(path):
QtGui.QMessageBox.warning(self, "Open Image",
"The image file could not be loaded.",
QtGui.QMessageBox.Cancel)
return
self.puzzleImage = newImage
self.setupPuzzle()
def setCompleted(self):
QtGui.QMessageBox.information(self, "Puzzle Completed",
"Congratulations! You have completed the puzzle!\nClick OK "
"to start again.",
QtGui.QMessageBox.Ok)
self.setupPuzzle()
def setupPuzzle(self):
size = min(self.puzzleImage.width(), self.puzzleImage.height())
self.puzzleImage = self.puzzleImage.copy((self.puzzleImage.width()-size)/2,
(self.puzzleImage.height() - size)/2, size, size).scaled(400,
400, QtCore.Qt.IgnoreAspectRatio,
QtCore.Qt.SmoothTransformation)
random.seed(QtGui.QCursor.pos().x() ^ QtGui.QCursor.pos().y())
self.model.addPieces(self.puzzleImage)
self.puzzleWidget.clear()
def setupMenus(self):
fileMenu = self.menuBar().addMenu("&File")
openAction = fileMenu.addAction("&Open...")
openAction.setShortcut("Ctrl+O")
exitAction = fileMenu.addAction("E&xit")
exitAction.setShortcut("Ctrl+Q")
gameMenu = self.menuBar().addMenu("&Game")
restartAction = gameMenu.addAction("&Restart")
openAction.triggered.connect(self.openImage)
exitAction.triggered.connect(QtGui.qApp.quit)
restartAction.triggered.connect(self.setupPuzzle)
def setupWidgets(self):
frame = QtGui.QFrame()
frameLayout = QtGui.QHBoxLayout(frame)
self.piecesList = QtGui.QListView()
self.piecesList.setDragEnabled(True)
self.piecesList.setViewMode(QtGui.QListView.IconMode)
self.piecesList.setIconSize(QtCore.QSize(60,60))
self.piecesList.setGridSize(QtCore.QSize(80,80))
self.piecesList.setSpacing(10)
self.piecesList.setMovement(QtGui.QListView.Snap)
self.piecesList.setAcceptDrops(True)
self.piecesList.setDropIndicatorShown(True)
self.model = PiecesModel(self)
self.piecesList.setModel(self.model)
self.puzzleWidget = PuzzleWidget()
self.puzzleWidget.puzzleCompleted.connect(self.setCompleted,
QtCore.Qt.QueuedConnection)
frameLayout.addWidget(self.piecesList)
frameLayout.addWidget(self.puzzleWidget)
self.setCentralWidget(frame)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = MainWindow()
window.openImage(':/images/example.jpg')
window.show()
sys.exit(app.exec_())
| StarcoderdataPython |
1606013 | <reponame>aws/aws-gamekit-unreal
import time
from base64 import b64decode, b64encode
from unittest import TestCase, mock
from layers.main.CommonLambdaLayer.python.gamekithelpers import pagination
class TestPagination(TestCase):
def setUp(self):
self.player_id = "foo"
self.start_key = {"bar": "baz"}
self.token = pagination.generate_pagination_token(self.player_id, self.start_key)
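    # The token built above is base64("<digest>:<expires_at>"); the tampering
    # tests below decode it, alter one part, re-encode it and expect rejection.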
def test_validate_pagination_token(self):
self.assertTrue(pagination.validate_pagination_token(self.player_id, self.start_key, self.token))
def test_rejects_tampered_expired_at(self):
digest, expires_at = b64decode(self.token).decode().split(":")
expires_at = str(int(expires_at) + 300) #attempt to extend it another 5 minutes
tampered_token = b64encode(":".join([digest, expires_at]).encode()).decode()
self.assertFalse(pagination.validate_pagination_token(self.player_id, self.start_key, tampered_token))
def test_rejects_with_no_paging_token(self):
self.assertFalse(pagination.validate_pagination_token(self.player_id, self.start_key, None))
def test_rejects_tampered_digest(self):
digest, expires_at = b64decode(self.token).decode().split(":")
tampered_digest = "".join([digest, "DEADBEEF"])
tampered_token = b64encode(":".join([tampered_digest, expires_at]).encode()).decode()
self.assertFalse(pagination.validate_pagination_token(self.player_id, self.start_key, tampered_token)) | StarcoderdataPython |
3219387 | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for serializing numpy arrays, gym spaces and envs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
# NOTE (ANDRES): import commented out; the proto-building helpers below will fail without env_service_pb2
#from tensor2tensor.envs import env_service_pb2
from tensorflow.python.framework import tensor_util # pylint: disable=g-direct-tensorflow-import
def numpy_array_to_observation(array):
obs = env_service_pb2.Observation()
obs.observation.CopyFrom(tensor_util.make_tensor_proto(array))
return obs
def tensor_proto_to_numpy_array(tensor_proto):
return tensor_util.MakeNdarray(tensor_proto)
def step_request_from_discrete_action(action):
action_proto = env_service_pb2.Action(discrete_action=action)
step_request = env_service_pb2.StepRequest()
step_request.action.CopyFrom(action_proto)
return step_request
def gym_space_to_proto(gym_space):
"""Converts a gym space to `env_service_pb2.GymSpace`."""
if isinstance(gym_space, gym.spaces.Discrete):
return env_service_pb2.GymSpace(
discrete=env_service_pb2.SpaceDiscrete(num_actions=gym_space.n))
elif isinstance(gym_space, gym.spaces.Box):
space_proto = env_service_pb2.GymSpace()
box_proto = space_proto.box
# Set low & high first, we can set shape and type from it later.
box_proto.low.CopyFrom(tensor_util.make_tensor_proto(gym_space.low))
box_proto.high.CopyFrom(tensor_util.make_tensor_proto(gym_space.high))
# dtype and shape.
box_proto.dtype = box_proto.low.dtype
box_proto.shape.CopyFrom(box_proto.low.tensor_shape)
return space_proto
# A space that we haven't implemented.
return env_service_pb2.GymSpace(unimplemented_space=True)
def proto_to_gym_space(gym_space_proto):
"""Converts a `env_service_pb2.GymSpace` to a `gym.spaces`."""
if gym_space_proto.unimplemented_space:
return None
if gym_space_proto.HasField("discrete"):
return gym.spaces.Discrete(gym_space_proto.discrete.num_actions)
assert gym_space_proto.HasField("box")
low_np = tensor_proto_to_numpy_array(gym_space_proto.box.low)
high_np = tensor_proto_to_numpy_array(gym_space_proto.box.high)
return gym.spaces.Box(low=low_np, high=high_np, dtype=low_np.dtype)
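# Round-trip sketch (assumes the generated env_service_pb2 module is importable):
#   proto = gym_space_to_proto(gym.spaces.Discrete(4))
#   space = proto_to_gym_space(proto)  # -> gym.spaces.Discrete(4)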
def reward_range_to_proto(reward_range=None):
if reward_range is None:
reward_range = (-np.inf, np.inf)
return env_service_pb2.RewardRange(low=reward_range[0], high=reward_range[1])
| StarcoderdataPython |
3396478 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# Copyright 2020 Telefónica Investigación y Desarrollo, S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##
"""
asyncio RO python client to interact with New Generation RO server
"""
import asyncio
import aiohttp
import yaml
import logging
__author__ = "<NAME> <<EMAIL>"
__date__ = "$09-Jan-2018 09:09:48$"
__version__ = "0.1.2"
version_date = "2020-05-08"
class NgRoException(Exception):
def __init__(self, message, http_code=400):
"""Common Exception for all RO client exceptions"""
self.http_code = http_code
Exception.__init__(self, message)
class NgRoClient:
headers_req = {'Accept': 'application/yaml', 'content-type': 'application/yaml'}
client_to_RO = {'tenant': 'tenants', 'vim': 'datacenters', 'vim_account': 'datacenters', 'sdn': 'sdn_controllers',
'vnfd': 'vnfs', 'nsd': 'scenarios', 'wim': 'wims', 'wim_account': 'wims',
'ns': 'instances'}
mandatory_for_create = {
'tenant': ("name", ),
'vnfd': ("name", "id"),
'nsd': ("name", "id"),
'ns': ("name", "scenario", "datacenter"),
'vim': ("name", "vim_url"),
'wim': ("name", "wim_url"),
'vim_account': (),
'wim_account': (),
'sdn': ("name", 'type'),
}
timeout_large = 120
timeout_short = 30
def __init__(self, loop, uri, **kwargs):
self.loop = loop
self.endpoint_url = uri
if not self.endpoint_url.endswith("/"):
self.endpoint_url += "/"
if not self.endpoint_url.startswith("http"):
self.endpoint_url = "http://" + self.endpoint_url
self.username = kwargs.get("username")
        self.password = kwargs.get("password")
self.tenant_id_name = kwargs.get("tenant")
self.tenant = None
self.datacenter_id_name = kwargs.get("datacenter")
self.datacenter = None
logger_name = kwargs.get('logger_name', 'lcm.ro')
self.logger = logging.getLogger(logger_name)
if kwargs.get("loglevel"):
self.logger.setLevel(kwargs["loglevel"])
async def deploy(self, nsr_id, target):
"""
        Deploys or updates a NS instance on the RO server
        :param nsr_id: NS instance id
        :param target: deployment intent; can be a dict, or a yaml/json text
:return: dictionary with the information or raises NgRoException on Error
"""
try:
if isinstance(target, str):
target = self._parse_yaml(target)
payload_req = yaml.safe_dump(target)
url = "{}/ns/v1/deploy/{nsr_id}".format(self.endpoint_url, nsr_id=nsr_id)
async with aiohttp.ClientSession(loop=self.loop) as session:
self.logger.debug("NG-RO POST %s %s", url, payload_req)
# timeout = aiohttp.ClientTimeout(total=self.timeout_large)
async with session.post(url, headers=self.headers_req, data=payload_req) as response:
response_text = await response.read()
self.logger.debug("POST {} [{}] {}".format(url, response.status, response_text[:100]))
if response.status >= 300:
raise NgRoException(response_text, http_code=response.status)
return self._parse_yaml(response_text, response=True)
except (aiohttp.ClientOSError, aiohttp.ClientError) as e:
raise NgRoException(e, http_code=504)
except asyncio.TimeoutError:
raise NgRoException("Timeout", http_code=504)
async def status(self, nsr_id, action_id):
try:
url = "{}/ns/v1/deploy/{nsr_id}/{action_id}".format(self.endpoint_url, nsr_id=nsr_id, action_id=action_id)
async with aiohttp.ClientSession(loop=self.loop) as session:
self.logger.debug("GET %s", url)
# timeout = aiohttp.ClientTimeout(total=self.timeout_short)
async with session.get(url, headers=self.headers_req) as response:
response_text = await response.read()
self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100]))
if response.status >= 300:
raise NgRoException(response_text, http_code=response.status)
return self._parse_yaml(response_text, response=True)
except (aiohttp.ClientOSError, aiohttp.ClientError) as e:
raise NgRoException(e, http_code=504)
except asyncio.TimeoutError:
raise NgRoException("Timeout", http_code=504)
async def delete(self, nsr_id):
try:
url = "{}/ns/v1/deploy/{nsr_id}".format(self.endpoint_url, nsr_id=nsr_id)
async with aiohttp.ClientSession(loop=self.loop) as session:
self.logger.debug("DELETE %s", url)
# timeout = aiohttp.ClientTimeout(total=self.timeout_short)
async with session.delete(url, headers=self.headers_req) as response:
self.logger.debug("DELETE {} [{}]".format(url, response.status))
if response.status >= 300:
raise NgRoException("Delete {}".format(nsr_id), http_code=response.status)
return
except (aiohttp.ClientOSError, aiohttp.ClientError) as e:
raise NgRoException(e, http_code=504)
except asyncio.TimeoutError:
raise NgRoException("Timeout", http_code=504)
async def get_version(self):
"""
Obtain RO server version.
        :return: the server version as a text string. Raises NgRoException on Error.
"""
try:
response_text = ""
async with aiohttp.ClientSession(loop=self.loop) as session:
url = "{}/version".format(self.endpoint_url)
self.logger.debug("RO GET %s", url)
# timeout = aiohttp.ClientTimeout(total=self.timeout_short)
async with session.get(url, headers=self.headers_req) as response:
response_text = await response.read()
self.logger.debug("GET {} [{}] {}".format(url, response.status, response_text[:100]))
if response.status >= 300:
raise NgRoException(response_text, http_code=response.status)
for word in str(response_text).split(" "):
if "." in word:
version_text, _, _ = word.partition("-")
return version_text
raise NgRoException("Got invalid version text: '{}'".format(response_text), http_code=500)
except (aiohttp.ClientOSError, aiohttp.ClientError) as e:
raise NgRoException(e, http_code=504)
except asyncio.TimeoutError:
raise NgRoException("Timeout", http_code=504)
except Exception as e:
raise NgRoException("Got invalid version text: '{}'; causing exception {}".format(response_text, e),
http_code=500)
@staticmethod
def _parse_yaml(descriptor, response=False):
try:
return yaml.safe_load(descriptor)
except yaml.YAMLError as exc:
error_pos = ""
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
error_pos = " at line:{} column:{}s".format(mark.line + 1, mark.column + 1)
error_text = "yaml format error" + error_pos
if response:
raise NgRoException("reponse with " + error_text)
raise NgRoException(error_text)
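# Minimal usage sketch (illustrative only; the endpoint URL and credentials are
# placeholders, not part of this module):
#   loop = asyncio.get_event_loop()
#   ro = NgRoClient(loop, "http://ro-host:9090/ro", username="admin", password="admin")
#   print(loop.run_until_complete(ro.get_version()))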
| StarcoderdataPython |
3343618 | <reponame>piersharding/magnum
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Magnum test utilities."""
from oslo_utils import uuidutils
from magnum.db import api as db_api
def get_test_cluster_template(**kw):
return {
'id': kw.get('id', 32),
'project_id': kw.get('project_id', 'fake_project'),
'user_id': kw.get('user_id', 'fake_user'),
'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'name': kw.get('name', 'clustermodel1'),
'image_id': kw.get('image_id', 'ubuntu'),
'flavor_id': kw.get('flavor_id', 'm1.small'),
'master_flavor_id': kw.get('master_flavor_id', 'm1.small'),
'keypair_id': kw.get('keypair_id', 'keypair1'),
'external_network_id': kw.get('external_network_id',
'd1f02cfb-d27f-4068-9332-84d907cb0e2e'),
'fixed_network': kw.get('fixed_network', 'private'),
        'fixed_subnet': kw.get('fixed_subnet', 'private-subnet'),
'network_driver': kw.get('network_driver'),
'volume_driver': kw.get('volume_driver'),
'dns_nameserver': kw.get('dns_nameserver', '172.16.58.3'),
'apiserver_port': kw.get('apiserver_port', 8080),
'docker_volume_size': kw.get('docker_volume_size', 20),
'docker_storage_driver': kw.get('docker_storage_driver',
'devicemapper'),
'cluster_distro': kw.get('cluster_distro', 'fedora-atomic'),
'coe': kw.get('coe', 'swarm'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
'labels': kw.get('labels', {'key1': 'val1', 'key2': 'val2'}),
'http_proxy': kw.get('http_proxy', 'fake_http_proxy'),
'https_proxy': kw.get('https_proxy', 'fake_https_proxy'),
'no_proxy': kw.get('no_proxy', 'fake_no_proxy'),
'registry_enabled': kw.get('registry_enabled', False),
'tls_disabled': kw.get('tls_disabled', False),
'public': kw.get('public', False),
'server_type': kw.get('server_type', 'vm'),
'insecure_registry': kw.get('insecure_registry', '10.0.0.1:5000'),
'master_lb_enabled': kw.get('master_lb_enabled', True),
'floating_ip_enabled': kw.get('floating_ip_enabled', True),
'hidden': kw.get('hidden', False),
}
def create_test_cluster_template(**kw):
"""Create and return test ClusterTemplate DB object.
Function to be used to create test ClusterTemplate objects in the database.
:param kw: kwargs with overriding values for ClusterTemplate's attributes.
:returns: Test ClusterTemplate DB object.
"""
cluster_template = get_test_cluster_template(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del cluster_template['id']
dbapi = db_api.get_instance()
return dbapi.create_cluster_template(cluster_template)
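# Typical use in a unit test (sketch; assumes the test database fixture is set
# up, and the field values shown are illustrative):
#   cluster_template = create_test_cluster_template(name='tmpl1', coe='kubernetes')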
def get_test_cluster(**kw):
attrs = {
'id': kw.get('id', 42),
'uuid': kw.get('uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
'name': kw.get('name', 'cluster1'),
'discovery_url': kw.get('discovery_url', None),
'ca_cert_ref': kw.get('ca_cert_ref', None),
'magnum_cert_ref': kw.get('magnum_cert_ref', None),
'project_id': kw.get('project_id', 'fake_project'),
'user_id': kw.get('user_id', 'fake_user'),
'cluster_template_id': kw.get('cluster_template_id',
'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'stack_id': kw.get('stack_id', '047c6319-7abd-4bd9-a033-8c6af0173cd0'),
'status': kw.get('status', 'CREATE_IN_PROGRESS'),
'status_reason': kw.get('status_reason', 'Completed successfully'),
'create_timeout': kw.get('create_timeout', 60),
'api_address': kw.get('api_address', '172.17.2.3'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
'docker_volume_size': kw.get('docker_volume_size'),
'labels': kw.get('labels'),
'master_flavor_id': kw.get('master_flavor_id', None),
'flavor_id': kw.get('flavor_id', None),
}
if kw.pop('for_api_use', False):
attrs.update({
'node_addresses': kw.get('node_addresses', ['172.17.2.4']),
'node_count': kw.get('node_count', 3),
'master_count': kw.get('master_count', 3),
'master_addresses': kw.get('master_addresses', ['172.17.2.18'])
})
# Only add Keystone trusts related attributes on demand since they may
# break other tests.
for attr in ['trustee_username', 'trustee_password', 'trust_id']:
if attr in kw:
attrs[attr] = kw[attr]
return attrs
def create_test_cluster(**kw):
"""Create test cluster entry in DB and return Cluster DB object.
Function to be used to create test Cluster objects in the database.
:param kw: kwargs with overriding values for cluster's attributes.
:returns: Test Cluster DB object.
"""
cluster = get_test_cluster(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del cluster['id']
dbapi = db_api.get_instance()
return dbapi.create_cluster(cluster)
def get_test_quota(**kw):
attrs = {
'id': kw.get('id', 42),
'project_id': kw.get('project_id', 'fake_project'),
'resource': kw.get('resource', 'Cluster'),
'hard_limit': kw.get('hard_limit', 10)
}
return attrs
def create_test_quota(**kw):
"""Create test quota entry in DB and return Quota DB object.
Function to be used to create test Quota objects in the database.
:param kw: kwargs with overriding values for quota's attributes.
:returns: Test Quota DB object.
"""
quota = get_test_quota(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del quota['id']
dbapi = db_api.get_instance()
return dbapi.create_quota(quota)
def get_test_x509keypair(**kw):
return {
'id': kw.get('id', 42),
'uuid': kw.get('uuid', '72625085-c507-4410-9b28-cd7cf1fbf1ad'),
'project_id': kw.get('project_id', 'fake_project'),
'user_id': kw.get('user_id', 'fake_user'),
'certificate': kw.get('certificate',
'certificate'),
'private_key': kw.get('private_key', 'private_key'),
'private_key_passphrase': kw.get('private_key_passphrase',
'private_key_passphrase'),
'intermediates': kw.get('intermediates', 'intermediates'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_x509keypair(**kw):
"""Create test x509keypair entry in DB and return X509KeyPair DB object.
Function to be used to create test X509KeyPair objects in the database.
:param kw: kwargs with overriding values for x509keypair's attributes.
:returns: Test X509KeyPair DB object.
"""
x509keypair = get_test_x509keypair(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del x509keypair['id']
dbapi = db_api.get_instance()
return dbapi.create_x509keypair(x509keypair)
def get_test_magnum_service(**kw):
return {
        'id': kw.get('id', 13),
'report_count': kw.get('report_count', 13),
'host': kw.get('host', 'fakehost'),
'binary': kw.get('binary', 'fake-bin'),
'disabled': kw.get('disabled', False),
'disabled_reason': kw.get('disabled_reason', 'fake-reason'),
'forced_down': kw.get('forced_down', False),
'last_seen_up': kw.get('last_seen_up'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_magnum_service(**kw):
"""Create test magnum_service entry in DB and return magnum_service DB object.
:param kw: kwargs with overriding values for magnum_service's attributes.
:returns: Test magnum_service DB object.
"""
magnum_service = get_test_magnum_service(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del magnum_service['id']
dbapi = db_api.get_instance()
return dbapi.create_magnum_service(magnum_service)
def get_test_quotas(**kw):
return {
        'id': kw.get('id', 18),
'project_id': kw.get('project_id', 'fake_project'),
'resource': kw.get('resource', 'Cluster'),
'hard_limit': kw.get('hard_limit', 10),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_quotas(**kw):
"""Create test quotas entry in DB and return quotas DB object.
:param kw: kwargs with overriding values for quota attributes.
:returns: Test quotas DB object.
"""
quotas = get_test_quotas(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del quotas['id']
dbapi = db_api.get_instance()
return dbapi.create_quota(quotas)
def get_test_federation(**kw):
return {
'id': kw.get('id', 42),
'uuid': kw.get('uuid', '60d6dbdc-9951-4cee-b020-55d3e15a749b'),
'name': kw.get('name', 'fake-name'),
'project_id': kw.get('project_id', 'fake_project'),
'hostcluster_id': kw.get('hostcluster_id', 'fake_master'),
'member_ids': kw.get('member_ids', ['fake_member1', 'fake_member2']),
'properties': kw.get('properties', {'dns-zone': 'example.com.'}),
'status': kw.get('status', 'CREATE_IN_PROGRESS'),
'status_reason': kw.get('status_reason', 'Completed successfully.'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at')
}
def create_test_federation(**kw):
"""Create test federation entry in DB and return federation DB object.
:param kw: kwargs with overriding values for federation attributes.
    :return: Test federation DB object.
"""
federation = get_test_federation(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del federation['id']
dbapi = db_api.get_instance()
return dbapi.create_federation(federation)
def get_test_nodegroup(**kw):
return {
'id': kw.get('id', 12),
'uuid': kw.get('uuid', '483203a3-dbee-4a9c-9d65-9820512f4df8'),
'name': kw.get('name', 'nodegroup1'),
'cluster_id': kw.get('cluster_id',
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
'project_id': kw.get('project_id', 'fake_project'),
'docker_volume_size': kw.get('docker_volume_size'),
'labels': kw.get('labels'),
'flavor_id': kw.get('flavor_id', None),
'image_id': kw.get('image_id', None),
'node_addresses': kw.get('node_addresses', ['172.17.2.4']),
'node_count': kw.get('node_count', 3),
'role': kw.get('role', 'worker'),
'max_node_count': kw.get('max_node_count', None),
'min_node_count': kw.get('min_node_count', 1),
'is_default': kw.get('is_default', True),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at')
}
def create_test_nodegroup(**kw):
"""Create test nodegroup entry in DB and return federation DB object.
:param kw: kwargs with overriding values for nodegroup attributes.
:return: Test nodegroup DB object.
"""
nodegroup = get_test_nodegroup(**kw)
# Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
del nodegroup['id']
dbapi = db_api.get_instance()
return dbapi.create_nodegroup(nodegroup)
def get_nodegroups_for_cluster(**kw):
# get workers nodegroup
worker = get_test_nodegroup(
role='worker',
name=kw.get('worker_name', 'test-worker'),
uuid=kw.get('worker_uuid', uuidutils.generate_uuid()),
cluster_id=kw.get('cluster_id',
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
project_id=kw.get('project_id', 'fake_project'),
node_addresses=kw.get('node_addresses', ['172.17.2.4']),
node_count=kw.get('node_count', 3)
)
# get masters nodegroup
master = get_test_nodegroup(
role='master',
name=kw.get('master_name', 'test-master'),
uuid=kw.get('master_uuid', uuidutils.generate_uuid()),
cluster_id=kw.get('cluster_id',
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
project_id=kw.get('project_id', 'fake_project'),
node_addresses=kw.get('master_addresses', ['172.17.2.18']),
node_count=kw.get('master_count', 3)
)
return {'master': master, 'worker': worker}
def create_nodegroups_for_cluster(**kw):
nodegroups = get_nodegroups_for_cluster(**kw)
# Create workers nodegroup
worker = nodegroups['worker']
del worker['id']
create_test_nodegroup(**worker)
# Create masters nodegroup
master = nodegroups['master']
del master['id']
create_test_nodegroup(**master)
| StarcoderdataPython |
47781 | from setuptools import setup, Extension
setup(ext_modules=[
Extension('_module', sources=["module_wrap.c"])
]) | StarcoderdataPython |
25731 | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
This is Allie's modeling API to help build classification or regression models.
All you need to do is run the model.py script and you will be guided through the
modeling process.
Usage: python3 model.py
Alternative CLI Usage: python3 model.py audio 2 c gender males females
- audio = audio file type
- 2 = 2 classes
- c = classification (r for regression)
- gender = common name of model
- male = first class
- female = second class [via N number of classes]
For addditional documentation, check out
https://github.com/jim-schwoebel/allie/tree/master/training
'''
###############################################################
## IMPORT STATEMENTS ##
###############################################################
import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, psutil, platform
import numpy as np
from tqdm import tqdm
from pyfiglet import Figlet
f=Figlet(font='doh')
print(f.renderText('Allie'))
f=Figlet(font='doom')
import pandas as pd
import matplotlib.pyplot as plt
###############################################################
## CREATE HELPER FUNCTIONS ##
###############################################################
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def get_folders(listdir):
folders=list()
for i in range(len(listdir)):
if listdir[i].find('.') < 0:
folders.append(listdir[i])
return folders
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
counts={'audio': filetypes.count('audio'),
'image': filetypes.count('image'),
'text': filetypes.count('text'),
'video': filetypes.count('video'),
'csv': filetypes.count('csv')}
# get back the type of folder (main file type)
countlist=list(counts)
countvalues=list(counts.values())
maxvalue=max(countvalues)
maxind=countvalues.index(maxvalue)
return countlist[maxind]
def pull_element(mylist, element):
pull_=list()
for i in range(len(mylist)):
pull_.append(mylist[i][element])
return pull_
def convert_csv(X_train, y_train, labels, mtype, classes):
'''
    Take in an array of features and labels and output a
    pandas DataFrame format for easy .CSV export and for model training.
This is important to make sure all machine learning training sessions
use the same dataset (so they can be benchmarked appropriately).
'''
# from pandas merging guide https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
feature_list=labels
data=list()
for i in tqdm(range(len(X_train)), desc='converting csv...'):
newlist=list()
for j in range(len(X_train[i])):
newlist.append([X_train[i][j]])
temp=pd.DataFrame(dict(zip(feature_list,newlist)), index=[i])
# print(temp)
data.append(temp)
data = pd.concat(data)
if mtype == 'c':
data['class_']=y_train
elif mtype == 'r':
if len(classes) == 1:
data[classes[0]]=y_train
else:
for j in range(len(classes)):
newy=pull_element(y_train, j)
data[classes[j]]=newy
data=pd.DataFrame(data, columns = list(data))
# print this because in pretty much every case you will write the .CSV file afterwards
print('writing csv file...')
return data
def device_info():
cpu_data={'memory':psutil.virtual_memory(),
'cpu percent':psutil.cpu_percent(),
'cpu times':psutil.cpu_times(),
'cpu count':psutil.cpu_count(),
'cpu stats':psutil.cpu_stats(),
'cpu swap':psutil.swap_memory(),
'partitions':psutil.disk_partitions(),
'disk usage':psutil.disk_usage('/'),
'disk io counters':psutil.disk_io_counters(),
'battery':psutil.sensors_battery(),
'boot time':psutil.boot_time(),
}
data={'time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
'timezone':time.tzname,
'operating system': platform.system(),
'os release':platform.release(),
'os version':platform.version(),
'cpu data':cpu_data,
'space left': list(psutil.disk_usage('/'))[2]/1000000000}
return data
def get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time):
'''
	get the metrics associated with a classification or regression problem
and output a .JSON file with the training session.
'''
metrics_=dict()
y_true=y_test
if default_training_script not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(X_test)
elif default_training_script=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif default_training_script == 'autogluon':
from autogluon import TabularPrediction as task
test_data=test_data.drop(labels=['class'],axis=1)
y_pred=clf.predict(test_data)
elif default_training_script == 'autokeras':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'autopytorch':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif default_training_script == 'ludwig':
data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
pred=clf.predict(data)['class__predictions']
y_pred=np.array(list(pred), dtype=np.int64)
elif default_training_script == 'devol':
X_test=X_test.reshape(X_test.shape+ (1,)+ (1,))
y_pred=clf.predict_classes(X_test).flatten()
elif default_training_script=='keras':
if mtype == 'c':
y_pred=clf.predict_classes(X_test).flatten()
elif mtype == 'r':
y_pred=clf.predict(X_test).flatten()
elif default_training_script=='neuraxle':
y_pred=clf.transform(X_test)
elif default_training_script=='safe':
# have to make into a pandas dataframe
test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
y_pred=clf.predict(test_data)
print(y_pred)
# get classification or regression metrics
if mtype in ['c', 'classification']:
# now get all classification metrics
mtype='classification'
metrics_['accuracy']=metrics.accuracy_score(y_true, y_pred)
metrics_['balanced_accuracy']=metrics.balanced_accuracy_score(y_true, y_pred)
try:
metrics_['precision']=metrics.precision_score(y_true, y_pred)
except:
metrics_['precision']='n/a'
try:
metrics_['recall']=metrics.recall_score(y_true, y_pred)
except:
metrics_['recall']='n/a'
try:
metrics_['f1_score']=metrics.f1_score (y_true, y_pred, pos_label=1)
except:
metrics_['f1_score']='n/a'
try:
metrics_['f1_micro']=metrics.f1_score(y_true, y_pred, average='micro')
except:
metrics_['f1_micro']='n/a'
try:
metrics_['f1_macro']=metrics.f1_score(y_true, y_pred, average='macro')
except:
metrics_['f1_macro']='n/a'
try:
metrics_['roc_auc']=metrics.roc_auc_score(y_true, y_pred)
except:
metrics_['roc_auc']='n/a'
try:
metrics_['roc_auc_micro']=metrics.roc_auc_score(y_true, y_pred, average='micro')
except:
metrics_['roc_auc_micro']='n/a'
try:
metrics_['roc_auc_macro']=metrics.roc_auc_score(y_true, y_pred, average='macro')
except:
			metrics_['roc_auc_macro']='n/a'
metrics_['confusion_matrix']=metrics.confusion_matrix(y_true, y_pred).tolist()
metrics_['classification_report']=metrics.classification_report(y_true, y_pred, target_names=classes)
plot_confusion_matrix(np.array(metrics_['confusion_matrix']), classes)
try:
			# predict_proba only works for log loss and modified Huber loss.
# https://stackoverflow.com/questions/47788981/sgdclassifier-with-predict-proba
try:
y_probas = clf.predict_proba(X_test)[:, 1]
except:
try:
y_probas = clf.decision_function(X_test)[:, 1]
except:
print('error making y_probas')
plot_roc_curve(y_test, [y_probas], [default_training_script])
except:
print('error plotting ROC curve')
			print('predict_proba only works for log loss and modified Huber loss.')
elif mtype in ['r', 'regression']:
# now get all regression metrics
mtype='regression'
metrics_['mean_absolute_error'] = metrics.mean_absolute_error(y_true, y_pred)
metrics_['mean_squared_error'] = metrics.mean_squared_error(y_true, y_pred)
metrics_['median_absolute_error'] = metrics.median_absolute_error(y_true, y_pred)
metrics_['r2_score'] = metrics.r2_score(y_true, y_pred)
plot_regressor(clf, classes, X_test, y_test)
data={'sample type': problemtype,
'training time': time.time()-model_start_time,
'created date': str(datetime.datetime.now()),
'device info': device_info(),
'session id': model_session,
'classes': classes,
'problem type': mtype,
'model name': modelname,
'model type': default_training_script,
'metrics': metrics_,
'settings': settings,
'transformer name': transformer_name,
'training data': created_csv_files,
'sample X_test': X_test[0].tolist(),
'sample y_test': y_test[0].tolist()}
if modelname.endswith('.pickle'):
jsonfilename=modelname[0:-7]+'.json'
elif modelname.endswith('.h5'):
jsonfilename=modelname[0:-3]+'.json'
else:
jsonfilename=modelname+'.json'
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
	# also output requirements.txt for reproducibility purposes
curdir=os.getcwd()
basedir=prev_dir(curdir)
os.chdir(basedir)
	os.system('pip3 freeze > requirements.txt')
# FUTURE - add in optional copy of cleaning, augmentation, and feature libraries contextually
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/features', basedir+'/features')
# except:
# print('error copying features')
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/cleaning', basedir+'/cleaning')
# except:
# print('error copying cleaning techniques')
# shutil.copytree(prev_dir(prev_dir(basedir))+'/augmentation', basedir+'/augmentation')
# except:
# print('error copying augmentation techniques')
os.chdir(curdir)
def plot_roc_curve(y_test, probs, clf_names):
'''
This function plots an ROC curve with the appropriate
list of classifiers.
'''
cycol = itertools.cycle('bgrcmyk')
for i in range(len(probs)):
print(y_test)
print(probs[i])
try:
fper, tper, thresholds = roc_curve(y_test, probs[i])
plt.plot(fper, tper, color=next(cycol), label=clf_names[i]+' = %s'%(str(round(metrics.auc(fper, tper), 3))))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
except:
print('passing %s'%(clf_names[i]))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.tight_layout()
plt.savefig('roc_curve.png')
plt.close()
def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("\nNormalized confusion matrix")
else:
print('\nConfusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig('confusion_matrix.png')
plt.close()
def plot_regressor(regressor, classes, X_test, y_test):
'''
plot regression models with a bar chart.
'''
try:
y_pred = regressor.predict(X_test)
# plot the first 25 records
if len(classes) == 2:
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
# plot a straight line on the data
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.tight_layout()
plt.savefig('straight_line_predictions.png')
plt.close()
else:
# multi-dimensional generalization
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
except:
print('error plotting regressor')
def pursue_modeling(mtype, model_dir, problemtype, default_training_script,common_name_model):
'''
simple script to decide whether or not to continue modeling the data.
'''
try:
model_listdir=os.listdir(model_dir+'/'+problemtype+'_models')
except:
model_listdir=list()
# note that these are tpot definitions
model_exists=False
if default_training_script == 'tpot':
if common_name_model + '_classifier' in model_listdir and mtype == 'c':
model_exists=True
elif common_name_model +'_regression' in model_listdir and mtype == 'r':
model_exists=True
else:
# only look for naming conflicts with TPOT for now, can expand into the future.
model_exists=False
return model_exists, model_listdir
def get_csvfiles(listdir):
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
return csvfiles
###############################################################
## LOADING SETTINGS ##
###############################################################
# load the default feature set
cur_dir = os.getcwd()
prevdir= prev_dir(cur_dir)
sys.path.append(prevdir+'/train_dir')
settings=json.load(open(prevdir+'/settings.json'))
# get all the default feature arrays
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
create_csv=settings['create_csv']
# prepare training and testing data (should have been already featurized) - # of classes/folders
os.chdir(prevdir+'/train_dir')
data_dir=os.getcwd()
listdir=os.listdir()
folders=get_folders(listdir)
csvfiles=get_csvfiles(listdir)
# now assess folders by content type
data=dict()
for i in range(len(folders)):
os.chdir(folders[i])
listdir=os.listdir()
filetype=classifyfolder(listdir)
data[folders[i]]=filetype
os.chdir(data_dir)
###############################################################
## INITIALIZE CLASSES ##
###############################################################
# get all information from sys.argv, and if not,
# go through asking user for the proper parameters
try:
problemtype=sys.argv[1]
mtype=sys.argv[3]
if mtype == 'c':
classnum=sys.argv[2]
common_name=sys.argv[4]
classes=list()
for i in range(int(classnum)):
classes.append(sys.argv[i+5])
else:
classnum=1
problemtype='csv'
mtype=sys.argv[1]
csvfile=sys.argv[2]
classes=[sys.argv[3]]
common_name=csvfile[0:-4]
except:
# now ask user what type of problem they are trying to solve
mtype=input('is this a classification (c) or regression (r) problem? \n')
while mtype not in ['c','r']:
print('input not recognized...')
mtype=input('is this a classification (c) or regression (r) problem? \n')
if mtype == 'c':
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
while problemtype not in ['1','2','3','4','5']:
print('answer not recognized...')
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
if problemtype=='1':
problemtype='audio'
elif problemtype=='2':
problemtype='text'
elif problemtype=='3':
problemtype='image'
elif problemtype=='4':
problemtype='video'
elif problemtype=='5':
problemtype='csv'
if problemtype != 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
count=0
availableclasses=list()
for i in range(len(folders)):
if data[folders[i]]==problemtype:
availableclasses.append(folders[i])
count=count+1
classnum=input('how many classes would you like to model? (%s available) \n'%(str(count)))
print('these are the available classes: ')
print(availableclasses)
# get all if all (good for many classes)
classes=list()
if classnum=='all':
for i in range(len(availableclasses)):
classes.append(availableclasses[i])
else:
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
elif problemtype == 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
print('csv file options are: %s \n'%(csvfiles))
csvfile=input('which csvfile would you like to use for classification? \n')
g=pd.read_csv(csvfile)
columns=list(g)
print('potential targets include: %s'%(columns))
target=input('what target would you like to use? \n')
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
elif mtype =='r':
# for regression problems we need a target column to predict / classes from a .CSV
problemtype='csv'
# assumes the .CSV file is in the train dir
os.chdir(prevdir+'/train_dir')
listdir=os.listdir()
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
while csvfile not in csvfiles:
print('answer not recognized...')
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
# the available classes are only the numeric columns from the spreadsheet
data = pd.read_csv(csvfile)
columns = list(data)
availableclasses=list()
for i in range(len(columns)):
# look at filetype extension in each column
coldata=data[columns[i]]
sampletypes=list()
for j in range(len(coldata)):
try:
values=float(coldata[j])
sampletypes.append('numerical')
except:
if coldata[j].endswith('.wav'):
sampletypes.append('audio')
elif coldata[j].endswith('.txt'):
sampletypes.append('text')
elif coldata[j].endswith('.png'):
sampletypes.append('image')
elif coldata[j].endswith('.mp4'):
sampletypes.append('video')
else:
sampletypes.append('other')
coltype=most_common(sampletypes)
# correct the other category if needed
if coltype == 'other':
# if coltype.endswith('.csv'):
# coltype='csv'
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
					# 10 or more unique values, so treat this column as free-form typed text input
coltype='typedtext'
if coltype == 'numerical':
availableclasses.append(columns[i])
if len(availableclasses) > 0:
classnum=input('how many classes would you like to model? (%s available) \n'%(str(len(availableclasses))))
print('these are the available classes: %s'%(str(availableclasses)))
classes=list()
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
else:
print('no classes available... ending session')
sys.exit()
common_name=input('what is the 1-word common name for the problem you are working on? (e.g. gender for male/female classification) \n')
###############################################################
## UPGRADE MODULES / LOAD MODULES ##
###############################################################
print('-----------------------------------')
print(' LOADING MODULES ')
print('-----------------------------------')
# upgrade to have the proper scikit-learn version later
os.chdir(cur_dir)
os.system('python3 upgrade.py')
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_curve
###############################################################
## CLEAN THE DATA ##
###############################################################
clean_data=settings['clean_data']
clean_dir=prevdir+'/cleaning'
if clean_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('CLEANING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# clean audio via default_audio_cleaners
os.chdir(clean_dir+'/audio_cleaning')
elif problemtype == 'text':
# clean text via default_text_cleaners
os.chdir(clean_dir+'/text_cleaning')
elif problemtype == 'image':
# clean images via default_image_cleaners
os.chdir(clean_dir+'/image_cleaning')
elif problemtype == 'video':
# clean video via default_video_cleaners
os.chdir(clean_dir+'/video_cleaning')
elif problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
elif clean_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## AUGMENT THE DATA ##
###############################################################
augment_data=settings['augment_data']
augment_dir=prevdir+'/augmentation'
if augment_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('AUGMENTING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# augment audio via default_audio_augmenters
os.chdir(augment_dir+'/audio_augmentation')
elif problemtype == 'text':
# augment text via default_text_augmenters
os.chdir(augment_dir+'/text_augmentation')
elif problemtype == 'image':
# augment images via default_image_augmenters
os.chdir(augment_dir+'/image_augmentation')
elif problemtype == 'video':
# augment video via default_video_augmenters
os.chdir(augment_dir+'/video_augmentation')
elif problemtype == 'csv':
# augment .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
elif augment_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# featurize .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## FEATURIZE FILES ##
###############################################################
# now featurize each class (in proper folder)
if mtype == 'c':
data={}
print('-----------------------------------')
print(f.renderText('FEATURIZING DATA'))
print('-----------------------------------')
if problemtype == 'csv':
# csv features should have already been defined
# need to separate into number of unique classes
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
csv_feature_labels=list(csv_features)
classes=list(set(list(csv_labels)))
for i in range(len(classes)):
class_type = classes[i]
feature_list=list()
label_list=list()
for i in range(len(csv_features)):
if csv_labels[i] == class_type:
feature_list.append(list(csv_features.iloc[i,:]))
label_list.append(csv_feature_labels)
data[class_type]=feature_list
else:
#
for i in range(len(classes)):
class_type=classes[i]
if problemtype == 'audio':
# featurize audio
os.chdir(prevdir+'/features/audio_features')
default_features=default_audio_features
elif problemtype == 'text':
# featurize text
os.chdir(prevdir+'/features/text_features')
default_features=default_text_features
elif problemtype == 'image':
# featurize images
os.chdir(prevdir+'/features/image_features')
default_features=default_image_features
elif problemtype == 'video':
# featurize video
os.chdir(prevdir+'/features/video_features')
default_features=default_video_features
print('-----------------------------------')
print(' FEATURIZING %s'%(classes[i].upper()))
print('-----------------------------------')
os.system('python3 featurize.py "%s"'%(data_dir+'/'+classes[i]))
os.chdir(data_dir+'/'+classes[i])
# load audio features
listdir=os.listdir()
feature_list=list()
label_list=list()
for j in range(len(listdir)):
if listdir[j][-5:]=='.json':
try:
g=json.load(open(listdir[j]))
# consolidate all features into one array (if featurizing with multiple featurizers)
default_feature=list()
default_label=list()
for k in range(len(default_features)):
default_feature=default_feature+g['features'][problemtype][default_features[k]]['features']
default_label=default_label+g['features'][problemtype][default_features[k]]['labels']
feature_list.append(default_feature)
label_list.append(default_label)
except:
print('ERROR - skipping ' + listdir[j])
data[class_type]=feature_list
elif mtype == 'r':
# featurize .CSV
os.chdir(prevdir+'/features/csv_features')
output_file=str(uuid.uuid1())+'.csv'
os.system('python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"'%(prevdir+'/train_dir/'+csvfile, prevdir+'/train_dir/'+output_file, classes[0]))
csvfile=output_file
default_features=['csv_regression']
###############################################################
## GENERATE TRAINING DATA ##
###############################################################
print('-----------------------------------')
print(f.renderText('CREATING TRAINING DATA'))
print('-----------------------------------')
# perform class balance such that both classes have the same number
# of members (true by default, but can also be false)
os.chdir(prevdir+'/training/')
model_dir=prevdir+'/models'
balance=settings['balance_data']
remove_outliers=settings['remove_outliers']
outlier_types=settings['default_outlier_detector']
if mtype == 'c':
if problemtype != 'csv':
jsonfile=''
for i in range(len(classes)):
if i==0:
jsonfile=classes[i]
else:
jsonfile=jsonfile+'_'+classes[i]
jsonfile=jsonfile+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_dir=prevdir+'/train_dir/'+classes[0]
os.chdir(labels_dir)
listdir=os.listdir()
features_file=''
for i in range(len(listdir)):
if listdir[i].endswith('.json'):
features_file=listdir[i]
labels_=list()
for i in range(len(default_features)):
tlabel=json.load(open(features_file))['features'][problemtype][default_features[i]]['labels']
labels_=labels_+tlabel
elif problemtype == 'csv':
# format data appropriately
jsonfile=target+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_=csv_feature_labels
elif mtype == 'r':
regression_data=pd.read_csv(prevdir+'/train_dir/'+csvfile)
print(csvfile)
# get features and labels
features_=regression_data.drop(columns=classes, axis=1)
labels_=list(features_)
labels_csv=regression_data.drop(columns=list(features_), axis=1)
# iterate through each column and make into proper features and labels
features=list()
labels=list()
# testing
# print(len(features_))
# print(len(labels_))
# print(features_)
# print(labels_)
# print(features_.iloc[0,:])
# print(labels_.iloc[0,:])
# get features and labels
for i in range(len(features_)):
features.append(list(features_.iloc[i,:]))
labels.append(list(labels_csv.iloc[i,:]))
# convert to name alldata just to be consistent
alldata=features
# print(alldata[0])
# print(labels[0])
# print(labels_)
os.chdir(model_dir)
# get the split from the settings.json
try:
test_size=settings['test_size']
except:
test_size=0.25
# error checking around lengths of arrays and deleting as necessary
lengths=list()
for i in range(len(alldata)):
lengths.append(len(alldata[i]))
# CLEAN IF DIMENSIONS DO NOT MATCH!!
maxval=max(lengths)
minval=min(lengths)
delete_ind=list()
inds=list()
alldata=np.array(alldata)
labels=np.array(labels)
if maxval != minval:
if lengths.count(maxval) > lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been removed
if lengths[i] == minval:
delete_ind.append(i)
elif lengths.count(maxval) < lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been added
if lengths[i] == maxval:
delete_ind.append(i)
print('DELETING THESE INDICES: %s'%(str(delete_ind)))
print(alldata.shape)
print(labels.shape)
alldata=np.delete(alldata, tuple(delete_ind), axis=0)
labels=np.delete(labels, tuple(delete_ind))
print(alldata.shape)
print(labels.shape)
# # now see if any element in the array is a NaN and do not include if so in alldata or labels
# for i in range(len(alldata)):
# try:
# array_has_nan = list(np.isnan(np.array(alldata[i]))).count(True)
# array_has_string=list(np.char.isnumeric(np.array(alldata[i]))).count(False)
# except:
# array_has_string=1
# if array_has_nan > 0 or array_has_string > 0:
# inds.append(i)
# print(alldata[i])
# if len(inds) > 0:
# print('DELETING THESE INDICES: %s'%(str(inds)))
# alldata=np.delete(alldata, tuple(inds))
# labels=np.delete(labels, tuple(inds))
# REMOVE OUTLIERS IF SETTING IS TRUE
alldata=np.array(alldata)
labels=np.array(labels)
if remove_outliers==True:
print('-----------------------------------')
print(' REMOVING OUTLIERS')
print('-----------------------------------')
for i in range(len(outlier_types)):
outlier_type=outlier_types[i]
if outlier_type =='isolationforest':
from sklearn.ensemble import IsolationForest
clf = IsolationForest(random_state=0).fit(alldata)
y_pred = clf.predict(alldata)
inlier_ind=list(np.where(y_pred==1))
outlier_ind=list(np.where(y_pred==-1))
y_pred = y_pred.tolist()
print(type(y_pred))
print(type(y_pred[0]))
n_inliers = y_pred.count(1)
n_outliers = y_pred.count(-1)
print(n_inliers)
print(n_outliers)
# shape before
print(alldata.shape)
print(labels.shape)
# delete outliers
alldata=np.delete(alldata, tuple(outlier_ind), axis=0)
labels=np.delete(labels, tuple(outlier_ind))
print(alldata.shape)
print(labels.shape)
elif outlier_type=='zscore':
os.system('pip3 install statsmodels==0.11.1')
from scipy import stats
from statsmodels.formula.api import ols
# https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba
z = np.abs(stats.zscore(alldata))
# print(z)
threshold = 3
inds=list(set(np.where(z>threshold)[0]))
print(len(inds))
print(tuple(inds))
print(alldata.shape)
print('-->')
alldata = np.delete(alldata, tuple(inds), axis=0)
print(alldata.shape)
labels = np.delete(labels, tuple(inds))
print(len(alldata))
print(len(labels))
# rebalance data to all be the same length
newlabels=list(labels)
outlier_class=list()
for i in range(len(classes)):
outlier_class.append(newlabels.count(i))
lengths=np.array(outlier_class)
minlength=np.amin(outlier_class)
# now load all the classes
for i in range(len(classes)):
# only balance if specified in settings
if balance==True:
count2=newlabels.count(i)
while count2 > minlength:
count2=newlabels.count(i)
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(count2-minlength)))
ind=list(labels).index(i)
alldata=np.delete(alldata, tuple([ind]), axis=0)
labels=np.delete(labels, tuple([ind]))
newlabels=list(labels)
alldata=list(alldata)
labels=list(labels)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert everything to numpy arrays (for testing later)
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# create list of created csv files
created_csv_files=list()
# create training and testing datasets and save to a .CSV file for archive purposes
# this ensures that all machine learning training methods use the same training data
basefile=common_name
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all.csv'.upper())
if basefile+'_all.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all.csv',index=False)
created_csv_files.append(basefile+'_all.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all.csv'))
try:
print(basefile+'_train.csv'.upper())
if basefile+'_train.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train.csv',index=False)
created_csv_files.append(basefile+'_train.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train.csv'))
try:
print(basefile+'_test.csv'.upper())
if basefile+'_test.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test.csv',index=False)
created_csv_files.append(basefile+'_test.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test.csv'))
############################################################
## DATA TRANSFORMATION ##
############################################################
'''
Scale features via scalers, dimensionality reduction techniques,
and feature selection strategies per the settings.json document.
'''
preprocess_dir=prevdir+'/preprocessing'
os.chdir(preprocess_dir)
# get all the important settings for the transformations
scale_features=settings['scale_features']
reduce_dimensions=settings['reduce_dimensions']
select_features=settings['select_features']
default_scalers=settings['default_scaler']
default_reducers=settings['default_dimensionality_reducer']
default_selectors=settings['default_feature_selector']
# get command for terminal
transform_command=''
if problemtype == 'csv' and mtype == 'c':
transform_command=transform_command+' "'+'Class'+'"'
else:
for i in range(len(classes)):
transform_command=transform_command+' "'+classes[i]+'"'
# get filename / create a unique file name
if mtype=='r':
t_filename='r_'+common_name
elif mtype=='c':
t_filename='c_'+common_name
# only add names in if True
if scale_features == True:
for i in range(len(default_scalers)):
t_filename=t_filename+'_'+default_scalers[i]
if reduce_dimensions == True:
for i in range(len(default_reducers)):
t_filename=t_filename+'_'+default_reducers[i]
if select_features == True:
for i in range(len(default_selectors)):
t_filename=t_filename+'_'+default_selectors[i]
transform_file=t_filename+'.pickle'
if scale_features == True or reduce_dimensions == True or select_features == True:
print('----------------------------------')
print(f.renderText('TRANSFORMING DATA'))
print('----------------------------------')
# go to proper transformer directory
try:
os.chdir(problemtype+'_transformer')
except:
os.mkdir(problemtype+'_transformer')
os.chdir(problemtype+'_transformer')
# train transformer if it doesn't already exist
os.system('pip3 install scikit-learn==0.22.2.post1')
if transform_file in os.listdir():
# remove file if in listdir to avoid conflicts with naming
os.remove(transform_file)
print('making transformer...')
alldata=np.asarray(alldata)
labels=np.asarray(labels)
os.chdir(preprocess_dir)
if mtype == 'c':
print('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.system('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.chdir(problemtype+'_transformer')
print(transform_file)
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(np.array(alldata))
elif mtype == 'r':
command='python3 transform.py "%s" "%s" "%s" "%s" "%s" "%s"'%('csv', 'r', classes[0], csvfile, prevdir+'/train_dir/', common_name)
print(command)
os.system(command)
os.chdir(problemtype+'_transformer')
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(alldata)
os.chdir(preprocess_dir)
os.system('python3 load_transformer.py "%s" "%s"'%(problemtype, transform_file))
# now make new files as .CSV
os.chdir(model_dir)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert to numpy arrays
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# get new labels_ array
labels_=list()
for i in range(len(alldata[0].tolist())):
labels_.append('transformed_feature_%s'%(str(i)))
# now create transformed excel sheets
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all_transformed.csv'.upper())
if basefile+'_all_transformed.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all_transformed.csv',index=False)
created_csv_files.append(basefile+'_all_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all_transformed.csv'))
try:
print(basefile+'_train_transformed.csv'.upper())
if basefile+'_train_transformed.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train_transformed.csv',index=False)
created_csv_files.append(basefile+'_train_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train_transformed.csv'))
try:
print(basefile+'_test_transformed.csv'.upper())
if basefile+'_test_transformed.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test_transformed.csv',index=False)
created_csv_files.append(basefile+'_test_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test_transformed.csv'))
else:
# make a transform model == '' so that later during model training this can be skipped
transform_model=''
############################################################
## VISUALIZE DATA ##
############################################################
visualize_data=settings['visualize_data']
visual_dir=prevdir+'/visualize'
model_session=str(uuid.uuid1())
os.chdir(visual_dir)
if visualize_data == True and mtype == 'c':
print('----------------------------------')
print(f.renderText('VISUALIZING DATA'))
print('----------------------------------')
command='python3 visualize.py %s'%(problemtype)
for i in range(len(classes)):
command=command+' "'+classes[i]+'"'
os.system(command)
# restructure the visualization directory
os.chdir(visual_dir+'/visualization_session')
os.mkdir('visualizations')
vizdir=os.getcwd()
# move directories so that visualization is separate from main model directory
shutil.move(vizdir+'/clustering', vizdir+'/visualizations/clustering')
shutil.move(vizdir+'/feature_ranking', vizdir+'/visualizations/feature_ranking')
shutil.move(vizdir+'/model_selection', vizdir+'/visualizations/model_selection')
	# go back to the main directory
os.chdir(visual_dir)
	# now copy over the visualization directory to the model session directory
try:
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
except:
shutil.rmtree(model_dir+'/'+model_session)
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json',model_dir+'/%s/settings.json'%(model_session))
else:
# make a model session for next section if it doesn't exist from visualization directory
os.chdir(model_dir)
try:
os.mkdir(model_session)
except:
shutil.rmtree(model_session)
os.mkdir(model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json', model_dir+'/%s/settings.json'%(model_session))
############################################################
## TRAIN THE MODEL ##
############################################################
'''
Now we can train the machine learning model via the default_training script.
Note you can specify multiple training scripts and it will consecutively model the
files appropriately.
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# Here is what all the variables below mean:
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# alldata = list of features in an array for model training
# [[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875...],
...
[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875,...]]
# labels = list of labels in an array for model training
# ['males','females',...,'males','females']
# mtype = classification or regression problem?
# 'c' --> classification
# 'r' --> regression
# jsonfile = filename of the .JSON document separating classes
# males_females.json
# problemtype = type of problem selected
# 'audio' --> audio files
# 'image' --> images files
# 'text' --> text files
# 'video' --> video files
# 'csv' --> csv files
# default_featurenames = default feature array(s) to use for modeling
# ['librosa_features']
# settings = overall settings currently used for model training
# output of the settings.json document
-----
# transform_model = transformer model if applicable
# useful for data transformation as part of the model initialization process (if pickle file)
# uses scikit-learn pipeline
# X_train, X_test, y_train, y_test
# training datasets used in the .CSV documents
# also can use pandas dataframe if applicable (loading in the model dir)
'''
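# Most of the trainer modules imported below follow the same call shape (train_x is a placeholder
# name here; scsr is the main exception, which also takes minlength / model_dir):
#   modelname, modeldir, files = train_x.train_x(X_train, X_test, y_train, y_test, mtype,
#                                                common_name_model, problemtype, classes,
#                                                default_featurenames, transform_model,
#                                                settings, model_session)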
print('----------------------------------')
print(f.renderText('MODELING DATA'))
print('----------------------------------')
# get defaults
default_training_scripts=settings['default_training_script']
model_compress=settings['model_compress']
default_featurenames=''
if problemtype != 'csv' and mtype == 'c':
for i in range(len(default_features)):
if i ==0:
default_featurenames=default_features[i]
else:
default_featurenames=default_featurenames+'_|_'+default_features[i]
else:
default_featurenames='csv_classification'
# just move all created .csv files into model_session directory
os.chdir(model_dir)
os.chdir(model_session)
os.mkdir('data')
for i in range(len(created_csv_files)):
shutil.move(model_dir+'/'+created_csv_files[i], os.getcwd()+'/data/'+created_csv_files[i])
# initialize i (for tqdm) and go through all model training scripts
i=0
for i in tqdm(range(len(default_training_scripts)), desc=default_training_scripts[i]):
try:
model_start_time=time.time()
# go to model directory
os.chdir(model_dir)
# get common name and default training script to select proper model trainer
default_training_script=default_training_scripts[i]
common_name_model=common_name+'_'+default_training_script
model_exists, model_listdir = pursue_modeling(mtype, model_dir, problemtype, default_training_script, common_name_model)
if model_exists == False:
print('----------------------------------')
print(' .... training %s '%(default_training_script.upper()))
print('----------------------------------')
if default_training_script=='adanet':
print('Adanet training is coming soon! Please use a different model setting for now.')
# import train_adanet as ta
# ta.train_adanet(mtype, classes, jsonfile, alldata, labels, feature_labels, problemtype, default_featurenames)
elif default_training_script=='alphapy':
import train_alphapy as talpy
modelname, modeldir, files=talpy.train_alphapy(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='atm':
import train_atm as tatm
modelname, modeldir, files=tatm.train_atm(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autobazaar':
import train_autobazaar as autobzr
modelname, modeldir, files=autobzr.train_autobazaar(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogbt':
import train_autogbt as tautogbt
modelname, modeldir, files=tautogbt.train_autogbt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogluon':
import train_autogluon as tautg
modelname, modeldir, files, test_data=tautg.train_autogluon(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokaggle':
import train_autokaggle as autokag
modelname, modeldir, files=autokag.train_autokaggle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokeras':
import train_autokeras as autokeras_
modelname, modeldir, files=autokeras_.train_autokeras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='automl':
import train_automl as auto_ml
modelname, modeldir, files=auto_ml.train_automl(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autosklearn':
print('Autosklearn training is unstable! Please use a different model setting for now.')
# import train_autosklearn as taskl
# taskl.train_autosklearn(alldata, labels, mtype, jsonfile, problemtype, default_featurenames)
elif default_training_script=='autopytorch':
import train_autopytorch as autotorch_
modelname, modeldir, files=autotorch_.train_autopytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='btb':
import train_btb as tbtb
modelname, modeldir, files=tbtb.train_btb(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='cvopt':
import train_cvopt as tcvopt
modelname, modeldir, files = tcvopt.train_cvopt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='devol':
import train_devol as td
modelname, modeldir, files=td.train_devol(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gama':
import train_gama as tgama
modelname, modeldir, files=tgama.train_gama(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gentun':
import train_gentun as tgentun
modelname, modeldir, files=tgentun.train_gentun(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hyperband':
import train_hyperband as thband
modelname, modeldir, files = thband.train_hyperband(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hypsklearn':
import train_hypsklearn as th
modelname, modeldir, files=th.train_hypsklearn(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hungabunga':
import train_hungabunga as thung
modelname, modeldir, files=thung.train_hungabunga(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='imbalance':
import train_imbalance as timb
modelname, modeldir, files=timb.train_imbalance(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='keras':
import train_keras as tk
modelname, modeldir, files=tk.train_keras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='ludwig':
import train_ludwig as tl
modelname, modeldir, files=tl.train_ludwig(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlblocks':
import train_mlblocks as mlb
modelname, modeldir, files=mlb.train_mlblocks(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlbox':
import train_mlbox as mlbox_
modelname, modeldir, files=mlbox_.train_mlbox(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='neuraxle':
if mtype=='c':
print('Neuraxle does not support classification at this time. Please use a different model training script')
break
else:
import train_neuraxle as tneuraxle
modelname, modeldir, files=tneuraxle.train_neuraxle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='plda':
print('PLDA training is unstable! Please use a different model setting for now.')
# import train_pLDA as tp
# tp.train_pLDA(alldata,labels)
elif default_training_script=='pytorch':
import train_pytorch as t_pytorch
modelname, modeldir, files = t_pytorch.train_pytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='safe':
import train_safe as tsafe
modelname, modeldir, files=tsafe.train_safe(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='scsr':
import train_scsr as scsr
if mtype == 'c':
modelname, modeldir, files=scsr.train_sc(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,minlength)
elif mtype == 'r':
modelname, modeldir, files=scsr.train_sr(X_train,X_test,y_train,y_test,common_name_model,problemtype,classes,default_featurenames,transform_model,model_dir,settings)
elif default_training_script=='tpot':
import train_TPOT as tt
modelname, modeldir, files=tt.train_TPOT(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
############################################################
## CALCULATE METRICS / PLOT ROC CURVE ##
############################################################
if modelname.endswith('.pickle'):
foldername=modelname[0:-7]
elif modelname.endswith('.h5'):
foldername=modelname[0:-3]
else:
foldername=common_name_model
# copy the folder in case there are multiple models being trained
try:
shutil.copytree(model_session, foldername)
except:
shutil.rmtree(foldername)
shutil.copytree(model_session, foldername)
cur_dir2=os.getcwd()
os.chdir(foldername)
os.mkdir('model')
os.chdir('model')
model_dir_temp=os.getcwd()
# dump transform model to the models directory if necessary
if transform_model == '':
transformer_name=''
else:
# dump the tranform model into the current working directory
transformer_name=modelname.split('.')[0]+'_transform.pickle'
tmodel=open(transformer_name,'wb')
pickle.dump(transform_model, tmodel)
tmodel.close()
# move all supplementary files into model folder
for j in range(len(files)):
shutil.move(modeldir+'/'+files[j], model_dir_temp+'/'+files[j])
# load model for getting metrics
if default_training_script not in ['alphapy', 'atm', 'autokeras', 'autopytorch', 'ludwig', 'keras', 'devol']:
loadmodel=open(modelname, 'rb')
clf=pickle.load(loadmodel)
loadmodel.close()
elif default_training_script == 'atm':
from atm import Model
clf=Model.load(modelname)
elif default_training_script == 'autokeras':
import tensorflow as tf
import autokeras as ak
clf = pickle.load(open(modelname, 'rb'))
elif default_training_script=='autopytorch':
import torch
clf=torch.load(modelname)
elif default_training_script == 'ludwig':
from ludwig.api import LudwigModel
clf=LudwigModel.load('ludwig_files/experiment_run/model/')
elif default_training_script in ['devol', 'keras']:
from keras.models import load_model
clf = load_model(modelname)
else:
clf=''
# create test_data variable for anything other than autogluon
if default_training_script != 'autogluon':
test_data=''
# now make main .JSON file for the session summary with metrics
get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time)
# now move to the proper models directory
os.chdir(model_dir)
os.system('python3 create_readme.py "%s"'%(os.getcwd()+'/'+foldername))
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
shutil.move(model_dir+'/'+foldername, os.getcwd()+'/'+foldername)
############################################################
## COMPRESS MODELS ##
############################################################
if model_compress == True:
print(f.renderText('COMPRESSING MODEL'))
# now compress the model according to model type
if default_training_script in ['hypsklearn', 'scsr', 'tpot']:
# all .pickle files and can compress via scikit-small-ensemble
from sklearn.externals import joblib
# open up model
loadmodel=open(modelname, 'rb')
model = pickle.load(loadmodel)
loadmodel.close()
# compress - from 0 to 9. Higher value means more compression, but also slower read and write times.
# Using a value of 3 is often a good compromise.
joblib.dump(model, modelname[0:-7]+'_compressed.joblib',compress=3)
# can now load compressed models as such
# thenewmodel=joblib.load(modelname[0:-7]+'_compressed.joblib')
					# leads to up to a 10x reduction in model size and faster loading (roughly 0.72 sec -> 0.23 sec, i.e. 3-4x faster)
					# note: compressed models may not load if the sklearn and python versions differ between the saving and loading environments.
elif default_training_script in ['devol', 'keras']:
# can compress with keras_compressor
import logging
from keras.models import load_model
from keras_compressor.compressor import compress
logging.basicConfig(
level=logging.INFO,
)
try:
print('compressing model!!')
model = load_model(modelname)
model = compress(model, 7e-1)
model.save(modelname[0:-3]+'_compressed.h5')
except:
print('error compressing model!!')
else:
# for everything else, we can compress pocketflow models in the future.
print('We cannot currently compress %s models. We are working on this!! \n\n The model will remain uncompressed for now'%(default_training_script))
else:
if mtype == 'r':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_regression', problemtype+'_models', str(model_listdir)))
elif mtype == 'c':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_classifier', problemtype+'_models', str(model_listdir)))
############################################################
## PRODUCTIONIZING MODELS ##
############################################################
# TO BE COMPLETED IN THE FUTURE!
except:
print('ERROR - error in modeling session')
| StarcoderdataPython |
3237466 | import numpy as np
import tensorflow as tf
def transform_box_to_discrete(dims, act_space):
    # number of discrete actions needed to cover the Box space: dims choices per action dimension
    n = dims**(np.sum(act_space.shape))
return n, tf.constant(act_space.high, dtype=tf.float32), tf.constant(act_space.low, dtype=tf.float32)
def transform_discrete_to_box(act_space):
n = tf.cast(tf.constant(act_space.n), dtype=tf.float32)
act_shape = (1,)
return act_shape, n
def contin_act_to_discrete(action, act_shape, highs, lows):
for dim in act_shape:
raise NotImplementedError
def discrete_act_to_contin(action, n):
return tf.cast(tf.cast(action, dtype=tf.float32) * tf.cast(n, dtype=tf.float32), dtype=tf.int32)
class DynamicFactor:
def __init__(self, tau: float = 1.0, init_factor: float = 1.0):
self.tau = tau
self.dynamic_factor = init_factor
def __call__(self, alpha, max_from_sample):
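        # exponential moving average of max_from_sample, smoothed by tau (the alpha argument is not used in the update)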
self.dynamic_factor = self.tau * max_from_sample + (1 - self.tau) * self.dynamic_factor
return self.dynamic_factor
class TanhTransformer:
def __init__(self, alpha: float = 1.0):
self.alpha = alpha
def __call__(self, loss):
return self.alpha * tf.math.tanh(loss) | StarcoderdataPython |
9257 | from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.billing.tests.utils import get_financial_report_url
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices.tests import factories as invoice_factories
from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures
@freeze_time('2017-01-10')
class PriceCurrentTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = invoice_fixtures.InvoiceFixture()
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_MONTH,
unit_price=100,
quantity=1,
)
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_DAY,
unit_price=3,
quantity=31,
)
def test_current_price(self):
self.client.force_authenticate(self.fixture.staff)
url = get_financial_report_url(self.fixture.project.customer)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['billing_price_estimate']['current'], 100 + 9 * 3)
diff = (
data['billing_price_estimate']['total']
- data['billing_price_estimate']['current']
)
self.assertEqual(diff, 22 * 3)
| StarcoderdataPython |
60889 | <filename>blog/models.py
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=128,verbose_name='博客分类')
def __str__(self):
return self.name
class Meta:
verbose_name = '博客分类'
verbose_name_plural = '博客分类'
class Tag(models.Model):
name = models.CharField(max_length=128,verbose_name='博客标签')
def __str__(self):
return self.name
class Meta:
verbose_name = '博客标签'
verbose_name_plural = '博客标签'
class Entry(models.Model):
title = models.CharField(max_length=128,verbose_name='文章标题')
    author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='博客作者')  # on_delete is required from Django 2.0 onwards
img = models.ImageField(upload_to='blog_images',null=True,blank=True,verbose_name='博客配图')
body = models.TextField(verbose_name='博客正文')
abstract = models.TextField(max_length=256,null=True,blank=True,verbose_name='博客摘要')
visiting = models.PositiveIntegerField(default=0,verbose_name='博客访问量')
category = models.ManyToManyField('Category',verbose_name='博客分类')
tags = models.ManyToManyField('Tag',verbose_name='博客标签')
created_time = models.DateTimeField(auto_now_add=True,verbose_name='创建时间')
modified_time = models.DateTimeField(auto_now=True,verbose_name='修改时间')
def __str__(self):
return self.title
class Meta:
ordering = ['-created_time']
verbose_name = '博客'
verbose_name_plural = '博客' | StarcoderdataPython |
1787550 | <filename>Project Euler (HackerRank)/063. Powerful digit counts.py
n = int(input())
for i in range(1,20):
j=1
while 1:
power = i**j
length = len(str(power))
if length == j :
j+=1
if length == n:
print(power)
else:
break
| StarcoderdataPython |
144325 | import numpy as np
def compute_fans(shape):
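    """Return (fan_in, fan_out) for a weight shape: (in, out) for 2-D shapes, otherwise (prod of trailing dims, leading dim)."""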
if len(shape) == 2:
fan_in, fan_out = shape[0], shape[1]
else:
fan_in, fan_out = np.prod(shape[1:]), shape[0]
return fan_in, fan_out
class initializer(object):
def __call__(self, shape):
return self.init(shape).astype(np.float32)
def init(self, shape):
raise NotImplementedError
class constant(initializer):
def __init__(self, val):
self._val = val
def init(self, shape):
return np.full(shape=shape, fill_value=self._val).astype(np.float32)
class zeros(constant):
def __init__(self):
super(zeros, self).__init__(0.0)
class xavieruniform(initializer):
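    """Glorot/Xavier uniform initializer: samples U(-a, a) with a = gain * sqrt(6 / (fan_in + fan_out))."""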
def __init__(self, gain=1.0):
self._gain = gain
def init(self, shape):
fan_in, fan_out = compute_fans(shape)
a = self._gain * np.sqrt(6.0 / (fan_in + fan_out))
return np.random.uniform(low=-a, high=a, size=shape).astype(np.float32)
| StarcoderdataPython |
30457 | <filename>regions/core/regions.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a Regions class.
"""
from .core import Region
from .registry import RegionsRegistry
__all__ = ['Regions']
__doctest_skip__ = ['Regions.read', 'Regions.write', 'Regions.parse',
'Regions.serialize']
class Regions:
"""
Class to hold a list of `~regions.Region` objects.
This class provides a unified I/O interface that supports reading,
writing, parsing, and serializing many region data formats.
Parameters
----------
regions : list of `~regions.Region`
The list of region objects.
"""
def __init__(self, regions):
self.regions = regions
def __getitem__(self, index):
newregions = self.regions[index]
if isinstance(newregions, Region): # one item
return newregions
else:
newcls = object.__new__(self.__class__)
newcls.regions = newregions
return newcls
def __repr__(self):
cls_name = self.__class__.__name__
return f'<{cls_name}({repr(self.regions)})>'
def __str__(self):
return str(self.regions)
def __len__(self):
return len(self.regions)
def append(self, region):
"""
Append the region to the end of the list of regions.
Parameters
----------
region : `~regions.Region`
The region to append.
"""
self.regions.append(region)
def extend(self, regions):
"""
Extend the list of regions by appending elements from the
input regions.
Parameters
----------
regions : list of `~regions.Region`
A list of regions to include.
"""
self.regions.extend(regions)
def insert(self, index, region):
"""
Insert the region before index.
Parameters
----------
index : int
The list index.
region : `~regions.Region`
The region to insert.
"""
self.regions.insert(index, region)
def reverse(self):
"""
Reverse the list of regions in place.
"""
self.regions.reverse()
def pop(self, index=-1):
"""
Remove and return the region at index.
Parameters
----------
index : int, optional
The index of the region to remove.
Returns
-------
result : `~regions.Region`
"""
return self.regions.pop(index)
def copy(self):
"""
Return a shallow copy of this object.
"""
newcls = object.__new__(self.__class__)
newcls.regions = self.regions.copy()
return newcls
@classmethod
def get_formats(cls):
"""
Get the registered I/O formats as a Table.
"""
return RegionsRegistry.get_formats(cls)
@classmethod
def read(cls, filename, format=None, cache=False, **kwargs):
"""
Read and parse a region file and return as a Regions object.
This method allows reading a file in many supported data
formats, e.g.,::
>>> from regions import Regions
>>> reg1 = Regions.read('regions.reg', format='ds9')
>>> reg2 = Regions.read('regions.crtf', format='crtf')
>>> reg3 = Regions.read('regions.fits', format='fits')
A list of the available formats for `~regions.Regions` is
available using::
>>> Regions.get_formats()
Parameters
----------
filename : str
The filename or URL of the file to read.
format : str, optional
The file format specifier.
cache : bool or 'update', optional
Whether to cache the contents of remote URLs. If 'update',
check the remote URL for a new version but store the result
in the cache.
**kwargs : dict, optional
Keyword arguments passed to the data reader.
Returns
-------
result : `~regions.Regions`
A `~regions.Regions` object containing the file contents.
"""
return RegionsRegistry.read(filename, cls, format=format,
cache=cache, **kwargs)
@classmethod
def parse(cls, data, format=None, **kwargs):
"""
Parse a region string or table and return as a Regions object.
This method allows parsing region data in many supported data
formats, e.g.,::
>>> from regions import Regions
>>> reg1 = Regions.parse(regions_str, format='ds9')
>>> reg2 = Regions.parse(regions_str, format='crtf')
>>> reg3 = Regions.parse(regions_tbl, format='fits')
A list of the available formats for `~regions.Regions` is
available using::
>>> Regions.get_formats()
Parameters
----------
data : str or `~astropy.table.Table`
The region data to parse.
format : str, optional
The file format specifier.
**kwargs : dict, optional
Keyword arguments passed to the data parser.
Returns
-------
result : `~regions.Regions`
A `~regions.Regions` object containing the data contents.
"""
return RegionsRegistry.parse(data, cls, format=format,
**kwargs)
def write(self, filename, format=None, overwrite=False, **kwargs):
"""
Write the regions to a region file in the specified format.
This method allows writing a file in many supported data
formats, e.g.,::
>>> from regions import Regions
>>> reg = Regions.read('regions.reg', format='ds9')
>>> reg.write('new_regions.reg', format='ds9')
>>> reg.write('new_regions.crtf', format='crtf')
>>> reg.write('new_regions.fits', format='fits')
A list of the available formats for `~regions.Regions` is
available using::
>>> Regions.get_formats()
Parameters
----------
filename : str
The filename or URL of the file to write.
format : str, optional
The file format specifier.
overwrite : bool, optional
If True, overwrite the output file if it exists. Raises an
`OSError` if False and the output file exists. Default is
False.
**kwargs : dict, optional
Keyword arguments passed to the data writer.
"""
return RegionsRegistry.write(self.regions, filename,
self.__class__, format=format,
overwrite=overwrite, **kwargs)
def serialize(self, format=None, **kwargs):
"""
Serialize the regions to a region string or table.
This method allows serializing regions in many supported data
formats, e.g.,::
>>> from regions import Regions
>>> reg = Regions.read('regions.reg', format='ds9')
>>> reg1_str = reg.serialize(format='ds9')
>>> reg2_str = reg.serialize(format='crtf')
>>> reg3_tbl = reg.serialize(format='fits')
A list of the available formats for `~regions.Regions` is
available using::
>>> Regions.get_formats()
Parameters
----------
format : str, optional
The file format specifier.
**kwargs : dict, optional
Keyword arguments passed to the data serializer.
"""
return RegionsRegistry.serialize(self.regions, self.__class__,
format=format, **kwargs)
| StarcoderdataPython |
4804151 | <filename>ProtVR.py
#!/usr/bin/python3
# Author: <NAME>
# Email: <EMAIL>
# URL: https://github.com/sarisabban
#
# Created By: <NAME>
# Created Date: 13 March 2017
import sys
import re
import urllib
import Bio
import os
from Bio.PDB import *
if sys.argv[1]=='-d':
print('Downloading',sys.argv[2],'from http://rcsb.org')
filename=urllib.request.urlopen('http://files.rcsb.org/view/'+sys.argv[2]+'.pdb')
else:
filename=open(sys.argv[1])
#print('Calculating...')
data=open('code.html','w')
data.write('<script src="https://aframe.io/releases/0.5.0/aframe.min.js"></script>\n')
data.write('<a-scene>\n')
data.write('\t<a-sky color="#111111"></a-sky>\n')
for line in filename:
	line = line.decode() if isinstance(line, bytes) else line  # urlopen yields bytes, local files yield str
if line.startswith('ATOM'):
splitline=line.split()
try:
coordinates=(splitline[11],splitline[6],splitline[7],splitline[8])
except:
coordinates=(splitline[10],splitline[6],splitline[7],splitline[8])
	#Convert PyMOL colours to HEX: (255)*(PyMOL Colour Value) https://pymolwiki.org/index.php/Color_Values
if coordinates[0]=='N':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2D2DE1"></a-sphere>\n'
elif coordinates[0]=='C':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2DE12D"></a-sphere>\n'
elif coordinates[0]=='O':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#E14343"></a-sphere>\n'
elif coordinates[0]=='H':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBCBCB"></a-sphere>\n'
elif coordinates[0]=='S':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBAE38"></a-sphere>\n'
elif coordinates[0]=='I':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#830083"></a-sphere>\n'
else:
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#6F6F6F"></a-sphere>\n'
x=' '.join(js)
data.write(x)
data.write('</a-scene>')
data.close()
print('Done')
| StarcoderdataPython |
107046 | <gh_stars>1-10
import torch
import networkx as nx
from torchdiffeq import odeint # odeint_adjoint as odeint
import numpy as np
from collections import OrderedDict, defaultdict
from scipy.spatial.transform import Rotation
import copy
from ..dynamics import ConstrainedHamiltonianDynamics, EuclideanT
class BodyGraph(nx.Graph):
"""docstring"""
def __init__(self):
super().__init__()
self.key2id = OrderedDict()
self.d2ids = defaultdict(list)#4*[[]]
def add_node(self,key,*args,**kwargs):
#print(key,len(self.key2id),self.key2id)
self.key2id[key]=len(self.key2id)
super().add_node(key,*args,**kwargs)
def add_extended_nd(self,key,m,moments=None,d=3,**kwargs):
""" Adds an extended body with name key, mass m and vector of principal
moments representing the eigenvalues of the the 2nd moment matrix
along principle directions.
d specifies the dimensional extent of the rigid body:
d=0 is a point mass with 1dof,
d=1 is a 1d nodesobject (eg beam) with 2dof
d=2 is a 2d object (eg plane or disk) with 3dof
d=3 is a 3d object (eg box,sphere) with 4dof"""
self.add_node(key,m=m,d=d,**kwargs)
self.d2ids[d].extend([self.key2id[key]+i for i in range(d+1)])
for i in range(d):
child_key = f'{key}_{i}'
self.add_node(child_key)
self.add_edge(key,child_key,internal=True,l=1.,I=m*moments[i])
for j in range(i):
self.add_edge(f'{key}_{j}',child_key,internal=True,l=np.sqrt(2))
def add_joint(self,key1,pos1,key2=None,pos2=None,rotation_axis=None):
""" adds a joint between extended bodies key1 and key2 at the position
in the body frame 1 pos1 and body frame 2 pos2. pos1 and pos2 should
be d dimensional vectors, where d is the dimension of the extended body.
If key2 is not specified, the joint connection is to a fixed point in space pos2."""
if key2 is not None:
if rotation_axis is None:
self.add_edge(key1,key2,external=True,joint=(pos1,pos2))
else:
self.add_edge(key1,key2,external=True,joint=(pos1,pos2),rotation_axis=rotation_axis)
else:
self.nodes[key1]['joint']=(pos1,pos2)
if rotation_axis is not None:
self.nodes[key1]['rotation_axis']=rotation_axis
def edges_wattribute(G,node,attribute):
all_edges = G.edges(node,data=True)
    return dict((x[:-1], x[-1][attribute]) for x in all_edges if attribute in x[-1])
class RigidBody:
""" Two dimensional rigid body consisting of point masses on nodes (with zero inertia)
and beams with mass and inertia connecting nodes."""
dt = .1
integration_time=10
body_graph = NotImplemented
_m = None
_minv = None
def mass_matrix(self):
""" """
n = len(self.body_graph.nodes)
M = torch.zeros(n, n, dtype=torch.float64)
for ki, mass in nx.get_node_attributes(self.body_graph, "m").items():
i = self.body_graph.key2id[ki]
M[i, i] += mass
for (ki,kj), I in nx.get_edge_attributes(self.body_graph,"I").items():
i,j = self.body_graph.key2id[ki],self.body_graph.key2id[kj]
M[i,i] += I
M[i,j] -= I
M[j,i] -= I
M[j,j] += I
return M
@property
def M(self):
if self._m is None:
self._m = self.mass_matrix()
return self._m
@property
def Minv(self):
if self._minv is None:
self._minv = self.M.inverse()
return self._minv
def to(self, device=None, dtype=None):
self.M
self.Minv
self._m = self._m.to(device, dtype)
self._minv = self._minv.to(device, dtype)
def DPhi(self, zp):
bs,n,d = zp.shape[0],self.n,self.d
x,p = zp.reshape(bs,2,n,d).unbind(dim=1)
self.to(zp.device,zp.dtype)
Minv = self.Minv#.to(zp.device,dtype=zp.dtype)
v = Minv@p
DPhi = rigid_DPhi(self.body_graph, x, v)
# Convert d/dv to d/dp
DPhi[:,1] = (Minv@DPhi[:,1].reshape(bs,n,-1)).reshape(DPhi[:,1].shape)
return DPhi.reshape(bs,2*n*d,-1)
def sample_initial_conditions(self, n_systems):
raise NotImplementedError
def potential(self, x):
raise NotImplementedError
def hamiltonian(self, t, z):
bs, D = z.shape # of ODE dims, 2*num_particles*space_dim
n = len(self.body_graph.nodes)
x = z[:, : D // 2].reshape(bs, n, -1)
p = z[:, D // 2 :].reshape(bs, n, -1)
T = EuclideanT(p, self.Minv)
V = self.potential(x)
return T + V
def dynamics(self, wgrad=False):
return ConstrainedHamiltonianDynamics(self.hamiltonian, self.DPhi, wgrad=wgrad)
def to_pos_momentum(self, z0):
bs = z0.shape[0]
M = self.M.to(z0.device,z0.dtype)
xp = torch.stack(
[z0[:, 0], M @ z0[:, 1]], dim=1
).reshape(bs, -1)
return xp
def integrate(self, z0, T, tol=1e-7,method="dopri5"): # (x,v) -> (x,p) -> (x,v)
""" Integrate system from z0 to times in T (e.g. linspace(0,10,100))"""
bs = z0.shape[0]
xp = self.to_pos_momentum(z0)
with torch.no_grad():
xpt = odeint(self.dynamics(), xp, T.double(), rtol=tol, method=method)
xps = xpt.permute(1, 0, 2).reshape(bs, len(T), *z0.shape[1:])
Minv = self.Minv.to(z0.device,z0.dtype)
xvs = torch.stack([xps[:, :, 0], Minv @ xps[:, :, 1]], dim=2)
return xvs.to(z0.device)
def animate(self, zt):
# bs, T, 2,n,d
if len(zt.shape) == 5:
j = np.random.randint(zt.shape[0])
xt = zt[j, :, 0, :, :]
else:
xt = zt[:, 0, :, :]
anim = self.animator(xt, self)
return anim.animate()
@property
def animator(self):
return Animation
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return str(self)
def dist_constraints_DPhi(G,x,v):
""" inputs [Graph] [x (bs,n,d)] [v (bs,n,d)]
outputs [DPhi (bs,2,n,d,2,C)] """
bs,n,d = x.shape
p2p_constrs = nx.get_edge_attributes(G,'l'); p2ps = len(p2p_constrs)
tether_constrs = nx.get_node_attributes(G,"tether"); tethers = len(tether_constrs)
DPhi = torch.zeros(bs, 2, n, d, 2,p2ps+tethers, device=x.device, dtype=x.dtype)
# Fixed distance between two points
for cid,((ki,kj),_) in enumerate(p2p_constrs.items()):
i,j = G.key2id[ki],G.key2id[kj]
# Fill out dphi/dx
DPhi[:, 0,i, :, 0,cid] = 2 * (x[:, i] - x[:, j])
DPhi[:, 0,j, :, 0,cid] = 2 * (x[:, j] - x[:, i])
# Fill out d\dot{phi}/dx
DPhi[:, 0,i, :, 1,cid] = 2 * (v[:, i] - v[:, j])
DPhi[:, 0,j, :, 1,cid] = 2 * (v[:, j] - v[:, i])
# Fill out d\dot{phi}/dp
DPhi[:, 1,i, :, 1,cid] = 2 * (x[:, i] - x[:, j])
DPhi[:, 1,j, :, 1,cid] = 2 * (x[:, j] - x[:, i])
#DPhi[:, 1,:, :, 1,cid] = (2 * (x[:, i] - x[:, j])[:, None, :] * (Minv[:, i] - Minv[:, j])[:, :, None])
# Fixed distance between a point and a fixed point in space
for cid, (ki, (pos,_)) in enumerate(tether_constrs.items()):
i = G.key2id[ki]
ci = pos[None].to(x.device,x.dtype)
DPhi[:,0, i, :, 0,cid+p2ps] = 2 * (x[:, i] - ci)
DPhi[:,0, i, :, 1,cid+p2ps] = 2 * v[:, i]
DPhi[:,1, i, :, 1,cid+p2ps] = 2 * (x[:, i] - ci)
return DPhi
def joint_constraints_DPhi(G,x,v):
""" inputs [Graph] [x (bs,n,d)] [v (bs,n,d)]
outputs [DPhi (bs,2,n,d,2,C)].
Since the constraints are linear, x,v are not required. """
bs,n,d = x.shape
edge_joints = nx.get_edge_attributes(G,'joint')
node_joints = nx.get_node_attributes(G,'joint')
disabled_axes = nx.get_edge_attributes(G,'rotation_axis')
num_constraints = len(edge_joints)+len(node_joints)+len(disabled_axes)
DPhi = torch.zeros(bs, 2, n, d, 2,num_constraints,d, device=x.device, dtype=x.dtype)
delta = -1*torch.ones(d+1,d,device=x.device,dtype=x.dtype)
delta[1:] = torch.eye(d,device=x.device,dtype=x.dtype)
# Joints connecting two bodies
jid = 0
for ((ki,kj),(c1,c2)) in edge_joints.items():
i,j = G.key2id[ki],G.key2id[kj]
c1t = torch.cat([1-c1.sum()[None],c1]).to(x.device,x.dtype)
c2t = torch.cat([1-c2.sum()[None],c2]).to(x.device,x.dtype)
di = G.nodes[ki]['d']
dj = G.nodes[kj]['d']
for k in range(d):# (bs, di+1, d, d)
DPhi[:,0,i:i+1+di,k,0,jid,k] = c1t[None]
DPhi[:,0,j:j+1+dj,k,0,jid,k] = -c2t[None]
DPhi[:,1,i:i+1+di,k,1,jid,k] = c1t[None]
DPhi[:,1,j:j+1+dj,k,1,jid,k] = -c2t[None]
jid += 1
if 'rotation_axis' in G[ki][kj]:
ui,uj = G[ki][kj]['rotation_axis']
uit = torch.cat([-ui.sum()[None],ui]).to(x.device,x.dtype)
ujt = torch.cat([-uj.sum()[None],uj]).to(x.device,x.dtype)
for k in range(d):
DPhi[:,0,i:i+1+di,k,0,jid,k] = uit[None]
DPhi[:,0,j:j+1+dj,k,0,jid,k] = -ujt[None]
DPhi[:,1,i:i+1+di,k,1,jid,k] = uit[None]
DPhi[:,1,j:j+1+dj,k,1,jid,k] = -ujt[None]
jid+=1
# Xdelta = delta_matrix.T@x[:,i:i+di+1] # (d,d+1)@(bs,d+1,d)=(bs,d,d)
# # (bs,d+1,d,d)
# obj1_term = (Xdelta*axis[None,:,None]).sum(1)[:,None,:,None]*delta_matrix[None,:,None,:]
# obj2_term = Xdelta.permute(0,2,1)[:,None,:,:]*(delta_matrix@axis[:,None]).squeeze(-1)[None,:,None,None]
# Joints connecting a body to a fixed point in space
for jid2, (ki,(c1,_)) in enumerate(node_joints.items()):
i = G.key2id[ki]
c1t = torch.cat([1-c1.sum()[None],c1]).to(x.device,x.dtype)
di = G.nodes[ki]['d']
for k in range(d):# (bs, di+1, d, d)
DPhi[:,0,i:i+1+di,k,0,jid2+jid,k] = c1t[None]
DPhi[:,1,i:i+1+di,k,1,jid2+jid,k] = c1t[None]
return DPhi.reshape(bs,2,n,d,2,-1)
def rigid_DPhi(rigid_body_graph, x, v):
"""inputs [Graph (n,E)] [x (bs,n,d)] [v (bs, n, d)]
ouput [DPhi (bs, 2,n,d, 2,C)]"""
constraints = (dist_constraints_DPhi,joint_constraints_DPhi)
DPhi = torch.cat([constraint(rigid_body_graph,x,v) for constraint in constraints],dim=-1)
#DPhi[:,1] = (Minv@DPhi[:,1].reshape(bs,n,-1)).reshape(DPhi[:,1].shape)
return DPhi#.reshape(bs,2*n*d,-1) #(bs,2,n,d,2,C)->#(bs,2nd,2C)
def project_onto_constraints(G,z,tol=1e-5):
"""inputs [Graph (n,E)] [z (bs,2,n,d)] ouput [Pz (bs,2,n,d)]
Runs several iterations of Newton-Raphson to minimize the constraint violation """
bs,_,n,d = z.shape
violation = np.inf
with torch.no_grad():
i=0
while violation>tol:
Phi = rigid_Phi(G,z[:,0],z[:,1]) # (bs,2,C)
DPhi = rigid_DPhi(G,z[:,0],z[:,1]) # (bs,2,n,d,2,C)
violation = (Phi**2).mean().sqrt()
J = DPhi.reshape(bs,2*n*d,-1).permute(0,2,1)
if J.shape[-2]<J.shape[-1]:
#Jinv = torch.pinverse(J)
#diff = -(Jinv@Phi.reshape(bs,-1,1)).reshape(*z.shape)
diff = (J.permute(0,2,1)@torch.solve(-Phi.reshape(bs,-1,1),J@J.permute(0,2,1))[0]).reshape(*z.shape)
else: #cry
print(J.shape)
#print(violation)
scale = (z**2).mean().sqrt()
z += diff.clamp(min=-scale/2,max=scale/2)
i+=1
if i>500: raise OverflowError("Newton-Raphson Constraint projection failed to converge")
#print(f"converged in {i} iterations")
return z
def rigid_Phi(G,x,v):
"""inputs [Graph (n,E)] [x (bs,n,d)] [v (bs, n, d)]
ouput [Phi (bs, 2, C)]"""
constraints = (dist_constraints,joint_constraints)
return torch.cat([constraint(G,x,v) for constraint in constraints],dim=-1)
def dist_constraints(G,x,v):
""" inputs [Graph] [x (bs,n,d)] [v (bs,n,d)]
outputs [Phi (bs,2,C)]"""
bs,n,d = x.shape
p2p_constrs = nx.get_edge_attributes(G,'l'); p2ps = len(p2p_constrs)
tether_constrs = nx.get_node_attributes(G,"tether"); tethers = len(tether_constrs)
Phi = torch.zeros(bs, 2, p2ps+tethers, device=x.device, dtype=x.dtype)
# Fixed distance between two points
for cid,((ki,kj),lij) in enumerate(p2p_constrs.items()):
i,j = G.key2id[ki],G.key2id[kj]
#print(x.shape,i,j,ki,kj)
xdiff = x[:, i] - x[:, j]
vdiff = v[:, i] - v[:, j]
Phi[:, 0, cid] = (xdiff**2).sum(-1) - lij**2
Phi[:, 1, cid] = 2*(xdiff*vdiff).sum(-1)
# Fixed distance between a point and a fixed point in space
for cid, (ki, (pos,lij)) in enumerate(tether_constrs.items()):
i = G.key2id[ki]
ci = pos[None].to(x.device,x.dtype)
xdiff = x[:, i] - ci
Phi[:, 0,cid+p2ps] = ((xdiff)**2).sum(-1) - lij**2
Phi[:, 1,cid+p2ps] = 2*(xdiff*v[:,i]).sum(-1)
return Phi
def joint_constraints(G,x,v):
""" inputs [Graph] [x (bs,n,d)] [v (bs,n,d)]
outputs [Phi (bs,2,C)]"""
bs,n,d = x.shape
edge_joints = nx.get_edge_attributes(G,'joint')
node_joints = nx.get_node_attributes(G,'joint')
disabled_axes = nx.get_edge_attributes(G,'rotation_axis')
num_constraints = len(edge_joints)+len(node_joints)+len(disabled_axes)
Phi = torch.zeros(bs,2,num_constraints,d,device=x.device,dtype=x.dtype)
z = torch.stack([x,v],dim=1)
jid=0
for (ki,kj),(c1,c2) in edge_joints.items():
i,j = G.key2id[ki],G.key2id[kj]
c1t = torch.cat([1-c1.sum()[None],c1]).to(x.device,x.dtype)
c2t = torch.cat([1-c2.sum()[None],c2]).to(x.device,x.dtype)
di = G.nodes[ki]['d']
dj = G.nodes[kj]['d']
Phi[:,0,jid,:] = (x[:,i:i+di+1,:]*c1t[None,:,None]).sum(1) - (x[:,j:j+dj+1,:]*c2t[None,:,None]).sum(1)
Phi[:,1,jid,:] = (v[:,i:i+di+1,:]*c1t[None,:,None]).sum(1) - (v[:,j:j+dj+1,:]*c2t[None,:,None]).sum(1)
jid += 1
if 'rotation_axis' in G[ki][kj]:
ui,uj = G[ki][kj]['rotation_axis']
uit = torch.cat([-ui.sum()[None],ui]).to(x.device,x.dtype)
ujt = torch.cat([-uj.sum()[None],uj]).to(x.device,x.dtype)
Phi[:,0,jid,:] = (x[:,i:i+di+1,:]*uit[None,:,None]).sum(1) - (x[:,j:j+dj+1,:]*ujt[None,:,None]).sum(1)
Phi[:,1,jid,:] = (v[:,i:i+di+1,:]*uit[None,:,None]).sum(1) - (v[:,j:j+dj+1,:]*ujt[None,:,None]).sum(1)
jid+=1
# Joints connecting a body to a fixed point in space
for jid2, (ki,(c1,c2)) in enumerate(node_joints.items()):
i = G.key2id[ki]
c1t = torch.cat([1-c1.sum()[None],c1]).to(x.device,x.dtype)
di = G.nodes[ki]['d']
Phi[:,0,jid2+jid,:] = (x[:,i:i+di+1,:]*c1t[None,:,None]).sum(1) - c2[None].to(x.device,x.dtype)
Phi[:,1,jid2+jid,:] = (v[:,i:i+di+1,:]*c1t[None,:,None]).sum(1)
return Phi.reshape(bs,2,-1)
| StarcoderdataPython |
78776 | <filename>hc2002/plugin/symbolic_values.py
import hc2002.plugin as plugin
import hc2002.config as config
plugin.register_for_resource(__name__, 'hc2002.resource.instance')
_prefixes = ('availability-zone:', 'image:', 'kernel:', 'key:',
'load-balancers:', 'ramdisk:', 'security-groups:', 'spot-price:',
'subnet:', 'vpc:')
def apply(instance):
def resolve_symbol(original_value):
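        """Follow a chain of symbolic references (values starting with the current prefix) through the instance dict; raise if a reference cannot be resolved or loops back on itself."""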
value = original_value
visited = set()
while isinstance(value, basestring) \
and value.startswith(prefix):
value = value.format(region=config.region, **instance)
if value in instance \
and value not in visited:
visited.add(value)
value = instance[value]
else:
if original_value == value:
raise Exception("Unable to resolve '%s'" % value)
else:
raise Exception(
"While resolving '%s': unable to resolve '%s'"
% (original_value, value))
return value
# Resolve symbols
for prefix in _prefixes:
key = prefix[:-1]
if key not in instance:
continue
if isinstance(instance[key], basestring):
instance[key] = resolve_symbol(instance[key])
elif isinstance(instance[key], list):
instance[key] = map(resolve_symbol, instance[key])
# Drop resolvable symbols
for key in instance.keys():
if key.startswith(_prefixes):
del instance[key]
| StarcoderdataPython |
1640235 | from jd.api.base import RestApi
class KeplerSkuProductServiceRequest(RestApi):
def __init__(self,domain='gw.api.360buy.com',port=80):
RestApi.__init__(self,domain, port)
self.skuIdSet = None
self.extFieldSet = None
def getapiname(self):
return 'jd.kepler.sku.ProductService'
| StarcoderdataPython |
1742850 | <reponame>takuron/Lesson
from machine import Pin,Signal
from micropython import const
import time
LED_RED_PIN = const(17)
LED_GREEN_PIN = const(16)
LED_BLUE_PIN = const(15)
red_led = Pin(LED_RED_PIN,Pin.OUT,value=1)
green_led = Pin(LED_GREEN_PIN,Pin.OUT,value=1)
blue_led = Pin(LED_BLUE_PIN,Pin.OUT,value=1)
while True:
red_led.value(0)
blue_led.value(1)
green_led.value(1)
time.sleep(1)
red_led.value(1)
blue_led.value(0)
green_led.value(1)
time.sleep(1)
red_led.value(1)
blue_led.value(1)
green_led.value(0)
time.sleep(1)
| StarcoderdataPython |
1737642 | <filename>geoutil.py
import geopandas as gpd
import tempfile
def bounds_to_set(bounds):
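    """Normalise a bounding box, given either four numbers or a pair of 'geo:lat,lon' strings, into a lower-left/upper-right (lat1, lon1, lat2, lon2) tuple."""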
if not isinstance(bounds, list):
print('Unknown boundary format, ignoring')
return None
if isinstance(bounds[0], str) and bounds[0].startswith('geo:'):
btl = bounds[0].strip('geo:').split(',')
bbr = bounds[1].strip('geo:').split(',')
bounds = [
float(btl[0].strip()),
float(btl[1].strip()),
float(bbr[0].strip()),
float(bbr[1].strip()),
]
# Thorben: "Safest bet currently: lower left, upper right corner notation."
if bounds[0] > bounds[1]:
bounds[0], bounds[1] = bounds[1], bounds[0]
bounds[2], bounds[3] = bounds[3], bounds[2]
if bounds[0] > bounds[2]:
bounds[0], bounds[2] = bounds[2], bounds[0]
if bounds[1] > bounds[3]:
bounds[1], bounds[3] = bounds[3], bounds[1]
return tuple(bounds)
def set_to_bounds(bounds):
if bounds is None: return []
if len(bounds) == 2 and bounds[0].startswith('geo:'): return bounds
if len(bounds) < 4: raise Exception("Invalid bounds", bounds)
print("setting", bounds)
return ['geo:%f,%f' % (bounds[1], bounds[0]), 'geo:%f,%f' % (bounds[3], bounds[2])]
def points_reduce(filename, factor=2):
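    """Keep every `factor`-th feature of a GeoJSON file and write the reduced data to output/temp.reduced.geojson."""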
data = gpd.read_file(filename)
newgp = data.copy().iloc[::factor, :]
print('Original: %d - Reduced: %d' % (len(data), len(newgp)))
newgp.reindex()
newfile = "output/temp.reduced.geojson"
print('Writing 1/%d reduced data to' % factor, newfile)
newgp.to_file(newfile, driver='GeoJSON')
return newfile
def html_geo_thumb(imgkey):
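    """Return an HTML snippet linking the Mapillary map view of imgkey with its 320 px thumbnail."""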
return \
("<a target=\"_blank\" href=\"https://www.mapillary.com/map/im/%s\">" + \
"<img style=\"width:200px\" " + \
"src=\"https://images.mapillary.com/%s/thumb-320.jpg\"></a>") % \
(imgkey, imgkey)
| StarcoderdataPython |
91968 | """
solution Adventofcode 2019 day 4 part 1.
https://adventofcode.com/2019/day/4
author: pca
"""
def valid_pw(pw_str: str) -> bool:
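    """Return True when the 6-character password has non-decreasing digits and at least one pair of adjacent equal digits."""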
has_double = False
if len(pw_str) != 6:
return False
max_digit = 0
prev_digit = -1
for ch in pw_str:
cur_digit = int(ch)
# decreasing
if cur_digit < max_digit:
return False
else:
max_digit = cur_digit
if cur_digit == prev_digit:
has_double = True
prev_digit = cur_digit
return has_double
def main(args=None):
puzzle_input = [int(ch) for ch in '245318-765747'.split('-')]
cnt = 0
for val in range(puzzle_input[0], puzzle_input[1]):
if valid_pw(str(val)):
cnt += 1
print(cnt)
if __name__ == "__main__":
main()
| StarcoderdataPython |
71073 | <filename>mainnotes/migrations/0001_initial.py<gh_stars>1-10
# Generated by Django 3.1.7 on 2021-03-30 08:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import encrypted_fields.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Notes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300)),
('description', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('recently_modified_date', models.DateTimeField(auto_now=True)),
('hidden_note', models.BooleanField(default=False)),
('trashed_note', models.BooleanField(default=False)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='notesImages/')),
('note', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainnotes.notes')),
],
),
migrations.CreateModel(
name='HiddenNotePassword',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hidden_note_password', encrypted_fields.fields.EncryptedCharField(max_length=50)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| StarcoderdataPython |
3287371 | <reponame>ritlew/django-hls-video
import os
from django_hls_video.settings import *
DEBUG = False
| StarcoderdataPython |
4803520 | #! /usr/bin/env python
import functions as fn
def load_dictionary(filename, key_col, value_col):
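    """Read a tab-separated file and map each key-column entry to a de-duplicated list of its (comma-separated) value-column entries."""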
file = open(filename)
dictionary = dict()
for line in file:
line=line.rstrip("\n")
fields = line.split("\t")
key = fields[key_col]
value = fields[value_col]
hpo_l = value.split(", ")
if key not in dictionary:
dictionary[key] = []
for hpo in hpo_l:
if hpo not in dictionary[key]:
dictionary[key].append(hpo)
else:
for hpo in hpo_l:
if hpo not in dictionary[key]:
dictionary[key].append(hpo)
return(dictionary)
def search_in_dictionary(dictionary, key):
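    """Return dictionary[key], or an empty list when the key is absent."""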
if key in dictionary:
l = dictionary[key]
else:
l = []
return(l)
########################################################################################################################################
# OPTPARSE
########################################################################################################################################
import optparse
parser=optparse.OptionParser()
parser.add_option("-p", "--patient_file", dest="patient_file",
help="patient file", metavar="FILE")
parser.add_option("-A", "--key_pat_col", dest="key_patient",
help="column with key in file", type='int')
parser.add_option("-a", "--value_pat_col", dest="value_patient",
help="column with value in file", type='int')
parser.add_option("-g", "--go_cluster_file", dest="go_cluster_file",
help="go cluster file", metavar="FILE")
parser.add_option("-k", "--kegg_cluster_file", dest="kegg_cluster_file",
help="kegg_cluster file", metavar="FILE")
parser.add_option("-r", "--reactome_cluster_file", dest="reactome_cluster_file",
help="reactome_cluster file", metavar="FILE")
parser.add_option("-B", "--key_col", dest="key_cluster",
help="column with key in file", type='int')
parser.add_option("-b", "--value_col", dest="value_cluster",
help="column with value in file", type='int')
parser.add_option("-D", "--gene_col", dest="gene_col",
help="column with genes in file", type='int')
parser.add_option("-o", "--output_file", dest="output_file",
help="output file", metavar="FILE")
parser.add_option("-f", "--gene_file", dest="gene_file",
help="gene file", metavar="FILE")
parser.add_option("-C", "--key_gene_col", dest="key_gene",
help="column with key in gene file", type='int')
parser.add_option("-c", "--value_gene_col", dest="value_gene",
help="column with value in gene file", type='int')
(options, arg) = parser.parse_args()
#######################################################################################################################################
# MAIN
#######################################################################################################################################
go_hpos = load_dictionary(options.go_cluster_file, options.key_cluster, options.value_cluster)
go_gene = load_dictionary(options.go_cluster_file, options.key_cluster, options.gene_col)
kegg_hpos = load_dictionary(options.kegg_cluster_file, options.key_cluster, options.value_cluster)
kegg_gene = load_dictionary(options.kegg_cluster_file, options.key_cluster, options.gene_col)
reactome_hpos = load_dictionary(options.reactome_cluster_file, options.key_cluster, options.value_cluster)
reactome_gene = load_dictionary(options.reactome_cluster_file, options.key_cluster, options.gene_col)
pat2gene_dict = load_dictionary(options.gene_file, options.key_gene, options.value_gene)
#print(patient_hpos_overlap_in_cluster)
patient_profile_dict = fn.build_dictionary(options.patient_file, options.key_patient, options.value_patient)
output_file = open(options.output_file, 'w')
patient_hpo_file = open(options.patient_file)
output_file.write("Patient" + "\t" + "Metric" + "\t" + "Number" + "\n")
patient_l = []
for line in patient_hpo_file:
line = line.rstrip("\n")
fields = line.split("\t")
patient = fields[options.key_patient]
hpo = fields[options.value_patient]
go_hpo_l = search_in_dictionary(go_hpos, patient)
kegg_hpo_l = search_in_dictionary(kegg_hpos, patient)
reactome_hpo_l = search_in_dictionary(reactome_hpos, patient)
go_gene_l = search_in_dictionary(go_gene, patient)
kegg_gene_l = search_in_dictionary(kegg_gene, patient)
reactome_gene_l = search_in_dictionary(reactome_gene, patient)
predicted_gene_l = set(go_gene_l + kegg_gene_l + reactome_gene_l)
cluster_hpo_l = set(go_hpo_l + kegg_hpo_l + reactome_hpo_l)
if hpo in cluster_hpo_l:
print(fields[0], fields[1], fields[2], sep="\t")
if patient not in patient_l:
patient_l.append(patient)
output_file.write(patient + "\t" + "Total number of HPOs" + "\t" + str(len(patient_profile_dict[patient])) + "\n")
output_file.write(patient + "\t" + "Total HPOs overlapping clusters" + "\t" + str(len(cluster_hpo_l)) + "\n")
if patient in pat2gene_dict:
output_file.write(patient + "\t" + "Number of genes in patient's CNV" + "\t" + str(len(pat2gene_dict[patient])) + "\n")
output_file.write(patient + "\t" + "Number of possible patogenic genes" + "\t" + str(len(predicted_gene_l)) + "\n")
| StarcoderdataPython |
67408 | # Generated by Django 2.0 on 2017-12-18 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tutorial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('language_used', models.CharField(max_length=200)),
('date_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(blank=True, null=True)),
('git_repo_url', models.URLField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('skill_level', models.CharField(choices=[('1', 'Beginner'), ('2', 'Intermediate'), ('3', 'Advanced')], max_length=1)),
],
),
migrations.CreateModel(
name='TutorialComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(blank=True, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='TutorialStep',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('description', models.TextField(blank=True, null=True)),
('order_number', models.IntegerField()),
],
),
migrations.AddField(
model_name='tutorial',
name='comments',
field=models.ManyToManyField(to='website.TutorialComment'),
),
migrations.AddField(
model_name='tutorial',
name='steps',
field=models.ManyToManyField(to='website.TutorialStep'),
),
]
| StarcoderdataPython |
3382497 | <gh_stars>1-10
import random
# 1 -> car
# 0 -> goat
choice = [1, 0, 0]
win = 0.0 # as float, to have decimal places
NUMBER_OF_SIMULATION = 10_000
for i in range(NUMBER_OF_SIMULATION):
random.shuffle(choice)
#print(choice)
user_index = 0 # 0 is first
user_index = random.randint(0, 2) # or random
# find where is 1 (car)
car_index = choice.index(1)
#print('user_index %d, car_index %d' % (user_index, car_index) )
# available indexes
available_indexes = [0, 1, 2]
available_indexes.remove(user_index)
if car_index != user_index:
available_indexes.remove(car_index)
index_to_open = random.choice(available_indexes)
#print('index_to_open %d' %(index_to_open) )
index_to_switch = [0, 1, 2]
index_to_switch.remove(user_index)
index_to_switch.remove(index_to_open)
index_to_switch = index_to_switch[0]
#print('index_to_switch %s' % (index_to_switch) )
# check result
if choice[index_to_switch] == 1:
#print('WIN')
win += 1
else:
#print('LOSE')
pass
win_percent = win / NUMBER_OF_SIMULATION * 100
lose = NUMBER_OF_SIMULATION - win
lose_percent = 100 - win_percent
print('#################################')
print('State | Number of games | Percent')
print('Win | {:7.0f} | {:.2f}%'.format(win, win_percent) )
print('Lose | {:7.0f} | {:.2f}%'.format(lose, lose_percent) )
print('---------------------------------')
print('SUM | {:7.0f} | {:.2f}%'.format(NUMBER_OF_SIMULATION, 100) )
| StarcoderdataPython |
140514 | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def inicio():
return render_template('inicio.html')
app.run() | StarcoderdataPython |
3342075 | # optimizer
optimizer = dict(type='Adadelta', lr=1.0)
optimizer_config = dict(grad_clip=dict(max_norm=0.5))
# learning policy
lr_config = dict(policy='step', step=[8, 10, 12])
total_epochs = 16
| StarcoderdataPython |
3209650 | <gh_stars>0
"""Collector base class."""
import logging
import traceback
from typing import cast, Optional, Set, Tuple, Type
import cachetools
import requests
from .type import ErrorMessage, Measurement, Response, Units, URL, Value
class Collector:
"""Base class for metric collectors."""
TIMEOUT = 10 # Default timeout of 10 seconds
RESPONSE_CACHE = cachetools.TTLCache(maxsize=256, ttl=60) # Briefly cache responses to prevent flooding sources
MAX_UNITS = 100 # The maximum number of units (e.g. violations, warnings) to send to the server
subclasses: Set[Type["Collector"]] = set()
def __init_subclass__(cls) -> None:
Collector.subclasses.add(cls)
super().__init_subclass__()
@classmethod
def get_subclass(cls, source_and_metric: str) -> Type["Collector"]:
"""Return the subclass registered for the source/metric name."""
simplified_class_name = source_and_metric.replace("_", "")
matching_subclasses = [sc for sc in cls.subclasses if sc.__name__.lower() == simplified_class_name]
return matching_subclasses[0] if matching_subclasses else cls
@staticmethod
def get(metric_type, sources) -> Response:
"""Connect to the sources to get and parse the measurement for the metric."""
source_responses = []
for source_uuid, source in sources.items():
collector_class = cast(Type[Collector], Collector.get_subclass(f"{source['type']}_{metric_type}"))
source_collector = collector_class()
source_response = source_collector.get_one(source)
source_response["source_uuid"] = source_uuid
source_responses.append(source_response)
values = [source_response["value"] for source_response in source_responses]
value = None if None in values else sum([int(value) for value in values])
return dict(sources=source_responses, value=value)
def get_one(self, source) -> Response:
"""Return the measurement response for one source."""
parameters = source.get("parameters", {})
api_url = self.api_url(**parameters)
landing_url = self.landing_url(**parameters)
response, connection_error = self.safely_get_source_response(api_url)
value, units, parse_error = self.safely_parse_source_response(response, **parameters)
return dict(api_url=api_url, landing_url=landing_url, value=value, units=units,
connection_error=connection_error, parse_error=parse_error)
def landing_url(self, **parameters) -> URL: # pylint: disable=no-self-use
"""Translate the urls into the landing urls."""
return parameters.get("url", "")
def api_url(self, **parameters) -> URL: # pylint: disable=no-self-use
"""Translate the url into the API url."""
return parameters.get("url", "")
@cachetools.cached(RESPONSE_CACHE, key=lambda self, url: cachetools.keys.hashkey(url))
def safely_get_source_response(self, url: URL) -> Tuple[Optional[requests.Response], ErrorMessage]:
"""Connect to the source and get the data, without failing."""
response, error = None, None
try:
response = self.get_source_response(url)
except Exception: # pylint: disable=broad-except
error = traceback.format_exc()
return response, error
def get_source_response(self, url: URL) -> requests.Response:
"""Open the url. Raise an exception if the response status isn't 200 or if a time out occurs."""
logging.info("Retrieving %s", url)
response = requests.get(url, timeout=self.TIMEOUT)
response.raise_for_status()
return response
def safely_parse_source_response(
self, response: requests.Response, **parameters) -> Tuple[Value, Units, ErrorMessage]:
"""Parse the data from the response, without failing."""
units: Units = []
value, error = None, None
if response:
try:
result = self.parse_source_response(response, **parameters)
value, units = result if isinstance(result, tuple) else (result, [])
except Exception: # pylint: disable=broad-except
error = traceback.format_exc()
return value, units[:self.MAX_UNITS], error
def parse_source_response(self, response: requests.Response, **parameters) -> Measurement:
# pylint: disable=no-self-use,unused-argument
"""Parse the response to get the measurement for the metric."""
return str(response.text)
| StarcoderdataPython |
1669868 | #!/usr/bin/env python
"""
A wrapper script for deleting a history
python ./print_history_command_lines.py --userkey <KEY> --url 'https://dev.globusgenomics.org' --history_id 939393933948
"""
import glob, re, json, time, sys, optparse, os, random
from bioblend import galaxy
import requests
requests.packages.urllib3.disable_warnings()
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split('(\d+)', text) ]
def search_for_line(file_path, pbs_list):
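    """Return the first PBS script in pbs_list that references file_path (None if no script mentions it)."""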
for pbs in pbs_list:
for line in open(pbs):
if file_path in line:
return pbs
def get_all_files_in_command(command):
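    """Collect the /scratch/galaxy/files and /scratch/go paths referenced in a command string, stripping quotes, semicolons and 'key=' prefixes."""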
files = []
for i in command.split():
if "/scratch/galaxy/files" in i or "/scratch/go" in i:
i = i.replace('"', '')
i = i.replace(';', '')
if "=" in i:
files.append(i.split("=")[1])
else:
files.append(i)
return files
parser = optparse.OptionParser()
parser.add_option( '--userkey', dest='user_api_key', help="The user api key" )
parser.add_option( '--url', dest='galaxyurl', help='Base URL' )
parser.add_option( '--output', dest='output_file', help='output file' )
parser.add_option( '--history_id', dest='history_id', help='history_id')
(options, args) = parser.parse_args()
url = options.galaxyurl
if "http:" in url:
url = url.replace("http", "https")
user_api_key = options.user_api_key
history_id = options.history_id
# get an api handle and the user's id
pbs_path = "/opt/galaxy/database/pbs"
gi = galaxy.GalaxyInstance(url=url, key=user_api_key)
complete_options_output_list = list()
message = ""
seen = []
pbs_files = glob.glob("%s/*.sh" % (pbs_path))
#pbs_files.sort(key=natural_keys)
for history_item in gi.histories.show_history(history_id, contents=True, details="all"):
dataset_metadata = gi.histories.show_dataset(history_id, history_item['id'])
if dataset_metadata['state'] != 'error' and dataset_metadata['state'] == 'ok' and dataset_metadata['deleted'] == False :
pbs_file = search_for_line(dataset_metadata['file_name'], pbs_files)
if pbs_file is None:
continue
with open(pbs_file, 'r') as f:
lines = f.readlines()
cmd = lines[-1].split("cd /opt/galaxy")[0]
stime = os.path.getctime(pbs_file)
if cmd not in seen:
#print "%s\n%s\n" % (dataset_metadata['name'], cmd)
complete_options_output_list.append([dataset_metadata['file_name'], dataset_metadata['name'], pbs_file, cmd, stime])
seen.append(cmd)
for tup in sorted(complete_options_output_list, key=lambda x: x[-1]):
print "%s\n%s\n" % (tup[1], tup[3])
| StarcoderdataPython |