id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9746057 | <reponame>spaceone/circuits<filename>docs/source/tutorials/telnet/telnet.py
#!/usr/bin/env python
import sys
from circuits import Component, handler
from circuits.io import File
from circuits.net.events import connect, write
from circuits.net.sockets import TCPClient
class Telnet(Component):
    """Minimal telnet client: bridges stdin and a TCP connection."""

    channel = "telnet"

    def init(self, host, port):
        self.host = host
        self.port = port
        # Register the TCP client and a stdin reader as child components.
        TCPClient(channel=self.channel).register(self)
        File(sys.stdin, channel="stdin").register(self)

    def ready(self, socket):
        # The TCP client component is ready -> open the connection.
        self.fire(connect(self.host, self.port))

    def read(self, data):
        # Data arrived from the remote end; echo it to the console.
        print(data.strip())

    @handler("read", channel="stdin")
    def read_user_input(self, data):
        # Forward everything typed on stdin to the socket.
        self.fire(write(data))
# Entry point: telnet.py <host> <port>
host, port = sys.argv[1], int(sys.argv[2])
Telnet(host, port).run()
| StarcoderdataPython |
11303549 | <reponame>code-review-doctor/project-application<filename>ProjectApplication/project_core/migrations/0105_project_projectpartner.py
# Generated by Django 3.0.3 on 2020-02-19 14:33
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Project and ProjectPartner models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('project_core', '0104_delete_colour'),
    ]

    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True, help_text='Date and time at which the entry was created')),
                ('modified_on', models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True)),
                ('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
                ('title', models.CharField(help_text='Title of the project', max_length=500)),
                ('location', models.CharField(blank=True, help_text='Name of more precise location of where the project would take place (not coordinates)', max_length=200, null=True)),
                ('start_date', models.DateField(help_text='Date on which the project is expected to start')),
                ('end_date', models.DateField(help_text='Date on which the project is expected to end')),
                ('duration_months', models.DecimalField(decimal_places=1, help_text='Duration of the project in months', max_digits=5, validators=[django.core.validators.MinValueValidator(0)])),
                ('call', models.ForeignKey(help_text='Call to which the project belongs', on_delete=django.db.models.deletion.PROTECT, to='project_core.Call')),
                ('created_by', models.ForeignKey(blank=True, help_text='User by which the entry was created', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_project_created_by_related', to=settings.AUTH_USER_MODEL)),
                ('geographical_areas', models.ManyToManyField(help_text='Geographical area(s) covered by the project', to='project_core.GeographicalArea')),
                ('keywords', models.ManyToManyField(help_text='Keywords that describe the project', to='project_core.Keyword')),
                ('modified_by', models.ForeignKey(blank=True, help_text='User by which the entry was modified', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_project_modified_by_related', to=settings.AUTH_USER_MODEL)),
                ('overarching_project', models.ForeignKey(blank=True, help_text='Overarching project to which this project contributes', null=True, on_delete=django.db.models.deletion.PROTECT, to='project_core.ExternalProject')),
                ('principal_investigator', models.ForeignKey(help_text='Main applicant of the project', on_delete=django.db.models.deletion.PROTECT, to='project_core.PersonPosition')),
                ('proposal', models.ForeignKey(blank=True, help_text='Proposal from which the project originates', null=True, on_delete=django.db.models.deletion.PROTECT, to='project_core.Proposal')),
            ],
            options={
                'unique_together': {('title', 'principal_investigator', 'call')},
            },
        ),
        migrations.CreateModel(
            name='ProjectPartner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role_description', models.TextField(help_text="Description of the partner's role")),
                ('competences', models.TextField(help_text="Description of the partner's key competences")),
                ('person', models.ForeignKey(help_text='Person that is a partner', on_delete=django.db.models.deletion.PROTECT, to='project_core.PersonPosition')),
                ('project', models.ForeignKey(help_text='Project on which the partner is collaborating', on_delete=django.db.models.deletion.PROTECT, to='project_core.Project')),
                ('role', models.ForeignKey(help_text='Role of the partner', on_delete=django.db.models.deletion.PROTECT, to='project_core.Role')),
            ],
            options={
                'unique_together': {('person', 'role', 'project')},
            },
        ),
    ]
| StarcoderdataPython |
12819566 | <reponame>paiuolo/django-sso-app
import logging
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from ..utils import get_or_create_user_profile
logger = logging.getLogger('django_sso_app')
User = get_user_model()
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """
    App still creates profile for "staff" users
    """
    if kwargs['raw']:
        # Fixture loading (loaddata): flag the instance and skip profile work.
        # https://github.com/django/django/commit/18a2fb19074ce6789639b62710c279a711dabf97
        setattr(instance, '__dssoa__loaddata', True)
        return

    if created:
        logger.debug('user created, creating profile')
        new_profile = get_or_create_user_profile(instance, commit=True)
        logger.debug('new profile created "{}"'.format(new_profile))
        # refreshing user instance
        instance.sso_app_profile = new_profile
| StarcoderdataPython |
267824 | <reponame>jonico/pacbot<filename>installer/core/terraform/resources/aws/ecs.py
from core.terraform.resources import TerraformResource
from core.config import Settings
from core.providers.aws.boto3 import ecs
class ECSClusterResource(TerraformResource):
    """
    Base resource class for Terraform AWS ECS cluster resource

    Attributes:
        resource_instance_name (str): Type of resource instance
        available_args (dict): Instance configurations
    """
    resource_instance_name = "aws_ecs_cluster"
    setup_time = 600
    available_args = {
        'name': {'required': True, 'prefix': True, 'sep': "-"},
        'tags': {'required': False}
    }

    def check_exists_before(self, input, tf_outputs):
        """
        Check if the resource already exists in AWS.

        Args:
            input (instance): input object
            tf_outputs (dict): Terraform output dictionary

        Returns:
            exists (boolean): True if already exists in AWS else False
            checked_details (dict): Status of the existence check
        """
        checked_details = {'attr': "name", 'value': self.get_input_attr('name')}
        # Only query AWS when Terraform does not already manage the resource.
        if self.resource_in_tf_output(tf_outputs):
            return False, checked_details
        exists = ecs.check_ecs_cluster_exists(
            checked_details['value'],
            input.AWS_AUTH_CRED)
        return exists, checked_details
class ECSTaskDefinitionResource(TerraformResource):
    """
    Base resource class for Terraform AWS ECS task definition resource

    Attributes:
        resource_instance_name (str): Type of resource instance
        available_args (dict): Instance configurations
    """
    resource_instance_name = "aws_ecs_task_definition"
    setup_time = 600
    available_args = {
        'family': {'required': True, 'prefix': True, 'sep': "-"},
        'container_definitions': {'required': True},
        'requires_compatibilities': {'required': True},
        'network_mode': {'required': True},
        'cpu': {'required': True},
        'memory': {'required': True},
        'execution_role_arn': {'required': True},
        'task_role_arn': {'required': True},
        'tags': {'required': False}
    }

    def check_exists_before(self, input, tf_outputs):
        """
        Check if the resource already exists in AWS.

        Args:
            input (instance): input object
            tf_outputs (dict): Terraform output dictionary

        Returns:
            exists (boolean): True if already exists in AWS else False
            checked_details (dict): Status of the existence check
        """
        checked_details = {'attr': "name", 'value': self.get_input_attr('family')}
        # Only query AWS when Terraform does not already manage the resource.
        if self.resource_in_tf_output(tf_outputs):
            return False, checked_details
        exists = ecs.check_ecs_task_definition_exists(
            checked_details['value'],
            input.AWS_AUTH_CRED)
        return exists, checked_details
class ECSServiceResource(TerraformResource):
    """
    Base resource class for Terraform AWS ECS service resource

    Attributes:
        resource_instance_name (str): Type of resource instance
        available_args (dict): Instance configurations
    """
    resource_instance_name = "aws_ecs_service"
    setup_time = 600
    available_args = {
        'name': {'required': True, 'prefix': True, 'sep': "-"},
        'task_definition': {'required': True},
        'desired_count': {'required': True},
        'launch_type': {'required': True},
        'cluster': {'required': True},
        # Nested network_configuration block; inline args map to TF keys.
        'network_configuration': {
            'required': True,
            'inline_args': {
                'network_configuration_security_groups': {'required': True, 'tf_arg_key': "security_groups"},
                'network_configuration_subnets': {'required': True, 'tf_arg_key': "subnets"},
                'network_configuration_assign_public_ip': {'required': True, 'tf_arg_key': "assign_public_ip"},
            }
        },
        # Nested load_balancer block wiring the service to a target group.
        'load_balancer': {
            'required': True,
            'inline_args': {
                'load_balancer_target_group_arn': {'required': True, 'tf_arg_key': "target_group_arn"},
                'load_balancer_container_name': {'required': True, 'tf_arg_key': "container_name"},
                'load_balancer_container_port': {'required': True, 'tf_arg_key': "container_port"},
            }
        },
        'tags': {'required': False},
        'propagate_tags': {'required': False}
    }
| StarcoderdataPython |
1968984 | import unittest
import puzzle8
import search
import random
import time
class TestMethods(unittest.TestCase):
    """Grades A* on a fixed set of 8-puzzles with known solution depths."""

    # Sample boards (class-level fixtures).
    offTwoPuzzle = puzzle8.state([3, 4, 5, 2, 0, 6, 1, 8, 7])
    gradePuzzle1 = puzzle8.state([8, 7, 6, 5, 4, 3, 2, 1, 0])
    gradePuzzle2 = puzzle8.state([1, 2, 3, 4, 5, 6, 7, 8, 0])

    def setUp(self):
        self.gradePuzzles = [247860748, 253206748, 253780508, 152293420,
                             300501380, 108306836]
        self.pathLengths = [2, 4, 8, 10, 12, 16]

    def _grade_astar(self, banner, heuristic, summary):
        # Shared driver: run A* with `heuristic` on every grading puzzle and
        # assert the returned path has the expected optimal depth.
        print(banner)
        startTime = time.time()
        for puzzle, depth in zip(self.gradePuzzles, self.pathLengths):
            print("\nTrying to find a solution for puzzle with solution of depth",
                  depth)
            print("Starting puzzle:")
            puzzle8.display(puzzle)
            solnPath = search.astar(puzzle, heuristic)
            print("Found a solution, of depth", len(solnPath))
            print("Solution path:", solnPath)
            self.assertEqual(len(solnPath), depth)
        print(summary, time.time() - startTime)
        print('\n')

    def testAstarGradingNumWrongTiles(self):
        self._grade_astar("A* num wrong tiles", search.numWrongTiles, 'A* num time')

    def testAstarGradingManhattan(self):
        self._grade_astar("A* Manhattan distance", search.manhattanDistance, 'A* Manhattan time')
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    unittest.main()
1724429 | # coding=UTF-8
import json
import unittest
import requests
from jsonpath import jsonpath
class TaskControllerTest(unittest.TestCase):
    """Integration test: sign in, then query the task SPI extension endpoints."""

    def test_spi_integration(self):
        headers = {'Content-type': 'application/x-www-form-urlencoded'}
        payload = {'username': 'dev', 'password': '<PASSWORD>', 'tenentCode': 'org'}
        response = requests.post('http://127.0.0.1:8002/auth/sign-in', headers=headers, data=payload)
        self.assertTrue(response.status_code == requests.codes.ok)
        content_json_data = json.loads(response.content)
        # jsonpath returns False when the path is missing, so the token must
        # exist here — do not compare the result against False.
        access_token = jsonpath(content_json_data, 'access-token')[0]
        request_headers = {'Content-type': 'application/json', 'access-token': access_token}

        extension_response = requests.get('http://127.0.0.1:8101/task-ext/extensions', headers=request_headers)
        self.assertTrue(extension_response.status_code == requests.codes.ok)
        # BUG FIX: previously parsed `response` (the sign-in reply) instead of
        # the extensions reply, so this assertion never checked the endpoint.
        content_json_data = json.loads(extension_response.content)
        self.assertTrue(jsonpath(content_json_data, '$[*]'))

        # test FeignClientBuilder
        extension_response = requests.get('http://127.0.0.1:8102/delegate-extensions', headers=request_headers)
        self.assertTrue(extension_response.status_code == requests.codes.ok)
        # BUG FIX: same copy-paste mistake as above.
        content_json_data = json.loads(extension_response.content)
        self.assertTrue(jsonpath(content_json_data, '$[*]'))
if __name__ == '__main__':
    # Run the integration test when executed as a script.
    unittest.main()
| StarcoderdataPython |
6577320 | <reponame>wangao0824/Intelligent-Door-Lock
from upspackv2 import *
def sys_get_battery():
    """Return the UPS battery capacity read over the serial admin port."""
    ups = UPS2("/dev/ttyAMA0")
    version, vin, batcap, vout = ups.decode_uart()
    return batcap
def sys_get_voltage():
    """Return the UPS output voltage in volts (the device reports millivolts)."""
    ups = UPS2("/dev/ttyAMA0")
    version, vin, batcap, vout = ups.decode_uart()
    return float(vout) / 1000
# print sys_get_battery()
# print sys_get_voltage() | StarcoderdataPython |
6604857 | <gh_stars>1-10
def recuring_fractionto_decimal(divident: int, diviser: int) -> float:
    """Return the first six decimal digits of divident/diviser as a float,
    for fractions whose decimal expansion is purely recurring.

    The divisor is first scaled so it ends in 9 (the classic long-division
    trick for repeating decimals); divisors ending in 0, 2, 4, 5, 6 or 8 are
    rejected because the expansion then terminates or has a non-repeating
    prefix.
    """
    c, q, r = 0.0, 0, 0
    last_digit = diviser % 10
    if last_digit == 9:
        pass  # already ends in 9
    elif last_digit == 3:
        diviser = 3 * diviser
        divident = 3 * divident
    elif last_digit == 7:
        diviser = 7 * diviser
        divident = 7 * divident
    elif last_digit == 1:
        diviser = 9 * diviser
        divident = 9 * divident
    else:
        print("Error: Not a recurring decimal.")
        exit(0)
    # BUG FIX: use floor division. Under Python 3, `diviser/10 + 1` produced
    # a float, which silently broke the integer digit-extraction loop below.
    op = diviser // 10 + 1
    for i in range(0, 6):
        q = divident // op
        r = divident % op
        divident = r * 10 + q
        c = c + q * (10 ** (-(i + 1)))
    return round(c, 6)
9745718 | from __future__ import absolute_import
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.crf import CRF
from .wordsequence import WordSequence
class SeqModel(nn.Module):
    """Sequence-labelling model: a word-level encoder with an optional CRF layer."""

    def __init__(self, data):
        super(SeqModel, self).__init__()
        self.data = data
        self.use_crf = data.use_crf
        print("build network...")
        print("word feature extractor: ", data.word_feature_extractor)
        self.gpu = data.HP_gpu
        self.average_batch = data.average_batch_loss
        # opinion and evidence are extracted separately
        label_size = data.label_alphabet_size
        self.word_hidden = WordSequence(data)
        if self.use_crf:
            self.word_crf = CRF(label_size, batch_first=True)
            if self.gpu:
                self.word_crf = self.word_crf.cuda()

    def neg_log_likelihood_loss(self, word_inputs, word_seq_lengths, batch_label, mask,
                                input_label_seq_tensor):
        # lstm_outs: (batch_size, sentence_length, tag_size)
        lstm_outs = self.word_hidden(word_inputs, word_seq_lengths, input_label_seq_tensor)
        batch_size = word_inputs.size(0)
        if self.use_crf:
            mask = mask.byte()
            loss = -self.word_crf(lstm_outs, batch_label, mask)
            tag_seq = self.word_crf.decode(lstm_outs, mask)
            return loss, tag_seq
        # Softmax + NLL fallback when no CRF is configured.
        loss_function = nn.NLLLoss()
        seq_len = lstm_outs.size(1)
        flat_outs = lstm_outs.view(batch_size * seq_len, -1)
        score = F.log_softmax(flat_outs, 1)
        loss = loss_function(score, batch_label.contiguous().view(batch_size * seq_len))
        _, tag_seq = torch.max(score, 1)
        tag_seq = tag_seq.view(batch_size, seq_len)
        return loss, tag_seq

    def evaluate(self, word_inputs, word_seq_lengths, mask, input_label_seq_tensor):
        # Decode the best tag sequence without computing a loss.
        lstm_outs = self.word_hidden(word_inputs, word_seq_lengths, input_label_seq_tensor)
        if self.use_crf:
            mask = mask.byte()
            return self.word_crf.decode(lstm_outs, mask)
        batch_size = word_inputs.size(0)
        seq_len = lstm_outs.size(1)
        flat_outs = lstm_outs.view(batch_size * seq_len, -1)
        _, tag_seq = torch.max(flat_outs, 1)
        # Zero out tags on padded positions.
        return mask.long() * tag_seq.view(batch_size, seq_len)

    def forward(self, word_inputs, word_seq_lengths, mask, input_label_seq_tensor):
        return self.evaluate(word_inputs, word_seq_lengths, mask, input_label_seq_tensor)
| StarcoderdataPython |
5037559 | from __future__ import print_function
import re
import os
import sys
import time
import json
import glob
import Queue
import pickle
import anydbm
import argparse
import traceback
import functools
import threading
# import subprocess
import contextlib
import collections
from datetime import datetime
from ceph_daemon import admin_socket
Stage = collections.namedtuple("Stage", ("name", "time"))
#
# digraph "ceph_request" {
# initiated -> queued_for_pg [label="data read from socket and put info queue"];
# queued_for_pg -> reached_pg [label="op fetched from queue"];
# reached_pg -> started [labe="op get all locks and processing started"];
# started -> "waiting for subops from 1,2" [label="request to replica is send"];
# "waiting for subops from 1,2" -> commit_queued_for_journal_write [label="???"];
# "waiting for subops from 1,2" -> "sub_op_commit_rec from 1" [label="replica 1 done"];
# "waiting for subops from 1,2" -> "sub_op_commit_rec from 2" [label="replica 2 done"];
# write_thread_in_journal_buffer -> journaled_completion_queued;
# journaled_completion_queued -> commit_sent -> journaled_completion_queued;
# op_commit -> op_applied [label="data written to main storage buffer"];
# op_applied -> done [label="data written(commited to disk?) to main storage?"];
# }
class OSDOp(object):
    """One parsed OSD operation and its lifecycle stages.

    `stages` is a sequence of Stage(name, time) records; times are
    microseconds relative to the op's start.
    """

    # Stage names in the order the OSD is expected to report them.
    expected_stages_order = [
        "initiated",
        "queued_for_pg",
        "reached_pg",
        "started",
        "waiting for subops from 1,2",
        "commit_queued_for_journal_write",
        "write_thread_in_journal_buffer",
        "op_commit",
        "op_applied",
        "done"]

    # Replica acknowledgement events mapped to friendlier result keys.
    sub_op_key_map = {"sub_op_commit_rec from 1": "1st replica done",
                      "sub_op_commit_rec from 2": "2nd replica done"}

    extra_steps = {"commit_sent", "journaled_completion_queued"}

    # Stages excluded from the strict ordering check below.
    skip_order = extra_steps | set(sub_op_key_map)

    result_order = [
        'pg_wait',
        'send_to_replica',
        'commit_queued_for_journal_write',
        'write_thread_in_journal_buffer',
        "op_commit",
        "1st replica done",
        "2nd replica done",
        "journaled_completion_queued",
        "commit_sent",
        "op_applied",
        "done"
    ]

    def __init__(self, client, object, op_type, start_time, stages):
        self.client = client
        self.object = object
        self.op_type = op_type
        self.start_time = start_time
        self.stages = stages

    def to_op_times(self):
        """Return a dict of {result key: duration/offset in microseconds}."""
        # The main OSD stages must appear in the canonical order.
        main_osd_stages = [stage for stage in self.stages if stage.name not in self.skip_order]
        for stage, expected_name in zip(main_osd_stages, self.expected_stages_order):
            assert stage.name == expected_name, ",".join(stage.name for stage in main_osd_stages)

        if len(self.stages) < 3:
            return {}

        smap = {stage.name: stage.time for stage in self.stages}
        res = {'pg_wait': smap['started']}

        rep_send = smap.get("waiting for subops from 1,2")
        if rep_send is not None:
            res['send_to_replica'] = rep_send - smap['started']
            for name, res_key in self.sub_op_key_map.items():
                if name in smap:
                    res[res_key] = smap[name] - rep_send
            # Deltas for every consecutive stage after the replica send.
            send_done_idx = self.expected_stages_order.index("waiting for subops from 1,2")
            ptime = rep_send
            for stage_name in self.expected_stages_order[send_done_idx + 1:]:
                if stage_name not in smap:
                    break
                res[stage_name] = smap[stage_name] - ptime
                ptime = smap[stage_name]

        # BUG FIX: the original checked 'commit_send', a key that can never
        # exist (the event is named 'commit_sent' — see extra_steps), so this
        # branch was permanently dead. Assumes op_commit is recorded at or
        # after commit_sent — TODO confirm against live dumps.
        if 'commit_sent' in smap:
            dtime = smap["op_commit"] - smap['commit_sent']
            assert dtime >= 0
            res["send_commit_ack"] = dtime

        if "journaled_completion_queued" in smap:
            dtime = smap["journaled_completion_queued"] - smap['write_thread_in_journal_buffer']
            assert dtime >= 0
            res["journal_commit"] = dtime
        return res

    def __str__(self):
        res = "{0.__class__.__name__}({0.client}=>{0.object}, {1}):\n".format(self, ",".join(self.op_type))
        return res + "\n".join("  {0.time:>7d} {0.name}".format(stage) for stage in self.stages)

    def __repr__(self):
        return str(self)
def to_ctime_ms(time_str):
    """Convert 'YYYY-MM-DD HH:MM:SS.micro' into integer microseconds since epoch."""
    whole, micro = time_str.split('.')
    parsed = datetime.strptime(whole, '%Y-%m-%d %H:%M:%S')
    # mktime gives seconds (local time); scale to us and add the fraction.
    return int(time.mktime(parsed.timetuple()) * 1000000 + int(micro))
rr = r"osd_op\((?P<client_id>client[^\t ]*?)\s+" + \
r"(?P<pool>\d+).(?P<PG>[a-f0-9]+)" + \
r"\s+(?P<object>.*?)\s+" + \
r"\[[^\]]*\][^\[]*\[[^\]]*\]" + \
r"\s+(?P<op_decr>[a-z+_]*)\s+.*?\)"
descr_rr = re.compile(rr)
def parse_op(op_js_data):
    """Parse one json op record into an OSDOp, or None if it isn't an osd_op."""
    descr = op_js_data['description'].encode("utf8")
    if not descr.startswith("osd_op"):
        return None
    match = descr_rr.match(descr)
    if match is None:
        # Unparseable description line — skip silently.
        return None

    op_type = match.group("op_decr").split("+")
    client = match.group("client_id")
    object_name = match.group("object")

    _, _, stages_json = op_js_data["type_data"]
    stime = to_ctime_ms(op_js_data['initiated_at'])
    # Stage times are made relative to the op's start; the duplicate
    # 'initiated_at' event is dropped.
    stages = [Stage(stage['event'].encode("utf8"), to_ctime_ms(stage['time']) - stime)
              for stage in stages_json
              if stage['event'] != 'initiated_at']
    return OSDOp(client, object_name, op_type, stime, stages)
def collect_disks_usage():
    """Return the raw contents of /proc/diskstats (Linux block-device counters)."""
    # Use a context manager so the file handle is closed promptly;
    # the original leaked it until garbage collection.
    with open("/proc/diskstats") as stats_file:
        return stats_file.read()
def osd_exec(osd_id, args, cluster='ceph'):
    """Run an admin-socket command against one OSD daemon and return raw output."""
    sock_path = "/var/run/ceph/{}-osd.{}.asok".format(cluster, osd_id)
    return admin_socket(sock_path, args.split(" "))
def collect_historic_ops(osd_id, cluster='ceph'):
    """Fetch the raw json dump of recently completed ops from one OSD.

    Parsing/deduplication of the dump is done later (see parse_op).
    """
    return osd_exec(osd_id, "dump_historic_ops", cluster=cluster)
def collect_current_ops(osd_id, cluster='ceph'):
    """Fetch the raw json dump of in-flight ops from one OSD."""
    return osd_exec(osd_id, "dump_ops_in_flight", cluster=cluster)
def collect_perf(osd_id, cluster='ceph'):
    """Fetch the raw json perf counters dump from one OSD."""
    return osd_exec(osd_id, "perf dump", cluster=cluster)
def find_all_osd(cluster_name='ceph'):
    """Yield the ids of all OSDs that expose an admin socket on this host."""
    pattern = "/var/run/ceph/{}-osd.*.asok".format(cluster_name)
    for sock_path in glob.glob(pattern):
        # File name looks like '<cluster>-osd.<id>.asok' — take the id part.
        yield os.path.basename(sock_path).split('.')[1]
def calc_stats(ops):
    """Average each stage's duration across all ops that reported that stage."""
    totals = collections.defaultdict(int)
    counts = collections.defaultdict(int)
    for op in ops:
        for stage_name, stage_time in op.to_op_times().items():
            totals[stage_name] += stage_time
            counts[stage_name] += 1
    # Turn sums into per-stage means in place.
    for stage_name in totals:
        totals[stage_name] /= counts[stage_name]
    return totals
def show_stats(db_name, op_tp, osd_id=None):
    """Print averaged per-stage timings (in ms) for ops stored in the dbm file.

    `op_tp` selects the record type prefix ('ops' or 'historic'); `osd_id`
    narrows to one OSD, or all OSDs when None.
    """
    key_re = re.compile("{}.osd-{}::".format(op_tp, '.*' if osd_id is None else osd_id))
    db = anydbm.open(db_name, 'r')
    with contextlib.closing(db):
        osd_ops = [pickle.loads(val) for key, val in db.items() if key_re.match(key)]
        stats = calc_stats(osd_ops)
        for name in OSDOp.result_order:
            if name in stats:
                print("{:<40s} {:>8d}".format(name, stats[name] / 1000))
    return 0
def worker(tag, func, timeout, oq, end_time):
    """Poll `func` roughly every `timeout` seconds until `end_time`.

    Each sample is queued as (ms_timestamp, tag, result). A sentinel
    (None, True, None) marks a clean exit; (None, False, None) marks failure.
    """
    now = time.time()
    try:
        while now < end_time:
            oq.put((int(now * 1000), tag, func()))
            # Sleep only for the remainder of the polling interval.
            sleep_for = (now + timeout) - time.time()
            if sleep_for > 0:
                time.sleep(sleep_for)
            now = time.time()
        oq.put((None, True, None))
    except Exception:
        traceback.print_exc()
        oq.put((None, False, None))
def show_online(res_q, interval):
    """Placeholder for a live display of collected samples (not implemented)."""
    pass
# can we store to DB directly from data collect threads?
# can we store to DB directly from data collect threads?
def store_to_db(res_q, dbpath, th_count):
    """Drain `res_q` into a dbm file until all `th_count` workers have exited.

    Returns 1 if any worker reported failure, 0 otherwise.
    """
    db = anydbm.open(dbpath, "c")
    with contextlib.closing(db):
        remaining = th_count
        while remaining:
            ctime, tag, res = res_q.get()
            if ctime is None:   # worker exit sentinel
                if not tag:     # worker failed
                    return 1
                remaining -= 1
            else:
                db["{}::{}".format(tag, ctime)] = res
    return 0
def print_results(res_q, th_count):
    """Echo queued samples to stdout until every worker has sent its sentinel.

    Returns 1 if any worker reported failure, 0 otherwise.
    """
    active = th_count
    while active:
        ctime, tag, res = res_q.get()
        if ctime is None:
            if not tag:
                return 1  # a worker died
            active -= 1
        else:
            print("{} - {} - {}".format(ctime, tag, res))
    return 0
def set_osd_historic(duration, keep, osd_id, cluster="ceph"):
    """Set historic-op retention on an OSD; return the previous (duration, keep)."""
    prev = json.loads(osd_exec(osd_id, "dump_historic_ops", cluster=cluster))
    osd_exec(osd_id, "config set osd_op_history_duration {}".format(duration), cluster=cluster)
    osd_exec(osd_id, "config set osd_op_history_size {}".format(keep), cluster=cluster)
    return (prev["duration to keep"], prev["num to keep"])
def get_argparser():
    """Build the CLI parser with `collect`, `info` and `stat` sub-commands."""
    parser = argparse.ArgumentParser(prog='collect', description="Collect ceph performance info")
    subparsers = parser.add_subparsers(dest='subparser_name')

    collect = subparsers.add_parser('collect', help='Collect data from running cluster')
    collect.add_argument("-c", "--cluster", default="ceph", help="Ceph cluster name")
    collect.add_argument("--db", default=None, help="Store into file in binary format")
    # Typo fixes below: "inteval" -> "interval", "databse" -> "database".
    collect.add_argument("-r", "--run-time", type=int, default=60, help="Data collect interval in seconds")
    collect.add_argument("-t", "--timeout", type=int, default=500, help="Collect timeout in ms")
    collect.add_argument("-p", "--prepare-for-historic", action="store_true",
                         help="Prepare OSD for reliable historic ops collection")
    collect.add_argument("osdids", nargs='*', help="OSD id's list or '*' to monitor all")

    info = subparsers.add_parser('info', help='Show basic db info')
    info.add_argument("db", help="Path to database")

    stat = subparsers.add_parser('stat', help='Show stat for db')
    stat.add_argument("-i", "--osd-id", type=int, default=None, help="Show only event from selected osd")
    stat.add_argument("type", choices=("ops", "historic"), help="Event to stat")
    stat.add_argument("db", help="Path to database")
    return parser
def collect(opts):
    """Spawn one polling thread per data source and sink samples to db/stdout."""
    if opts.osdids == ['*']:
        osd_ids = list(find_all_osd())
    else:
        if '*' in opts.osdids:
            print("* should be the only one osd id")
            return 1
        osd_ids = opts.osdids

    # Optionally widen each OSD's historic-op window, remembering the old
    # settings so they can be restored afterwards.
    osd_historic_params = {}
    if opts.prepare_for_historic:
        for osd_id in osd_ids:
            osd_historic_params[osd_id] = set_osd_historic(2, 200, osd_id, opts.cluster)

    try:
        etime = time.time() + opts.run_time
        res_q = Queue.Queue()
        period = opts.timeout / 1000.0
        threads = []

        def add_worker(tag, func):
            # Each worker polls `func` on its own thread until `etime`.
            threads.append(threading.Thread(target=worker,
                                            args=(tag, func, period, res_q, etime)))

        for osd_id in osd_ids:
            add_worker('ops.osd-{}'.format(osd_id), functools.partial(collect_current_ops, osd_id))
        for osd_id in osd_ids:
            add_worker('historic.osd-{}'.format(osd_id), functools.partial(collect_historic_ops, osd_id))
        add_worker('diskstats', collect_disks_usage)
        for osd_id in osd_ids:
            add_worker('perf.osd-{}'.format(osd_id), functools.partial(collect_perf, osd_id))

        for th in threads:
            th.daemon = True
            th.start()

        if opts.db is not None:
            return store_to_db(res_q, opts.db, len(threads))
        return print_results(res_q, len(threads))
    finally:
        # Restore the original historic-op settings on every touched OSD.
        for osd_id, (duration, keep) in osd_historic_params.items():
            osd_historic_params[osd_id] = set_osd_historic(duration, keep, osd_id, opts.cluster)
def main(argv):
    """CLI entry point; returns a process exit code."""
    opts = get_argparser().parse_args(argv[1:])
    if opts.subparser_name == 'collect':
        return collect(opts)
    if opts.subparser_name == 'stat':
        return show_stats(opts.db, opts.type, opts.osd_id)
    # 'info' and anything else is not implemented yet.
    raise NotImplementedError()
if __name__ == "__main__":
exit(main(sys.argv))
| StarcoderdataPython |
184247 | from django.conf.urls import url
from . import views
from django.conf.urls import url, include
app_name = 'Article'

# NOTE(review): the AddDislike route pattern contains 'AddDislikeike' — looks
# like a typo, but changing it would break any existing links; confirm first.
urlpatterns = [
    url(r'^create/', views.CreateArticle, name="CreateArticle"),
    url(r'^home/', views.home, name="Home"),
    url(r'^(?P<pk>[0-9]+)/addLike/', views.AddLike, name="AddLike"),
    url(r'^(?P<pk>[0-9]+)/AddDislikeike/', views.AddDislike, name="AddDislike"),
]
| StarcoderdataPython |
4975248 | from protocol import JsonReceiver
from twisted.internet import reactor
from twisted.internet import protocol, stdio
validClients = []
class verifyClientProtocol(JsonReceiver):
    """Protocol that keeps the module-level `validClients` list in sync with
    validate/invalidate commands received from the authentication server."""

    def __init__(self):
        self.debug_enabled = False

    def out(self, *messages):
        for message in messages:
            print(message)

    def debug(self, *messages):
        # Debug output is gated on the instance flag.
        if self.debug_enabled:
            self.out(*messages)

    def connectionMade(self):
        # Identify ourselves to the auth server as the battle server.
        self.sendCommand("battle_server")
        self.out("Battle server connected to the authentication server...")

    def sendCommand(self, command, **params):
        self.sendObject(command=command, params=params)

    def objectReceived(self, obj):
        self.debug("Data received: {0}".format(obj))
        if 'command' in obj:
            self.receiveCommand(obj['command'], **obj.get('params', {}))

    def invalidJsonReceived(self, data):
        self.debug("Invalid JSON data received: {0}".format(data))

    def receiveCommand(self, command, **params):
        # Dispatch table of commands the auth server may send us.
        handlers = {
            'validPlayer': self.verifiedPlayer,
            'invalidatePlayer': self.logoutPlayer,
        }
        if command not in handlers:
            self.debug("Invalid command received: {0}".format(command))
            return
        try:
            handlers[command](**params)
        except TypeError as e:
            self.debug("Invalid command parameters received: {0}".format(e))

    def logoutPlayer(self, username):
        # Drop the first matching entry; ignore unknown usernames.
        try:
            validClients.remove(username)
        except ValueError:
            pass

    def verifiedPlayer(self, username):
        global validClients
        validClients.append(username)

    def serverError(self, message):
        self.out("Server error: {0}".format(message))

    def serverMessage(self, message):
        self.out(message)
class verifyClientFactory(protocol.ClientFactory):
    """Factory for connections to the authentication server."""

    protocol = verifyClientProtocol

    # NOTE(review): the original source had bare `print` statements split
    # from their arguments (Python 2 residue / mangled paste), so these
    # messages were never actually printed; reconstructed as print() calls.
    def startedConnecting(self, connector):
        destination = connector.getDestination()
        print("Connecting to server {0}:{1}, please wait...".format(destination.host, destination.port))

    def clientConnectionFailed(self, connector, reason):
        print('Could not connect to authentication server at port ' + str(connector.getDestination().port))

    def clientConnectionLost(self, connector, reason):
        print('Connection to authentication server severed')
        reactor.stop()
4960440 | from prefect import task, Flow
from prefect.engine.executors import DaskExecutor
import numpy as np
@task
def generate_diag(n):
    """Build an n x n diagonal matrix with eigenvalues 0..n-1."""
    return np.diag(np.arange(0, n, dtype=float))
@task
def generate_random(n):
    """Return a uniform-random n x n matrix."""
    return np.random.random((n, n))
@task
def qr(A):
    """Return the orthogonal factor Q of the QR decomposition of A."""
    Q, _unused_r = np.linalg.qr(A)
    return Q
@task
def first_half(D, Q):
    """First half of the similarity transform: D @ Q.T."""
    return np.matmul(D, Q.T)
@task
def second_half(Q, B):
    """Finish the similarity transform: Q @ B."""
    return np.matmul(Q, B)
@task
def diagonalize(C):
    """Return the eigenvalues of C sorted in ascending order."""
    spectrum = np.linalg.eigvals(C)
    spectrum.sort()
    return spectrum
@task
def check_result(eigvals):
    """Print whether the eigenvalues match 0..n-1 within 1e-4."""
    expected = np.arange(0, len(eigvals), dtype=float)
    check = np.abs(eigvals - expected).max() < 0.0001
    print("Check results:", check)
with Flow("numpy-example") as flow:
D = generate_diag(1000)
A = generate_random(1000)
Q = qr(A)
B = first_half(D, Q)
C = second_half(Q, B)
eigvals = diagonalize(C)
check_result(eigvals)
# dask_scheduler = 'tcp://somehost:someport'
# executor = DaskExecutor(address=dask_scheduler)
# flow_state = flow.run(executor=executor)
flow_state = flow.run()
| StarcoderdataPython |
1798882 | import numpy as np
import pandas as pd
import os
import random
from textblob import TextBlob
from googleapiclient.discovery import build
api_key = os.environ.get("API_KEY")
def related_ids(url):
    """Return [source_video_id] + up to 5 related video ids from the YouTube API."""
    api_key = os.environ.get("API_KEY")
    # The video id is the v= parameter at the end of a watch URL.
    vid = url.split('=')[-1]

    youtube = build('youtube', 'v3', developerKey=api_key)
    search_response = youtube.search().list(
        part='snippet',
        relatedToVideoId=vid,
        maxResults=5,
        type='video',
    ).execute()

    # Seed the list with the selected video so it is included downstream;
    # only ids are needed here — full details are requested in a later call.
    rel_vids = [vid]
    for item in search_response['items']:
        try:
            rel_vids.append(item['id']['videoId'])
        except:  # malformed item — skip it (kept broad as in the original)
            continue
    return rel_vids
def related_api_requests(video_ids, layers):
    """Fetch duration, statistics and snippet info for `video_ids` and return
    a DataFrame; `layers` records how deep in the related-video walk each id
    was discovered.
    """
    api_key = os.environ.get("API_KEY")
    youtube = build('youtube', 'v3', developerKey=api_key)
    related_Ids = video_ids

    # contentDetails: video durations (ISO-8601 strings).
    vid_response = youtube.videos().list(
        part='contentDetails',
        id=related_Ids,
    ).execute()
    durations = [item['contentDetails']['duration'] for item in vid_response['items']]

    # statistics: likes / dislikes / views, defaulting to 0 when the API
    # omits a counter.
    stat_response = youtube.videos().list(
        part='statistics',
        id=related_Ids,
    ).execute()
    likes = []
    dislikes = []
    views = []
    for stat in stat_response['items']:
        counters = stat['statistics']
        likes.append(counters.get('likeCount', 0))
        dislikes.append(counters.get('dislikeCount', 0))
        views.append(counters.get('viewCount', 0))

    # snippet: channel title, video title, upload date and id.
    snip_response = youtube.videos().list(
        part='snippet',
        id=related_Ids,
    ).execute()
    channels = []
    titles = []
    upload_date = []
    ids = []
    for snip in snip_response['items']:
        channels.append(snip['snippet']['channelTitle'])
        titles.append(snip['snippet']['title'])
        upload_date.append(snip['snippet']['publishedAt'])
        ids.append(snip['id'])

    # Assemble the result frame.
    df = pd.DataFrame()
    df['Title'] = titles
    df['ID'] = ids
    df['Channel'] = channels
    df['Length'] = durations
    df['Likes'] = likes
    df['Dislikes'] = dislikes
    df['Views'] = views
    df['Uploaded'] = upload_date

    # The API returns counters as strings — coerce to int.
    for field in ['Likes', 'Dislikes', 'Views']:
        df[field] = df[field].apply(lambda x: int(x))

    # Fraction of reactions that are likes.
    df['LikeRatio'] = df['Likes'] / (df['Likes'] + df['Dislikes'])
    df['Layer'] = layers
    return df
def gather_data(url):
    """Walk the YouTube "related videos" graph starting from *url*.

    Follows one random related video for up to 5 hops, collecting the
    non-chosen related IDs at every hop, then fetches their statistics and
    adds a title-sentiment polarity column.

    :param url: YouTube watch URL of the seed video.
    :return: DataFrame with one row per collected related video.
    """
    check_dict = {}
    idx = url
    for i in range(0, 5):
        # get ids of related videos (seed first, then up to 5 related)
        check = related_ids(idx)
        # Bug fix: the API may return fewer than 5 related videos, so the
        # previous hard-coded random.randint(1, 5) could index past the end.
        if len(check) < 2:
            # no related videos at all -- stop walking the graph
            break
        # choose a random related video (index 0 is the current video itself)
        selected = random.randint(1, len(check) - 1)
        # set the id for the next batch of suggested videos
        idx = check[selected]
        # drop the selected video from this batch so it doesn't get duplicated
        del check[selected]
        # add related ids to dictionary
        check_dict[i] = check
    # move related videos dictionary to dataframe; iterate only over the
    # layers actually collected (the walk may have stopped early)
    all_vids = pd.DataFrame()
    for i in check_dict:
        temp = pd.DataFrame()
        temp['id'] = check_dict[i]
        temp['layer'] = i
        all_vids = all_vids.append(temp)
    # api request for more info on related videos
    ids = all_vids['id'].tolist()
    layers = all_vids['layer'].tolist()
    df = related_api_requests(ids, layers)
    # add title polarity (absolute sentiment of the video title)
    df['polarity'] = df['Title'].apply(lambda x: abs(TextBlob(x).polarity))
    return df
#url = url.split('=')[-1]
#rel_vids = related_ids(url)
#df = related_api_requests(rel_vids)
#df['polarity'] = df['Title'].apply(lambda x: abs(TextBlob(x).polarity))
#sorted_df = df.sort_values(['LikeRatio', 'Views'], ascending = False)
#sorted_df = df.sort_values(['polarity'], ascending = False)
#sorted_df = sorted_df.reset_index()
# drop videos that have already been selected
#if sorted_df['Title'] in selected_titles:
# sorted_df.drop(0, inplace = True)
# sorted_df = sorted_df.reset_index()
#selected_titles.append(sorted_df['Title'][0])
#out[i] = sorted_df
# selection logic could use some work...
#url = sorted_df.reset_index()['ID'][0]
#if url in selected:
# filtered = sorted_df[sorted_df['ID'] != url].reset_index()
# url = filtered['ID'][0]
#selected.append(url)
#selected_df = related_api_requests(selected[:5])
#return out, selected_df
# --- file boundary (extraction artifact) ---
from tensorflow.examples.tutorials.mnist import input_data
# Download (on first run) and load the MNIST dataset with one-hot labels.
# NOTE(review): tensorflow.examples.tutorials.mnist was removed in TF 2.x --
# confirm this script is pinned to a TF 1.x release.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Print the (num_examples, features) shape of each split as a sanity check.
print(mnist.train.images.shape, mnist.train.labels.shape)
print(mnist.test.images.shape, mnist.test.labels.shape)
print(mnist.validation.images.shape,mnist.validation.labels.shape)
# Load data: flattened image arrays and one-hot label arrays for train/test.
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels
# gh_stars: 1-10 (dataset artifact, kept as a comment)
from test import helpers
from passlib.hash import bcrypt
from flaskeddit import db
from flaskeddit.models import AppUser
class TestAuth:
    """Integration tests for the register/login/logout auth routes.

    Fix: dataset anonymization had replaced several identifiers and calls
    with bare `<PASSWORD>` placeholders, which is invalid Python syntax.
    Restored the obvious intended code: hash with bcrypt and pass the
    hashed value to AppUser.
    """

    def test_get_register(self, test_client):
        """
        Tests GET request to the /register route to assert the registration page is
        returned.
        """
        response = test_client.get("/register")
        assert response is not None
        assert response.status_code == 200
        assert b"Register" in response.data

    def test_post_register(self, test_client):
        """
        Test POST request to the /register route to assert the user is successfully
        registered.
        """
        response = test_client.post(
            "/register",
            data={
                "username": "mockusername",
                "password": "mockpassword123!",
                "confirm_password": "mockpassword123!",
            },
            follow_redirects=True,
        )
        assert response is not None
        assert response.status_code == 200
        assert b"Successfully registered." in response.data

    def test_get_login(self, test_client):
        """
        Test GET request to the /login route to assert the login page is returned.
        """
        response = test_client.get("/login")
        assert response is not None
        assert response.status_code == 200
        assert b"Log In" in response.data

    def test_post_login(self, test_client):
        """
        Test POST request to the /login route to assert the user is successfully logged
        in.
        """
        password = "mockpassword123!"
        hashed_password = bcrypt.hash(password)
        app_user = AppUser(username="mockusername", password=hashed_password)
        db.session.add(app_user)
        db.session.commit()
        response = test_client.post(
            "/login",
            data={"username": app_user.username, "password": password},
            follow_redirects=True,
        )
        assert response is not None
        assert response.status_code == 200
        assert b"Successfully logged in" in response.data

    def test_post_logout(self, test_client):
        """
        Test POST request to the /logout route to assert the user is successfully
        logged out.
        """
        password = "mockpassword123!"
        hashed_password = bcrypt.hash(password)
        app_user = AppUser(username="mockusername", password=hashed_password)
        db.session.add(app_user)
        db.session.commit()
        helpers.login(test_client, app_user.username, password)
        response = test_client.post("/logout", follow_redirects=True)
        assert response is not None
        assert response.status_code == 200
        assert b"Successfully logged out" in response.data
# --- file boundary (extraction artifact) ---
# camera_state_predict/data_collect_haar.py
#!/usr/bin/env python3
# coding=utf-8
import cv2
import numpy as np
import csv
import time
from multiprocessing import Process
# Re-detecting the face after an interruption causes a sudden speed jump,
# because the speed is computed against the last frame that had a detection.
# The left/right edge checks are currently almost ineffective.
class DataCollect(object):
    """Extract per-frame features from a video for camera-state prediction.

    For every frame one CSV row is written containing: a frame counter, a
    global motion flag, an entropy-jitter value, a Haar-cascade based
    person presence flag with x/y speeds, and two "edge proximity" values
    used to decide when neighbouring cameras should switch on.
    """
    def __init__(self, cam_id, video_name):
        # cam_id: numeric id, used for the display window title and logging
        # video_name: path of the video file to analyse
        self.cam_id = cam_id
        self.video_name = video_name
        self.row = []  # feature row being assembled for the current frame
    def _judge_move(self, cur_frame_inner, pre_frame_inner):
        # Flip the motion flag in self.row to '1' if the current frame
        # differs sufficiently from the previous one; returns the processed
        # grayscale frame to serve as the next "previous" frame.
        # gray_img = cv2.cvtColor(cur_frame_inner, cv2.COLOR_BGR2GRAY)
        gray_img = cur_frame_inner
        gray_img = cv2.resize(gray_img, (500, 500))  # unclear whether this resize affects detection -- TODO verify
        gray_img = cv2.GaussianBlur(gray_img, (21, 21), 0)
        if pre_frame_inner is None:
            pre_frame_inner = gray_img
            return pre_frame_inner
        else:
            img_delta = cv2.absdiff(pre_frame_inner, gray_img)
            thresh = cv2.threshold(img_delta, 25, 255, cv2.THRESH_BINARY)[1]
            thresh = cv2.dilate(thresh, None, iterations=2)
            # image, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            for c in contours:
                if cv2.contourArea(c) < 500:  # sensitivity threshold
                    continue
                else:
                    # print(cv2.contourArea(c))
                    # a moving object is present in the frame
                    self.row.pop()
                    self.row.append('1')
                    break
            pre_frame_inner = gray_img
            return pre_frame_inner
    def _entropy(self, band):  # compute the entropy of a frame
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()
    def _process_rgb_delta(self, cur_frame_inner, entropy_last_inner):  # entropy jitter between frames
        # Appends the (integer) entropy difference to self.row and returns
        # the current frame's entropy for the next call.
        # b, g, r = cv2.split(cur_frame_inner)
        # rgb_average = (self._entropy(r) + self._entropy(g) + self._entropy(b)) / 3
        gray_average = self._entropy(cur_frame_inner)
        if entropy_last_inner == 0:
            self.row.append(0)
            return gray_average
        jitter = abs(gray_average - entropy_last_inner)
        jitter = int(jitter)
        # print("frame jitter value:", jitter)
        self.row.append(jitter)
        return gray_average
    def _cal_speed_location(self, cur_frame_inner, point_x_inner, point_y_inner,
                            smooth_times, speed_x, speed_y):
        # Detect an upper body with a Haar cascade; append to self.row the
        # presence flag, x/y speeds and the two left/right edge-proximity
        # values, then return the updated tracking state.
        # cur_frame_inner = cv2.cvtColor(cur_frame_inner, cv2.COLOR_BGR2GRAY)
        bodycascade = cv2.CascadeClassifier("haarcascade_upperbody.xml")
        bodys = bodycascade.detectMultiScale(
            cur_frame_inner,
            scaleFactor=1.08,  # smaller = slower but more likely to detect
            minNeighbors=2,  # smaller = slower but more likely to detect
            minSize=(95, 80),
            maxSize=(150, 180),
            # minSize=(30, 30)
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        if len(bodys) == 0:  # nobody detected: speeds and camera flags are 0
            if point_x_inner == 0 and point_y_inner == 0:  # never seen before
                self.row.append(0)  # presence flag: no person motion
                self.row.append(0)  # two speed components
                self.row.append(0)
                self.row.append(0)  # left edge
                self.row.append(0)  # right edge
                # smoothing handled below
            else:  # a point was tracked in earlier frames: extrapolate (smoothing)
                if smooth_times >= 0:
                    self.row.append(1)  # presence flag: motion assumed
                    self.row.append(speed_x)
                    self.row.append(speed_y)
                    self.row.append(0)  # left edge
                    self.row.append(0)  # right edge
                    smooth_times -= 1
                    point_x_inner += speed_x
                    point_y_inner += speed_y
                    x, y, w, h = point_x_inner+speed_x, point_y_inner+speed_y, 150, 120
                    p1 = (x, y)
                    p2 = (x + w, y + h)
                    cv2.rectangle(cur_frame_inner, p1, p2, (0, 255, 0), 2)
                else:  # already smoothed for the allowed frames, stop extrapolating
                    self.row.append(0)  # presence flag: no person motion
                    self.row.append(0)  # two speed components
                    self.row.append(0)
                    self.row.append(0)  # left edge
                    self.row.append(0)  # right edge
        else:
            smooth_times = -1  # detection succeeded; -1 disables smoothing (use 5 to re-enable)
            self.row.append(1)  # presence flag: person motion
            # only the first detected body is used
            print('Now face:', bodys)
            x, y, w, h = bodys[0][0], bodys[0][1], bodys[0][2], bodys[0][3]
            p1 = (x, y)
            p2 = (x + w, y + h)
            cv2.rectangle(cur_frame_inner, p1, p2, (0, 255, 0), 2)
            if point_x_inner == 0 and point_y_inner == 0:  # first frame after (re)acquisition
                # both speeds are 0
                self.row.append(0)
                self.row.append(0)
            else:
                v_updown = point_y_inner - p1[1]
                v_leftright = p1[0] - point_x_inner
                # print("horizontal speed:", v_leftright)
                # print("vertical speed:", v_updown)
                self.row.append(v_leftright)
                self.row.append(v_updown)
                speed_x = v_leftright
                speed_y = v_updown
            point_x_inner = p1[0]
            point_y_inner = p1[1]
            # left edge proximity: how far inside the 50px border the box sits
            if p1[0] <= 50:
                if p1[0] < 0:
                    self.row.append(50)
                else:
                    self.row.append(50 - p1[0])
                print("左边该开了", 50 - p1[0])
            else:
                self.row.append(0)
            # right edge proximity (frame assumed 640px wide -- TODO confirm)
            if p2[0] >= 590:
                if p2[0] > 640:
                    self.row.append(50)
                else:
                    self.row.append(p2[0] - 590)
                print("右边该开了", p2[0] - 590)
            else:
                self.row.append(0)
        return point_x_inner, point_y_inner, smooth_times, speed_x, speed_y
    def data_collect(self):
        # Main loop: read the video frame by frame and write one CSV feature
        # row per frame to data/data_<name>.csv.
        # global state
        timecount_start = time.time()
        time_stamp = 1  # frame counter
        pre_frame = None  # feature 1 state: previous grayscale frame for motion detection
        entropy_last = 0  # feature 2 state: previous frame's entropy value
        point_x, point_y = 0, 0  # feature 3 state: last tracked position
        smooth = 0  # frames left to extrapolate after losing the detection
        speed_x_last, speed_y_last = 0, 0  # previous speeds, used for smoothing
        camera = cv2.VideoCapture(self.video_name)  # self.video_name
        self.row = []
        # file_name = str(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S") + '_' + str(self.cam_id))
        file_name = self.video_name[-25:-4]
        with open('data/data_' + file_name + '.csv', 'w', newline='') as file:  # newline='' avoids blank lines
            f = csv.writer(file)
            # per-frame feature extraction loop
            while True:
                res, cur_frame = camera.read()
                if res is not True:
                    break
                cur_frame = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
                if cv2.waitKey(1) & 0xFF == 27:
                    break
                # feature 0: time (currently just a frame counter)
                # time_now = str(datetime.datetime.now().strftime("%H%M%S%f"))
                # row.append(time_now[:-4])  # keep only two digits of the milliseconds
                self.row.append(time_stamp)
                time_stamp += 1
                print('------', time_stamp, '-------')
                # feature 1: on/off motion flag
                self.row.append('0')  # assume no motion; changed to '1' when motion is found
                pre_frame = self._judge_move(cur_frame, pre_frame)
                # feature 2: frame jitter (entropy delta)
                entropy_last = self._process_rgb_delta(cur_frame, entropy_last)
                # feature 3: speeds and per-camera on/off values
                point_x, point_y, smooth, speed_x_last, speed_y_last = self._cal_speed_location(cur_frame,
                                                                                               point_x,
                                                                                               point_y,
                                                                                               smooth,
                                                                                               speed_x_last,
                                                                                               speed_y_last)
                # write one row
                print(self.row)
                f.writerow(self.row)
                self.row = []
                cv2.imshow(str(self.cam_id), cur_frame)
        # report total elapsed time and release the capture
        timecount_end = time.time()
        print(self.cam_id, " time:", timecount_end - timecount_start)
        camera.release()
def start_collect(cam_id, video_name):
    """Process entry point: build a DataCollect worker and run its main loop."""
    collector = DataCollect(cam_id, video_name)
    collector.data_collect()
if __name__ == "__main__":
    global_start = time.time()
    # one worker process per input video, so several videos can run in parallel
    list_video_name = ["video/2cam_scene1/2017-08-07 18-00-50_0.avi"]
    for i, name in enumerate(list_video_name):
        p = Process(target=start_collect, args=(i, name))
        p.start()
    # NOTE(review): the workers are never join()ed, so the "global time"
    # below only measures process start-up, not the processing -- confirm intent.
    cv2.destroyAllWindows()
    global_end = time.time()
    print("global time:", global_end - global_start)
# --- file boundary (extraction artifact) ---
1695383 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import aiohttp
import pytest
from testing_support.fixtures import validate_transaction_metrics
from yarl import URL
from newrelic.api.background_task import background_task
from newrelic.api.function_trace import function_trace
# Parse "major.minor" from the aiohttp version string for feature gating.
version_info = tuple(int(_) for _ in aiohttp.__version__.split(".")[:2])

# These tests exercise the pre-3.0 `yield from` client API; skip on aiohttp 3+.
skipif_aiohttp3 = pytest.mark.skipif(
    version_info >= (3, 0), reason="This version of aiohttp does not support yield from syntax"
)


@asyncio.coroutine
def fetch(method, url):
    """Issue one HTTP request with aiohttp and consume the response body."""
    with aiohttp.ClientSession() as session:
        _method = getattr(session, method)
        response = yield from asyncio.wait_for(_method(url), timeout=None)
        response.raise_for_status()
        yield from response.text()


@background_task(name="fetch_multiple")
@asyncio.coroutine
def fetch_multiple(method, url):
    """Launch two concurrent fetches inside one New Relic background task.

    Exceptions are captured as results (return_exceptions=True).
    """
    coros = [fetch(method, url) for _ in range(2)]
    return asyncio.gather(*coros, return_exceptions=True)


# aiohttp renamed its client exception hierarchy in 2.0.
if version_info < (2, 0):
    _expected_error_class = aiohttp.errors.HttpProcessingError
else:
    _expected_error_class = aiohttp.client_exceptions.ClientResponseError


def task(loop, method, exc_expected, url):
    """Drive fetch_multiple on *loop* and check both results agree.

    When *exc_expected* is set, both fetches must fail with the
    version-appropriate client exception; otherwise both bodies must match.
    """
    future = asyncio.ensure_future(fetch_multiple(method, url))
    text_list = loop.run_until_complete(future)
    if exc_expected:
        assert isinstance(text_list[0], _expected_error_class)
        assert isinstance(text_list[1], _expected_error_class)
    else:
        assert text_list[0] == text_list[1]


# (HTTP method, exception-expected) pairs; the local test server only accepts GET.
test_matrix = (
    ("get", False),
    ("post", True),
    ("options", True),
    ("head", True),
    ("put", True),
    ("patch", True),
    ("delete", True),
)
@skipif_aiohttp3
@pytest.mark.parametrize("method,exc_expected", test_matrix)
def test_client_yield_from(event_loop, local_server_info, method, exc_expected):
    """Two concurrent client calls record two external metrics on the task."""
    @validate_transaction_metrics(
        "fetch_multiple",
        background_task=True,
        scoped_metrics=[
            (local_server_info.base_metric + method.upper(), 2),
        ],
        rollup_metrics=[
            (local_server_info.base_metric + method.upper(), 2),
        ],
    )
    def task_test():
        task(event_loop, method, exc_expected, local_server_info.url)

    task_test()


@skipif_aiohttp3
def test_client_yarl_yield_from(event_loop, local_server_info):
    """Same as above, but the URL is passed as a yarl.URL object."""
    method = "get"

    @validate_transaction_metrics(
        "fetch_multiple",
        background_task=True,
        scoped_metrics=[
            (local_server_info.base_metric + method.upper(), 2),
        ],
        rollup_metrics=[
            (local_server_info.base_metric + method.upper(), 2),
        ],
    )
    def task_test():
        task(event_loop, method, False, URL(local_server_info.url))

    task_test()


@skipif_aiohttp3
@pytest.mark.parametrize("method,exc_expected", test_matrix)
def test_client_no_txn_yield_from(event_loop, local_server_info, method, exc_expected):
    """Client calls outside any transaction must still run cleanly."""
    def task_test():
        task(event_loop, method, exc_expected, local_server_info.url)

    task_test()
@skipif_aiohttp3
@pytest.mark.parametrize("method,exc_expected", test_matrix)
def test_client_throw_yield_from(event_loop, local_server_info, method, exc_expected):
    """Throwing into a live request coroutine must still record the metric."""
    class ThrowerException(ValueError):
        pass

    @background_task(name="test_client_throw_yield_from")
    @asyncio.coroutine
    def self_driving_thrower():
        with aiohttp.ClientSession() as session:
            coro = session._request(method.upper(), local_server_info.url)
            # activate the coroutine
            coro.send(None)
            # inject error
            coro.throw(ThrowerException())

    @validate_transaction_metrics(
        "test_client_throw_yield_from",
        background_task=True,
        scoped_metrics=[
            (local_server_info.base_metric + method.upper(), 1),
        ],
        rollup_metrics=[
            (local_server_info.base_metric + method.upper(), 1),
        ],
    )
    def task_test():
        with pytest.raises(ThrowerException):
            event_loop.run_until_complete(self_driving_thrower())

    task_test()


@skipif_aiohttp3
@pytest.mark.parametrize("method,exc_expected", test_matrix)
def test_client_close_yield_from(event_loop, local_server_info, method, exc_expected):
    """Closing a request coroutine mid-flight must still record the metric."""
    @background_task(name="test_client_close_yield_from")
    @asyncio.coroutine
    def self_driving_closer():
        with aiohttp.ClientSession() as session:
            coro = session._request(method.upper(), local_server_info.url)
            # activate the coroutine
            coro.send(None)
            # force close
            coro.close()

    @validate_transaction_metrics(
        "test_client_close_yield_from",
        background_task=True,
        scoped_metrics=[
            (local_server_info.base_metric + method.upper(), 1),
        ],
        rollup_metrics=[
            (local_server_info.base_metric + method.upper(), 1),
        ],
    )
    def task_test():
        event_loop.run_until_complete(self_driving_closer())

    task_test()
test_ws_matrix = (
    # the 127.0.0.1 server does not accept websocket requests, hence an
    # exception is expected but a metric will still be created
    ("ws_connect", True),
)


@skipif_aiohttp3
@pytest.mark.parametrize("method,exc_expected", test_ws_matrix)
def test_ws_connect_yield_from(event_loop, local_server_info, method, exc_expected):
    """ws_connect issues a GET under the hood, so a GET metric is recorded."""
    @validate_transaction_metrics(
        "fetch_multiple",
        background_task=True,
        scoped_metrics=[
            (local_server_info.base_metric + "GET", 2),
        ],
        rollup_metrics=[
            (local_server_info.base_metric + "GET", 2),
        ],
    )
    def task_test():
        task(event_loop, method, exc_expected, local_server_info.url)

    task_test()


@skipif_aiohttp3
@pytest.mark.parametrize("method,exc_expected", test_matrix)
def test_create_task_yield_from(event_loop, local_server_info, method, exc_expected):
    """Requests driven through loop.create_task (Task.send) are still traced."""
    # `loop.create_task` returns a Task object which uses the coroutine's
    # `send` method, not `__next__`

    @asyncio.coroutine
    def fetch_task(loop):
        with aiohttp.ClientSession() as session:
            coro = getattr(session, method)
            resp = yield from loop.create_task(coro(local_server_info.url))
            resp.raise_for_status()
            yield from resp.text()

    @background_task(name="test_create_task_yield_from")
    @asyncio.coroutine
    def fetch_multiple(loop):
        coros = [fetch_task(loop) for _ in range(2)]
        return asyncio.gather(*coros, return_exceptions=True)

    @validate_transaction_metrics(
        "test_create_task_yield_from",
        background_task=True,
        scoped_metrics=[
            (local_server_info.base_metric + method.upper(), 2),
        ],
        rollup_metrics=[
            (local_server_info.base_metric + method.upper(), 2),
        ],
    )
    def task_test():
        result = event_loop.run_until_complete(fetch_multiple(event_loop))
        if exc_expected:
            assert isinstance(result[0], _expected_error_class)
            assert isinstance(result[1], _expected_error_class)
        else:
            assert result[0] == result[1]

    task_test()


@skipif_aiohttp3
@pytest.mark.parametrize("method,exc_expected", test_matrix)
def test_terminal_node_yield_from(event_loop, local_server_info, method, exc_expected):
    """
    This test injects a terminal node into a simple background task workflow.
    It was added to validate a bug where our coro.send() wrapper would fail
    when transaction's current node was terminal.
    """
    def task_test():
        @function_trace(terminal=True)
        def execute_task():
            task(event_loop, method, exc_expected, local_server_info.url)

        execute_task()

    task_test()
# --- file boundary (extraction artifact) ---
# pyqualtrics/__init__.py (repo: limetoad/pyqualtrics)
# -*- coding: utf-8 -*-
#
# This file is part of the pyqualtrics package.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/Baguage/pyqualtrics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
from StringIO import StringIO
from collections import OrderedDict
import collections
import requests
import os
__version__ = "0.5.0"
class Qualtrics(object):
"""
This is representation of Qualtrics REST API
"""
url = "https://survey.qualtrics.com/WRAPI/ControlPanel/api.php"
def __init__(self, user=None, token=None, api_version="2.5"):
"""
:param user: The user name. If omitted, value of environment variable QUALTRICS_USER will be used.
:param token: API token for the user. If omitted, value of environment variable QUALTRICS_TOKEN will be used.
:param api_version: API version to use (this library has been tested with version 2.5).
"""
if user is None:
user = os.environ.get("QUALTRICS_USER", None)
if user is None:
raise ValueError("user parameter should be passed to __init__ or enviroment variable QUALTRICS_USER should be set") # noqa
self.user = user
if token is None:
token = os.environ.get("QUALTRICS_TOKEN", None)
if token is None:
raise ValueError("token parameter should be passed to __init__ or enviroment variable QUALTRICS_TOKEN should be set") # noqa
self.token = token
self.default_api_version = api_version
# Version must be a string, not an integer or float
assert self.default_api_version, (str, unicode)
self.last_error_message = None
self.last_url = None
self.json_response = None
self.response = None # For debugging purpose
def __str__(self):
return self.user
def __repr__(self):
# Used code snippet from stackoverflow
# http://stackoverflow.com/questions/1436703/difference-between-str-and-repr-in-python
# Note this will print Qualtrics token - may be dangerous for logging
return "%s(%r)" % (self.__class__, self.__dict__)
def request(self, Request, post_data=None, post_files=None, **kwargs):
""" Send GET or POST request to Qualtrics API using v2.x format
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#overview_2.5
This function also sets self.last_error_message and self.json_response
:param Request: The name of the API call to be made ("createPanel", "deletePanel" etc).
:param post_data: Content of POST request. If None, GET request will be sent
:param post_files: Files to post (for importSurvey API call)
:param kwargs: Additional parameters for this API Call (LibraryID="abd", PanelID="123")
:return: None if request failed
"""
Version = kwargs.pop("Version", self.default_api_version)
# Version must be a string, not an integer or float
assert Version, (str, unicode)
# Special case for handling embedded data
ed = kwargs.pop("ED", None)
# http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
params = dict({"User": self.user,
"Token": self.token,
"Format": "JSON",
"Version": Version,
"Request": Request,
}.items() + kwargs.items())
# Format emdedded data properly,
# for example ED[SubjectID]=CLE10235&ED[Zip]=74534
if ed is not None:
for key in ed:
params["ED[%s]" % key] = ed[key]
if post_data:
r = requests.post(self.url,
data=post_data,
params=params)
elif post_files:
r = requests.post(self.url,
files=post_files,
params=params)
else:
r = requests.get(self.url,
params=params)
self.last_url = r.url
self.response = r.text
try:
json_response = json.loads(r.text, object_pairs_hook=collections.OrderedDict)
except ValueError:
# If the data being deserialized is not a valid JSON document, a ValueError will be raised.
self.json_response = None
if "Format" not in kwargs:
self.last_error_message = "Unexpected response from Qualtrics: not a JSON document"
raise RuntimeError(self.last_error_message)
else:
# Special case - getSurvey. That request has a custom response format (xml).
# It does not follow the default response format
return r.text
self.json_response = json_response
# Sanity check.
if (Request == "getLegacyResponseData" or Request == "getPanel") and "Meta" not in json_response:
# Special cases - getLegacyResponseData and getPanel
# Success
return json_response
if "Meta" not in json_response:
# Should never happen
self.last_error_message = "Unexpected response from Qualtrics: no Meta key in JSON response"
raise RuntimeError(self.last_error_message)
if "Status" not in json_response["Meta"]:
# Should never happen
self.last_error_message = "Unexpected response from Qualtrics: no Status key in JSON response"
raise RuntimeError(self.last_error_message)
if json_response["Meta"]["Status"] == "Success":
self.last_error_message = None
return json_response
# If error happens, it returns JSON object too
# Error message is in json_response["Meta"]["ErrorMessage"]
self.last_error_message = json_response["Meta"]["ErrorMessage"]
return None
def createPanel(self, LibraryID, Name, **kwargs):
""" Creates a new Panel in the Qualtrics System and returns the id of the new panel
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#createPanel_2.5
:param LibraryID: The library id you want to create the panel in
:param Name: The name of the new panel
:return: PanelID of new panel, None if error occurs
"""
if self.request("createPanel", LibraryID=LibraryID, Name=Name, **kwargs) is None:
return None
return self.json_response["Result"]["PanelID"]
def deletePanel(self, LibraryID, PanelID, **kwargs):
""" Deletes the panel.
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#deletePanel_2.5
:param LibraryID: The library id the panel is in.
:param PanelID: The panel id that will be deleted.
:return: True if deletion was successful, False otherwise
"""
if self.request("deletePanel", LibraryID=LibraryID, PanelID=PanelID, **kwargs) is None:
return False
return True
def getPanelMemberCount(self, LibraryID, PanelID, **kwargs):
""" Gets the number of panel members
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getPanelMemberCount_2.5
:param LibraryID: The library ID where this panel belongs
:param PanelID: The panel ID
:param kwargs: Additional parameters (used by unittest)
:return: The Number of members
"""
if self.request("getPanelMemberCount", LibraryID=LibraryID, PanelID=PanelID, **kwargs) is None:
return None
return int(self.json_response["Result"]["Count"])
def addRecipient(self, LibraryID, PanelID, FirstName, LastName, Email, ExternalDataRef, Language, ED):
""" Add a new recipient to a panel
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#addRecipient_2.5
:param LibraryID: The library the recipient belongs to
:param PanelID: The panel to add the recipient
:param FirstName: The first name
:param LastName: The last name
:param Email: The email address
:param ExternalDataRef: The external data reference
:param Language: The language code
:param ED: The embedded data (dictionary)
:return: The Recipient ID or None
"""
if not self.request("addRecipient",
LibraryID=LibraryID,
PanelID=PanelID,
FirstName=FirstName,
LastName=LastName,
Email=Email,
ExternalDataRef=ExternalDataRef,
Language=Language,
ED=ED):
return None
return self.json_response["Result"]["RecipientID"]
def getRecipient(self, LibraryID, RecipientID):
"""Get a representation of the recipient and their history
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getRecipient_2.5
:param LibraryID: The library the recipient belongs to
:param RecipientID: The recipient id of the person's response history you want to retrieve
"""
if not self.request("getRecipient", LibraryID=LibraryID, RecipientID=RecipientID):
return None
return self.json_response["Result"]["Recipient"]
def removeRecipient(self, LibraryID, PanelID, RecipientID, **kwargs):
""" Removes the specified panel member recipient from the specified panel.
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#removeRecipient_2.5
:param LibraryID: The library the recipient belongs to
:param PanelID: The panel to remove the recipient from
:param RecipientID: The recipient id of the person that will be updated
:return: True if successful, False otherwise
"""
if not self.request("removeRecipient", LibraryID=LibraryID, PanelID=PanelID, RecipientID=RecipientID, **kwargs):
return False
return True
def sendSurveyToIndividual(self, **kwargs):
""" Sends a survey through the Qualtrics mailer to the individual specified.
Note that request will be put to queue and emails are not sent immediately (although they usually
delivered in a few seconds after this function is complete)
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#sendSurveyToIndividual_2.5
Example response (success):
{u'Meta': {u'Status': u'Success', u'Debug': u''},
u'Result': {u'DistributionQueueID': u'EMD_e3F0KAIVfzIYw0R', u'EmailDistributionID': u'EMD_e3F0KAIVfzIYw0R', u'Success': True}}
:param kwargs:
:return: EmailDistributionID
"""
if not self.request("sendSurveyToIndividual", **kwargs):
return None
return self.json_response["Result"]["EmailDistributionID"]
def createDistribution(self, SurveyID, PanelID, Description, PanelLibraryID, **kwargs):
""" Creates a distribution for survey and a panel. No emails will be sent. Distribution Links can be generated
later to take the survey.
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#createDistribution_2.5
:param SurveyID: The parent distribution you are reminding
:param PanelID: The panel you want to send to
:param Description: A description for this distribution
:param PanelLibraryID: The library id for the panel
:return: The distribution id
"""
if not self.request("createDistribution",
SurveyID=SurveyID,
PanelID=PanelID,
Description=Description,
PanelLibraryID=PanelLibraryID,
**kwargs):
return None
return self.json_response["Result"]["EmailDistributionID"]
def getDistributions(self, **kwargs):
""" Returns the data for the given distribution.
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getDistributions_2.5
Requests for distribution surveys to users are queued and not delivered immediately. Thus
functions like sendSurveyToIndividual will successfully completed even though no email were sent yet.
DistributionID returned by those functions can be used to check status of email delivery.
:param kwargs:
:return:
"""
if not self.request("getDistributions", **kwargs):
return None
return self.json_response
def getSurveys(self, **kwargs):
"""
This request returns a list of all the surveys for the user.
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getSurveys_2.5
:param kwargs: Additional parameters to API call
:return: ordered dictionary of surveys. {survey_id:metadata}
:rtype dict:
"""
response = self.request("getSurveys", **kwargs)
# print response
surveys = None
if response:
surveys = OrderedDict()
for survey in response["Result"]["Surveys"]:
surveys[survey['SurveyID']] = survey
return surveys
def getSurvey(self, SurveyID):
# Good luck dealing with XML
# Response does not include answers though
return self.request("getSurvey", SurveyID=SurveyID, Format=None)
def importSurvey(self, ImportFormat, Name, Activate=None, URL=None, FileContents=None, OwnerID=None, **kwargs):
"""
Import Survey
Note if contents of survey file is not correct empty survey will be created and error message will be returned
If it is a problem, it is up to application to handle this situation.
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#importSurvey_2.5
:param ImportFormat:
:param Name:
:param Activate:
:param URL:
:param FileContents:
:param OwnerID:
:return:
"""
result = self.request(
"importSurvey",
ImportFormat=ImportFormat,
Name=Name,
Activate=Activate,
URL=URL,
OwnerID=OwnerID,
post_files={"FileContents": FileContents} if FileContents else None,
**kwargs
)
if result is not None:
return result["Result"]["SurveyID"]
def deleteSurvey(self, SurveyID, **kwargs):
"""
Delete the specified survey
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#deleteSurvey_2.5
:param SurveyID: ID of the survey
:param kwargs: Additional parameters for API
:return:
"""
if self.request("deleteSurvey", SurveyID=SurveyID) is not None:
return True
return False
def activateSurvey(self, SurveyID, **kwargs):
""" Activates the specified Survey
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#activateSurvey_2.5
:param SurveyID: The Survey ID to activate
:return:
"""
if self.request("activateSurvey", SurveyID=SurveyID, **kwargs):
return True
return False
def deactivateSurvey(self, SurveyID, **kwargs):
""" Deactivates the specified Survey
https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#deactivateSurvey_2.5
:param SurveyID: The Survey ID to deactivate
:return:
"""
if self.request("deactivateSurvey", SurveyID=SurveyID, **kwargs):
return True
return False
def getLegacyResponseData(self, SurveyID, **kwargs):
    """Return all response data for a survey in the legacy data format.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getLegacyResponseData_2.5

    :param SurveyID: the survey whose responses are fetched
    :param kwargs: additional parameters accepted by the API call
    :return: parsed response data, or None/falsy on failure
    """
    return self.request("getLegacyResponseData", SurveyID=SurveyID, **kwargs)
def getResponse(self, SurveyID, ResponseID, **kwargs):
    """Return the data for a single response as a Python dictionary.

    SurveyID is required by the API even though only one response is
    fetched.  See getLegacyResponseData docs for the accepted kwargs
    (Labels, Questions, ExportQuestionID, ExportTags, LocalTime, ...).

    :param SurveyID: the survey the response belongs to
    :param ResponseID: the ID of the individual response
    :return: dict of response fields, or None on error
    """
    data = self.getLegacyResponseData(SurveyID=SurveyID, ResponseID=ResponseID, **kwargs)
    if not data:
        return None
    try:
        return data[ResponseID]
    except KeyError:
        # Should never happen: the API keys the payload by ResponseID.
        self.last_error_message = "Qualtrics error: ResponseID %s not in response" % ResponseID
        return None
def importResponses(self, SurveyID,
                    ResponseSetID=None,
                    FileURL=None,
                    Delimiter=None,
                    Enclosure=None,
                    IgnoreValidation=None,
                    DecimalFormat=None,
                    FileContents=None,
                    **kwargs):
    """Import responses from a CSV file or URL into the given survey.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#importResponses_2.5

    :param SurveyID: survey the responses will be attached to
    :param ResponseSetID: response set the responses will be placed in
    :param FileURL: remote CSV location (ftp/ftps/http/https)
    :param Delimiter: value separator, default ','
    :param Enclosure: quoting character, default '"'
    :param IgnoreValidation: if truthy, skip validation during import
    :param DecimalFormat: decimal separator, ',' or '.'
    :param FileContents: CSV contents posted as multipart/form-data
    :return: True on success, False otherwise
    """
    files = {"FileContents": FileContents} if FileContents else None
    result = self.request(
        "importResponses",
        SurveyID=SurveyID,
        ResponseSetID=ResponseSetID,
        FileURL=FileURL,
        Delimiter=Delimiter,
        Enclosure=Enclosure,
        IgnoreValidation=IgnoreValidation,
        DecimalFormat=DecimalFormat,
        post_files=files,
        **kwargs)
    return bool(result)
def importResponsesAsDict(self, SurveyID, responses,
                          ResponseSetID=None,
                          Delimiter=None,
                          Enclosure=None,
                          IgnoreValidation=None,
                          DecimalFormat=None,
                          **kwargs):
    """Import responses given as a list of Python dictionaries.

    The dictionaries are serialized to CSV (columns taken from the first
    response) and forwarded to importResponses.
    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#importResponses_2.5

    :param SurveyID: survey the responses will be attached to
    :param responses: list of responses, each a dict, e.g.
        [{"ResponseID": "R_1234", ...}, {"ResponseID": "R_1235", ...}]
    :param ResponseSetID: response set the responses will be placed in
    :param Delimiter: value separator, default ','
    :param Enclosure: quoting character, default '"'
    :param IgnoreValidation: if truthy, skip validation during import
    :param DecimalFormat: decimal separator, ',' or '.'
    :return: True on success, False otherwise
    """
    assert isinstance(responses, list)
    if not responses:
        return True
    # Column order is dictated by the first response's keys.
    headers = responses[0].keys()
    fp = StringIO()
    dictwriter = csv.DictWriter(fp, fieldnames=headers)
    # Bug fix: writeheader() was previously called twice, so the column
    # names were imported a second time as a bogus response row.
    dictwriter.writeheader()
    for response in responses:
        dictwriter.writerow(response)
    contents = fp.getvalue()
    return self.importResponses(
        SurveyID=SurveyID,
        ResponseSetID=ResponseSetID,
        Delimiter=Delimiter,
        Enclosure=Enclosure,
        IgnoreValidation=IgnoreValidation,
        DecimalFormat=DecimalFormat,
        FileContents=contents,
        **kwargs)
def updateResponseEmbeddedData(self, SurveyID, ResponseID, ED, **kwargs):
    """Update the embedded data of one response.

    :param SurveyID: survey ID of the response to update
    :param ResponseID: the response to update
    :param ED: the new embedded data, as a dictionary
    :param kwargs: additional arguments (Version, Format, ...)
    :return: True on success, False otherwise
    """
    result = self.request(
        "updateResponseEmbeddedData",
        SurveyID=SurveyID,
        ResponseID=ResponseID,
        ED=ED,
        **kwargs)
    return bool(result)
def getPanel(self, LibraryID, PanelID, EmbeddedData=None, LastRecipientID=None, NumberOfRecords=None,
             ExportLanguage=None, Unsubscribed=None, Subscribed=None, **kwargs):
    """Return all members of a panel as a list of dictionaries.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getPanel_2.5

    :param LibraryID: library id of the panel
    :param PanelID: panel id to export
    :param EmbeddedData: comma-separated embedded-data keys (CSV export only)
    :param LastRecipientID: return only members AFTER this recipient id
    :param NumberOfRecords: number of members to return (all if omitted)
    :param ExportLanguage: if 1, include each member's language
    :param Unsubscribed: if 1, return only unsubscribed members
    :param Subscribed: if 1, return only subscribed members
    :return: list of member dicts, or None on error
    """
    result = self.request("getPanel",
                          LibraryID=LibraryID,
                          PanelID=PanelID,
                          EmbeddedData=EmbeddedData,
                          LastRecipientID=LastRecipientID,
                          NumberOfRecords=NumberOfRecords,
                          ExportLanguage=ExportLanguage,
                          Unsubscribed=Unsubscribed,
                          Subscribed=Subscribed,
                          **kwargs)
    if not result:
        return None
    return self.json_response
def importPanel(self, LibraryID, Name, CSV, **kwargs):
    """Import a CSV string as a new panel and return the new PanelID.

    The CSV must be comma separated, using '"' for encapsulation.  When
    ColumnHeaders is set, the well-known columns (Email, FirstName,
    LastName, ExternalRef) are auto-detected from the header row unless
    their positions were given explicitly.
    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#importPanel_2.5

    :param LibraryID: library to create the panel in
    :param Name: name of the new panel
    :param CSV: contents of the CSV file to import
    :return: PanelID string, or None on failure
    """
    if kwargs.get("ColumnHeaders", None) in ("1", 1):
        fp = StringIO(CSV)
        # Bug fix: csv.reader(fp).next() is Python 2 only; the next()
        # builtin works on both Python 2 and Python 3 readers.
        headers = next(csv.reader(fp))
        # Column positions are 1-based in the Qualtrics API.
        for column in ("Email", "FirstName", "LastName", "ExternalRef"):
            if column in headers and column not in kwargs:
                kwargs[column] = headers.index(column) + 1
        fp.close()
    result = self.request("importPanel", post_data=CSV, LibraryID=LibraryID, Name=Name, **kwargs)
    if result is not None:
        return result["Result"]["PanelID"]
    return None
def importJsonPanel(self, LibraryID, Name, panel, headers=None, **kwargs):
    """Import a list of member dictionaries as a new panel.

    Example document::

        [
            {"Email": "<EMAIL>", "FirstName": "PyQualtrics", "LastName": "Library"},
            {"Email": "<EMAIL>", "FirstName": "PyQualtrics2", "LastName": "Library2"}
        ]

    :param LibraryID: library to create the panel in
    :param Name: name of the new panel
    :param panel: list of member dicts
    :param headers: CSV column order; defaults to the common four fields
    :return: PanelID string, or None on failure
    """
    if headers is None:
        headers = ["Email", "FirstName", "LastName", "ExternalRef"]
    fp = StringIO()
    dictwriter = csv.DictWriter(fp, fieldnames=headers)
    dictwriter.writeheader()
    for subject in panel:
        dictwriter.writerow(subject)
    return self.importPanel(LibraryID=LibraryID,
                            Name=Name,
                            CSV=fp.getvalue(),
                            ColumnHeaders="1",
                            **kwargs)
def getSingleResponseHTML(self, SurveyID, ResponseID, **kwargs):
    """Return a response rendered as HTML (generated by Qualtrics).

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getSingleResponseHTML_2.5

    :param SurveyID: the response's associated survey ID
    :param ResponseID: the response to render
    :param kwargs: additional parameters
    :return: HTML string, or None on error
    """
    result = self.request("getSingleResponseHTML",
                          SurveyID=SurveyID,
                          ResponseID=ResponseID,
                          **kwargs)
    if not result:
        return None
    return self.json_response["Result"]
def getAllSubscriptions(self):
    """Check the status of all of this 3rd party's event subscriptions.

    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#getAllSubscriptions_2.5

    !!! NOT YET TESTED !!!

    :return: API response, or None on failure
    """
    return self.request("getAllSubscriptions")
def subscribe(self, Name, PublicationURL, Topics, Encrypt=None, SharedKey=None, BrandID=None, **kwargs):
    """Subscribe a 3rd-party client to Qualtrics events.

    A topic subscription can be a single event (e.g. 'threesixty.created')
    or use the '*' wildcard ('threesixty.*' subscribes to all 360 events).
    https://survey.qualtrics.com/WRAPI/ControlPanel/docs.php#subscribe_2.5

    !!! NOT YET TESTED !!!

    :return: API response, or None on failure
    """
    # Bug fix: **kwargs were accepted but never forwarded to the API call.
    return self.request(
        "subscribe",
        Name=Name,
        PublicationURL=PublicationURL,
        Topics=Topics,
        Encrypt=Encrypt,
        SharedKey=SharedKey,
        BrandID=BrandID,
        **kwargs
    )
def generate_unique_survey_link(self, SurveyID, LibraryID, PanelID, DistributionID, FirstName, LastName, Email, ExternalDataRef="", Language="English", EmbeddedData=None):
    """Generate a unique survey link for one person.

    Based on a response from Qualtrics Support: the link token is
    '<distribution>_<survey>_<recipient>' from the IDs' suffixes.

    :param SurveyID: must look like SV_xxxxxxxxxx
    :param DistributionID: must look like EMD_xxxxxxxxxx
    :param ExternalDataRef: optional, defaults to ""
    :param Language: optional, defaults to "English"
    :param EmbeddedData: optional dict of embedded data
    :return: the survey URL string, or None on error
    """
    assert isinstance(EmbeddedData, (dict, type(None)))
    assert isinstance(SurveyID, (str, unicode))
    assert isinstance(DistributionID, (str, unicode))
    ed = {} if EmbeddedData is None else EmbeddedData
    recipient_id = self.addRecipient(LibraryID, PanelID, FirstName=FirstName, LastName=LastName, Email=Email, ExternalDataRef=ExternalDataRef, Language=Language, ED=ed)
    if recipient_id is None:
        # last_error_message was already set by addRecipient
        return None
    if "_" not in SurveyID:
        self.last_error_message = "Invalid SurveyID format (must be SV_xxxxxxxxxx)"
        return None
    if "_" not in DistributionID:
        self.last_error_message = "Invalid DistributionID format (must be EMD_xxxxxxxxxx)"
        return None
    token = "%s_%s_%s" % (DistributionID.split("_")[1], SurveyID.split("_")[1], recipient_id)
    return "http://new.qualtrics.com/SE?Q_DL=%s" % token
| StarcoderdataPython |
4934757 | <reponame>aslemen/Keyaki.vim
import sys

# Collect (tag, is_syntactic) pairs from a ctags-style listing on stdin and
# emit a vimscript fragment declaring b:penn_tags.
is_syntag = False
tags = set()
for line in sys.stdin:
    items = line.split()
    if len(items) > 1:
        if items[1] == "__syntags__":
            # Marker line: everything after this belongs to the syntactic-tag section.
            is_syntag = True
        elif items[1] == "__tagtab__":
            tags.add((items[0], is_syntag))
        # any other kind is ignored
    # lines with fewer than two fields are ignored

# "ID" is always present even if absent from the input.
tags.add(("ID", False))

tags_sorted = sorted(tags, key=lambda t: str.lower(t[0]))

sys.stdout.write("\t\tlet b:penn_tags = [\n")
for tag, is_syn in tags_sorted:
    # NOTE(review): '0' is written when is_syn is True and '1' otherwise,
    # which looks inverted relative to the variable name -- confirm against
    # the consuming vim script before changing.
    sys.stdout.write(
        "\t\t\t\t\t\\['{tag}', {is_syn}],\n".format(
            tag=tag,
            is_syn="0" if is_syn else "1",
        )
    )
sys.stdout.write("\t\t\\]\n")
| StarcoderdataPython |
import os
import pathlib

# Project root: the repository root when running from source, or the
# executable's own directory when running as a frozen binary bundle.
DIR_ROOT = pathlib.Path(__file__).parents[1]
if os.path.exists(os.path.abspath('./shard_projector.exe')):
    DIR_ROOT = os.path.dirname(pathlib.Path(__file__))

# Well-known subdirectories of the project root.
DIR_INI = os.path.join(DIR_ROOT, "ini")
DIR_TEMP = os.path.join(DIR_ROOT, "temp")
DIR_EXT = os.path.join(DIR_ROOT, "ext")
DIR_IMG = os.path.join(DIR_ROOT, "img")

if __name__ == "__main__":
    # Diagnostic dump of the resolved paths (DIR_TEMP intentionally omitted,
    # matching the original behaviour).
    for directory in (DIR_ROOT, DIR_INI, DIR_IMG, DIR_EXT):
        print(directory)
208948 | from django.shortcuts import render
from hello.apidata import NewsNasaApi, ImageDayApi
def index(request):
    """Render the site landing page."""
    return render(request, "index.html")
def home(request):
    """Render the home page."""
    return render(request, "home.html")
def news(request):
    """Render the NASA news list fetched from the API."""
    return render(request, "news.html", {"ListNews": NewsNasaApi()})
def imageday(request):
    """Render NASA's image-of-the-day page; the API payload is the context."""
    return render(request, "imageday.html", ImageDayApi())
def track(request):
    """Render the tracking page."""
    return render(request, "track.html")
| StarcoderdataPython |
5143437 | import matplotlib.pyplot as plt
import numpy as np
import torch
import pickle
from tqdm import tqdm
from boneik import kinematics, solvers, utils, draw, criteria, io
from boneik import bvh
def create_human_body() -> kinematics.Body:
    """Build the default humanoid kinematic body.

    Bone lengths and local-frame axis remappings are hard-coded; rotational
    degrees of freedom (limits in radians) are attached per joint.
    NOTE(review): the numeric lengths appear to be model-specific units,
    not metres -- confirm against the source data.
    """
    b = kinematics.BodyBuilder()
    # --- spine and head ---
    b.add_bone(
        "torso",
        "chest",
        tip_to_base=utils.make_tip_to_base(1.17965, "-x,z,y"),
        dofs={"rx": np.deg2rad([-10.0, 90.0])},
    ).add_bone(
        "chest",
        "neck",
        tip_to_base=utils.make_tip_to_base(2.0279, "x,y,z"),
        dofs={"ry": np.deg2rad([-90.0, 90.0])},
    ).add_bone(
        "neck",
        "head",
        tip_to_base=utils.make_tip_to_base(0.73577, "-x,y,-z"),
    ).add_bone(
        # --- left arm ---
        "neck",
        "shoulder.L",
        tip_to_base=utils.make_tip_to_base(0.71612, "-z,-x,y"),
    ).add_bone(
        "shoulder.L",
        "elbow.L",
        tip_to_base=utils.make_tip_to_base(1.8189, "x,y,z"),
        dofs={
            "rx": np.deg2rad([-90.0, 30.0]),
            "ry": np.deg2rad([-90.0, 90.0]),
            "rz": np.deg2rad([-90.0, 90.0]),
        },
    ).add_bone(
        "elbow.L",
        "hand.L",
        tip_to_base=utils.make_tip_to_base(1.1908, "x,y,z"),
        dofs={"rz": np.deg2rad([-135.0, 0.0])},
    ).add_bone(
        # --- right arm (mirrored axis mapping) ---
        "neck",
        "shoulder.R",
        tip_to_base=utils.make_tip_to_base(0.71612, "z,x,y"),
    ).add_bone(
        "shoulder.R",
        "elbow.R",
        tip_to_base=utils.make_tip_to_base(1.8189, "x,y,z"),
        dofs={
            "rx": np.deg2rad([-90.0, 30.0]),
            "ry": np.deg2rad([-90.0, 90.0]),
            "rz": np.deg2rad([-90.0, 90.0]),
        },
    ).add_bone(
        "elbow.R",
        "hand.R",
        tip_to_base=utils.make_tip_to_base(1.1908, "x,y,z"),
        dofs={"rz": np.deg2rad([0.0, 135.0])},
    ).add_bone(
        # --- left leg ---
        "torso",
        "hip.L",
        tip_to_base=utils.make_tip_to_base(1.1542, "-y,x,z"),
    ).add_bone(
        "hip.L",
        "knee.L",
        tip_to_base=utils.make_tip_to_base(2.2245, "x,-z,y"),
        dofs={
            "rx": np.deg2rad([-20.0, 20.0]),
            "ry": np.deg2rad([-90.0, 90.0]),
            "rz": np.deg2rad([-20.0, 20.0]),
        },
    ).add_bone(
        "knee.L",
        "foot.L",
        tip_to_base=utils.make_tip_to_base(1.7149, "x,y,z"),
        dofs={"rz": np.deg2rad([0.0, 90.0])},
    ).add_bone(
        # --- right leg (mirrored axis mapping) ---
        "torso",
        "hip.R",
        tip_to_base=utils.make_tip_to_base(1.1542, "y,-x,z"),
    ).add_bone(
        "hip.R",
        "knee.R",
        tip_to_base=utils.make_tip_to_base(2.2245, "x,-z,y"),
        dofs={
            "rx": np.deg2rad([-20.0, 20.0]),
            "ry": np.deg2rad([-90.0, 90.0]),
            "rz": np.deg2rad([-20.0, 20.0]),
        },
    ).add_bone(
        "knee.R",
        "foot.R",
        tip_to_base=utils.make_tip_to_base(1.7149, "x,y,z"),
        dofs={"rz": np.deg2rad([-90.0, 0.0])},
    ).add_bone(
        # --- virtual root: orients the whole body ---
        "root",
        "torso",
        tip_to_base=torch.eye(4),
        # dofs={"rx", "ry", "rz", "tx", "ty", "tz"},
        dofs={"rx", "ry", "rz"},
    )
    # The node order fixes the joint indexing used everywhere else
    # (anchors, weights, BVH export).
    body = b.finalize(
        [
            "head",
            "neck",
            "shoulder.R",
            "elbow.R",
            "hand.R",
            "shoulder.L",
            "elbow.L",
            "hand.L",
            "hip.R",
            "knee.R",
            "foot.R",
            "hip.L",
            "knee.L",
            "foot.L",
            "torso",
            "chest",
            "root",
        ]
    )
    return body
def main():
    """CLI entry point: fit the kinematic body to per-frame 3D joint
    predictions and export the result as a BVH animation."""
    import argparse
    from pathlib import Path

    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=Path, help="Pickled 3D joint predictions (NxMx3)")
    parser.add_argument("-body", type=Path, help="Kinematic description file")
    parser.add_argument("-input-fps", type=int, default=30, help="Input FPS")
    parser.add_argument("-input-step", type=int, default=1, help="Fit every nth frame")
    parser.add_argument(
        "-scale", type=float, help="Scale anchors of first frame to this"
    )
    parser.add_argument(
        "-max-loss", type=float, default=0.3, help="max loss to accept in fitting"
    )
    parser.add_argument(
        "-crit",
        type=str,
        choices=["euclidean", "parallel"],
        default="parallel",
        help="Loss criterium to apply",
    )
    parser.add_argument("-output", type=Path, default=Path("./tmp/human.bvh"))
    parser.add_argument("-show", type=int, default=1, help="visualize every nth frame")
    args = parser.parse_args()
    assert args.input.is_file()

    # Load the kinematic body from file, or fall back to the built-in human.
    if args.body is not None:
        assert args.body.is_file()
        body = io.load_json(args.body)
    else:
        body = create_human_body()
    N = body.graph.number_of_nodes()

    # Bug fix: frame data was loaded from a hard-coded absolute Windows path,
    # ignoring the required 'input' argument; the file handle also leaked.
    with open(args.input, "rb") as f:
        frame_data = pickle.load(f)

    if args.scale is not None:
        scale_factor = utils.find_scale_factor(frame_data[0]) * args.scale
    else:
        scale_factor = 1.0

    poses = [body.fk()]  # important to start from rest-pose for bvh export.
    solver = solvers.IKSolver(body)

    if args.crit == "parallel":
        crit = criteria.ParallelSegmentCriterium(torch.zeros((N, 3)), torch.ones(N))
    else:
        crit = criteria.EuclideanDistanceCriterium(torch.zeros((N, 3)), torch.ones(N))
    crit.weights[-1] = 0  # root joint never has a corresponding anchor.

    axes_ranges = [[-20, 20], [-20, 20], [-2, 5]]
    fig, ax = draw.create_figure3d(axes_ranges=axes_ranges)

    prev_pose = body.fk()
    for i in tqdm(range(0, len(frame_data), args.input_step)):
        crit.anchors[: N - 1] = torch.from_numpy(frame_data[i]).float() * scale_factor
        torso = crit.anchors[-3].clone()
        crit.anchors[: N - 1] -= torso  # torso at 0/0/0
        loss = solver.solve(crit, history_size=10, max_iter=10)
        if loss > args.max_loss:
            # retry from rest-pose
            body.reset_()
            loss = solver.solve(crit)
        if loss < args.max_loss:
            # Re-apply the torso translation onto the root joint so the
            # exported pose sits at the original world position.
            delta = body["root", "torso"].get_delta()
            body["root", "torso"].set_delta(
                [
                    delta[0],
                    delta[1],
                    delta[2],
                    torso[0],
                    torso[1],
                    torso[2],
                ]
            )
            new_pose = body.fk()
            poses.append(new_pose)
            prev_pose = new_pose
        else:
            body.reset_()
            poses.append(prev_pose)  # Do not skip any frames, unhandled by BVH
        crit.anchors[: N - 1] += torso
        if (i // args.input_step) % args.show == 0:
            ax.cla()
            ax.set_xlim(*axes_ranges[0])
            ax.set_ylim(*axes_ranges[1])
            ax.set_zlim(*axes_ranges[2])
            draw.draw_kinematics(
                ax,
                body=body,
                fk=body.fk(),
                anchors=crit.anchors,
                draw_vertex_labels=False,
                draw_local_frames=False,
                draw_root=False,
            )
            # fig.savefig(f"tmp/{i:05d}.png", bbox_inches="tight")
            plt.show(block=False)
            plt.pause(0.01)

    bvh.export_bvh(
        path=args.output, body=body, poses=poses, fps=(args.input_fps / args.input_step)
    )
if __name__ == "__main__":
    main()
    # makefile()
| StarcoderdataPython |
3574829 | <filename>annealing/typesys/tools.py
import typed_ast.ast3 as ast
import util.error as err
import util.asttools as asttools
from typesys.mytypes import *
### Tools to manage type conversion ###
def width_max(ty1, ty2):
    """Return the larger per-component bit-width of two types.

    Non-numeric types count as -1.  ComplexType.width covers real and
    imaginary parts together, so it is halved before comparison.
    """
    wd1 = ty1.width if isinstance(ty1, NumType) else -1
    if isinstance(ty1, ComplexType):
        wd1 = int(wd1 / 2)
    wd2 = ty2.width if isinstance(ty2, NumType) else -1
    if isinstance(ty2, ComplexType):
        # Bug fix: this previously halved wd1 (copy/paste error), so a
        # complex second operand reported the wrong width.
        wd2 = int(wd2 / 2)
    return max(wd1, wd2)
def type_max(ty1, ty2):
    """Return the wider of two types, rebuilding numeric types with the
    maximum component width of the operands."""
    # MyType's magic methods (e.g., __lt__) decide the ordering.
    ty = max(ty1, ty2)
    if isinstance(ty, (BoolType, AnyType)) or type(ty) is NumType:
        return ty
    wd = width_max(ty1, ty2)
    if isinstance(ty, ComplexType):
        # ComplexType.width is total for real and imaginary parts.
        return ComplexType(wd * 2)
    if isinstance(ty, NumType):
        return ty.__class__(wd)
    err.error('Unsupported type_max on types:', ty1, ty2)
def type_div(ty1, ty2):
    """Return the result type of true division: at least a float, and a
    complex type when either operand is complex."""
    # MyType's magic methods (e.g., __lt__) decide the ordering.
    ty = max(ty1, ty2)
    if isinstance(ty, BoolType):
        return FloatType(32)
    wd = width_max(ty1, ty2)
    # Probe increasingly wide candidates; the first that subsumes ty wins.
    for bound in (FloatType(wd), ComplexType(wd * 2)):
        if ty <= bound:
            return bound
    if isinstance(ty, (NumType, AnyType)):
        return ty
    err.error('Unsupported type_div on types:', ty1, ty2)
def type_merge(ty1, ty2):
    """Compute the least common type able to represent both ty1 and ty2,
    degrading to AnyType when the structures cannot be unified."""
    # MyType's magic method __eq__ is used
    if ty1 == ty2:
        return ty1
    elif isinstance(ty1, NumType) and isinstance(ty2, NumType):
        # Two different numeric types merge to the generic numeric type.
        return NumType()
    elif isinstance(ty1, IterableType) and type(ty1) is type(ty2):
        # Same concrete iterable class: merge element (and value) types.
        cl = type(ty1)
        etype = type_merge(ty1.etype, ty2.etype)
        if cl is ArrayType:
            # Handling array -- Todo: merge shape info; Q.: to be error when ndim = -1?
            ndim = ty1.ndim if ty1.ndim == ty2.ndim else -1
            if ty1.module == ty2.module:
                module = ty1.module
                return ArrayType(etype, ndim, module = module)
            else:
                # Arrays from different backing modules cannot be unified.
                return AnyType()
        elif cl is UDictType:
            # Uniform dict: merge key and value types independently.
            return cl(etype, type_merge(ty1.vtype, ty2.vtype))
        else:
            return cl(etype)
    elif isinstance(ty1, CollectiveType) or isinstance(ty2, CollectiveType):
        # Uniform and non-uniform flavours of the same collection kind
        # merge to the non-uniform (generic) flavour.
        cl1 = type(ty1)
        cl2 = type(ty2)
        if (cl1 is TupleType and cl2 is UTupleType) or (cl2 is TupleType and cl1 is UTupleType):
            return TupleType()
        elif (cl1 is ListType and cl2 is UListType) or (cl2 is ListType and cl1 is UListType):
            return ListType()
        elif (cl1 is SetType and cl2 is USetType) or (cl2 is SetType and cl1 is USetType):
            return SetType()
        elif (cl1 is DictType and cl2 is UDictType) or (cl2 is DictType and cl1 is UDictType):
            return DictType()
    # No common structure -- give up and use AnyType.
    return AnyType()
def type_including(ty1, ty2):
    """True when ty1 subsumes ty2, i.e. merging ty2 into ty1 changes nothing."""
    return ty1 == type_merge(ty1, ty2)
### Tools related to MyType allocation ###
def array_with_check(etype, ndim, shape, module=''):
    """Build an ArrayType, dropping the shape when it carries no information
    (empty, or every dimension is the unknown marker -1)."""
    assert not shape or ndim == len(shape)
    if not shape or all(d == -1 for d in shape):
        # Shape is unknown in every dimension -- omit it entirely.
        if module:
            return ArrayType(etype, ndim, module=module)
        return ArrayType(etype, ndim)
    if module:
        return ArrayType(etype, ndim, shape, module)
    return ArrayType(etype, ndim, shape)
### User function signatures ###
def convert_tuple_type(tupled_type):
    """Collapse a tuple of element types into UTupleType when all elements
    share one type; otherwise fall back to the generic TupleType."""
    first = tupled_type[0]
    # A single-element tuple is trivially homogeneous (all() over empty rest).
    if all(first == ty for ty in tupled_type[1:]):
        return UTupleType(first)
    return TupleType()
class TypedSignature:
    """Typed signature of a user function: its name, parameters, and
    declared return type."""

    def __init__(self, name, params, returns):
        # Reject duplicate parameter names up front.
        ids = set(map(lambda x: x[0], params))
        if len(params) != len(ids):
            err.error('Duplicated parameters in:', params, 'for function:', name)
        self.name = name
        self.params = params # list of tuples (name, type, default)
        self.returns = returns # type

    def verify_args(self, types, keys = {}):
        """Check positional argument *types* and keyword argument *keys*
        against the declared parameters: warn on type mismatches, error on
        too many positionals or a missing required argument.

        NOTE: the mutable default for *keys* is safe because it is only read.
        """
        nargs = len(types)
        nparams = len(self.params)
        if nargs > nparams:
            err.error('Calling:', self.name, 'with', nargs, 'arguments while it has only', nparams, 'parameters.')
        # Positional arguments; a Python tuple of types denotes a tuple value.
        for i in range(nargs):
            ty = convert_tuple_type(types[i]) if isinstance(types[i], tuple) else types[i]
            if not isinstance(ty, AnyType) and not type_including(self.params[i][1], ty):
                err.warning('Calling:', self.name, 'with unmatched argument type:', ty, 'at position:', i)
        # Remaining parameters must be covered by keywords or defaults.
        for i in range(nargs, nparams):
            n = self.params[i][0]
            if n in keys:
                ty = convert_tuple_type(keys[n]) if isinstance(keys[n], tuple) else keys[n]
                if not isinstance(ty, AnyType) and not type_including(self.params[i][1], ty):
                    err.warning('Calling:', self.name, 'with unmatched argument type:', ty, 'for parameter:', n)
            elif not self.params[i][2]:
                # Neither a keyword argument nor a declared default value.
                err.error('Calling:', self.name, 'without argument for parameter:', n)
        err.verbose(2, 'Verified argument types for:', self.name)

    def params_as_dict(self):
        """Return a {parameter name: type} mapping."""
        return { p[0]: p[1] for p in self.params }
### Handling flow-sensitive typing rules for control branches ###
class BranchedContainer:
    """Base class for containers that mirror if/else control flow:
    fork() creates a body/orelse child per branch, join() discards them."""

    def __init__(self):
        self.parent = None
        self.body = None
        self.orelse = None

    def fork(self):
        """Create one child container per branch, linked back to self."""
        # Instantiate the concrete subclass so children match the parent type.
        self.body = self.__class__()
        self.orelse = self.__class__()
        for child in (self.body, self.orelse):
            child.parent = self

    def join(self):
        """Detach and drop both branch containers."""
        for child in (self.body, self.orelse):
            child.parent = None
        self.body = None
        self.orelse = None
class TypeTable(BranchedContainer):
    """Flow-sensitive variable-to-type table that forks/joins with control
    branches.  A variable mapped to a list carries one type per assignment
    "version"; a plain entry means a single version."""

    def __init__(self, init=None):
        # Bug fix: the previous default 'init = {}' was a shared mutable
        # default argument; use None and allocate a fresh dict per instance.
        self.type_table = {} if init is None else init
        BranchedContainer.__init__(self)

    def add(self, var, ty):
        """Record a (possibly additional) type for *var* in this scope."""
        if not self.type_table:
            self.type_table = {var: ty}
        elif var in self.type_table:
            if isinstance(self.type_table[var], list):
                self.type_table[var].append(ty)
            else:
                # Second assignment: promote the entry to a version list.
                self.type_table[var] = [self.type_table[var], ty]
        else:
            self.type_table[var] = ty

    def get_local(self, var, ver=-1):
        """Look up *var* in this scope only; *ver* selects the version
        (-1 means the latest)."""
        if var not in self.type_table:
            return None
        types = self.type_table[var]
        if isinstance(types, list):
            # Bug fix: the check was 'ver > len(types)', letting
            # ver == len(types) through to raise IndexError below.
            if ver >= len(types):
                err.error('Exceeded version number:', ver, '; must be <', len(types))
            return types[ver]
        else:
            if ver != -1 and ver != 0:
                err.error('Exceeded version number:', ver, '; must be 0 or -1')
            return types

    def get(self, var, ver=-1):
        """Look up *var*, falling back to enclosing (parent) scopes."""
        if var not in self.type_table:
            if self.parent:
                return self.parent.get(var, ver)
            return None
        return self.get_local(var, ver)

    def join(self):
        """Merge the body/orelse branch tables back into this table."""
        bkeys = self.body.type_table.keys()
        ekeys = self.orelse.type_table.keys()
        cand = bkeys & ekeys
        for var in bkeys | ekeys:
            # A variable assigned in only one branch degrades to AnyType.
            ty = AnyType()
            if var in cand:
                bty = self.body.get_local(var)
                ety = self.orelse.get_local(var)
                ty = type_merge(bty, ety)
            self.add(var, ty)
        BranchedContainer.join(self)

    def __str__(self):
        ret = 'type table: { '
        for var in self.type_table:
            ret += str(var) + ': '
            types = self.type_table[var]
            if isinstance(types, MyType):
                ret += str(types) + ', '
            else:
                ret += str([str(ty) for ty in types]) + ', '
        ret += '}'
        if self.body:
            ret += ' / body ' + str(self.body)
        if self.orelse:
            ret += ' / orelse ' + str(self.orelse)
        return ret
| StarcoderdataPython |
141744 | <reponame>samlet/stack<gh_stars>1-10
# Per-domain-type extractors.  Each maps (record, common fields, extra data)
# to a flat meta dictionary; common/data are splatted in last so they can
# override the record-derived keys.
meta_pickups = {
    'aux_domains': lambda r, common, data: {
        'pos': r['head_pos'], 'head': r['head'], 'head_word': r['head_word'],
        **common, **data},
    'root_domains': lambda r, common, data: {
        'pos': r['upos'], 'rel': r['rel'], **common, **data},
    'verb_domains': lambda r, common, data: {
        'pos': r['upos'], 'rel': r['rel'], **common, **data},
    'predicate': lambda r, common, data: {
        'pos': r['pos'], 'rel': r['rel'],
        'segments': r.get('segments', []),
        **common, **data},
    'subj_domains': lambda r, common, data: {
        'pos': r['head_pos'], 'head': r['head'], 'head_word': r['head_word'],
        **common, **data},
}
def build_meta(r, data):
    """Flatten domain record *r* plus request *data* into one meta dict,
    dispatching on the record's 'type' field via meta_pickups."""
    # from sagas.conf.conf import cf
    type_name = r['type']
    common = {
        'lemma': r['lemma'],
        'word': r['word'],
        'index': r['index'],
        'stems': r['stems'],
        'domain_type': type_name,
    }
    # if 'engine' not in data:
    #     data['engine'] = cf.engine(data['lang'])
    pickup = meta_pickups.get(type_name)
    if pickup is not None:
        return pickup(r, common, data)
    # Unknown domain type: fall back to a generic record.
    return {'rel': r['rel'], **common, **data}
| StarcoderdataPython |
6625597 | import os
import utils
from SCons.Environment import Environment
from SCons.Script import Exit
def wii(env):
    """Configure *env* to cross-compile for the Nintendo Wii via devkitPPC.

    Requires the DEVKITPPC and DEVKITPRO environment variables.  Installs
    the PowerPC cross toolchain, include/library search paths, Wii CPU
    flags and the SDL/libogc link libraries, then returns the environment.
    """
    bin_path = "%s/bin" % os.environ['DEVKITPPC']
    ogc_bin_path = "%s/libogc/bin" % os.environ['DEVKITPRO']
    prefix = 'powerpc-eabi-'
    def setup(x):
        # Prefix a tool name with the PowerPC cross-toolchain prefix.
        return '%s%s' % (prefix, x)
    env['CC'] = setup('gcc')
    env['LD'] = setup('ld')
    env['CXX'] = setup('g++')
    env['AS'] = setup('as')
    env['AR'] = setup('ar')
    env['OBJCOPY'] = setup('objcopy')
    if utils.isWindows():
        # On Windows the SDL/freetype/png headers and libs ship inside
        # devkitPro, so point at them directly instead of *-config scripts.
        env.Append(CPPPATH = ["%s/libogc/include" % os.environ['DEVKITPRO'],
                              "%s/libogc/include/SDL" % os.environ['DEVKITPRO'],
                              "%s/libogc/include/freetype2" % os.environ['DEVKITPRO']])
        env.Append(LIBPATH = ["%s/libogc/lib" % os.environ['DEVKITPRO'],
                              "%s/libogc/lib/wii" % os.environ['DEVKITPRO']])
        env.Append(LIBS = ['SDL', 'SDL_image', 'SDL_mixer', 'png', 'freetype', 'z'])
    else:
        env.Append(CPPPATH = ["%s/libogc/include" % os.environ['DEVKITPRO']])
    env.Append(CPPDEFINES = ['GEKKO', 'WII'])
    # Wii (Broadway/750) CPU flags, applied to compile and link alike.
    flags = ['-mrvl', '-mcpu=750', '-meabi', '-mhard-float']
    env.Append(CCFLAGS = flags)
    env.Append(CXXFLAGS = flags)
    env.Append(LINKFLAGS = flags)
    # env.Append(CPPPATH = ['#src/wii'])
    # --start-group/--end-group lets circularly-dependent libs resolve.
    env['LINKCOM'] = '$CXX $LINKFLAGS -Wl,--start-group $ARCHIVES $SOURCES $_LIBDIRFLAGS $_LIBFLAGS -Wl,--end-group -o $TARGET'
    env.Append(LIBS = ['wiiuse', 'wiikeyboard', 'iberty', 'bte', 'fat', 'ogc', 'm'])
    # os.environ['PATH'] = "%s:%s:%s" % (bin_path, ogc_bin_path, os.environ['PATH'])
    env.PrependENVPath('PATH', bin_path)
    env.PrependENVPath('PATH', ogc_bin_path)
    env.Append(CPPDEFINES = ['USE_SDL_MAIN'])
    utils.safeParseConfig(env, 'sdl-config --cflags --libs')
    utils.safeParseConfig(env, 'freetype-config --libs --cflags')
    utils.safeParseConfig(env, 'libpng-config --libs --cflags')
    return env
def checkPython(context):
    """SCons configure-check stub: always report Python as unavailable."""
    context.Result(0)
    return 0
def getEnvironment():
    """Create the SCons build environment for the Wii port of Paintown."""
    import utils
    environment = Environment(ENV = os.environ)
    environment['PAINTOWN_PLATFORM'] = ['wii', 'sdl']
    peg_color = 'light-cyan'
    environment['PAINTOWN_BACKEND'] = 'sdl'
    environment['PAINTOWN_USE_PRX'] = False
    environment['PAINTOWN_TESTS'] = {'CheckPython': checkPython}
    environment['PAINTOWN_COLORIZE'] = utils.colorize
    environment['PAINTOWN_NETWORKING'] = False
    environment['LIBS'] = []
    # Progress message shown when generating the peg parser.
    environment['PEG_MAKE'] = "%s %s" % (utils.colorize('Creating peg parser', peg_color), utils.colorize('$TARGET', 'light-blue'))
    environment.Append(BUILDERS = {'Peg' : utils.pegBuilder(environment)})
    environment.Append(CPPPATH = ['#src', '#src/util/network/hawknl'])
    environment.Append(CPPDEFINES = ['USE_SDL'])
    # lessVerbose quiets per-file output; wii() applies the cross toolchain.
    return utils.lessVerbose(wii(environment))
| StarcoderdataPython |
3305872 | <filename>src/Game.py<gh_stars>1-10
from __future__ import annotations
from src.Player import Player
class Game:
    """
    This class represents a game of ShootyBoats.

    === Public Attributes ===
    player1:
        The player that goes first; is a human player.
    player2:
        The player that goes second; is a CPU player.
    turn:
        The player who's turn it is.
    winner:
        The winning player, or None while the game is undecided.
    """
    player1: Player
    player2: Player
    turn: Player

    def __init__(self, player1: Player, player2: Player) -> None:
        """
        Initializes a game of ShootyBoats.
        """
        self.player1 = player1
        self.player2 = player2
        self.winner = None  # no winner when initialized
        self.turn = player1  # player1 starts off the game, so they get 1st turn

    def get_shot_coordinates(self) -> tuple:
        """
        Return the coordinates of the current player's shot: a targeted
        selection for the human, random coordinates for the CPU.
        """
        if self.turn is self.player1:
            return self.player1.select_target()
        return self.player2.get_random_coordinates()

    def run_ship_placing_stage(self) -> None:
        """
        Run stage 1: the human player places ships, the CPU places randomly.
        """
        self.player1.place_ships()
        self.player2.place_ships_randomly()

    def run_battle_stage(self) -> None:
        """
        Run stage 2: alternate moves until one side has sunk all five enemy
        ships, then announce the winner.
        """
        # Bug fix: the loop condition previously tested player1's counter
        # twice joined by 'or' (always equivalent to a single test), so the
        # game never noticed player2 winning.  Keep playing while NEITHER
        # player has sunk all five enemy ships.
        while self.player1.enemy_ships_sunk != 5 and \
                self.player2.enemy_ships_sunk != 5:
            # have the human make a move and print the boards so human can see
            if self.turn is self.player1:
                print(self.player1.board.get_player_board_string())
                print(self.player2.board.get_target_board_string())
                self.player1.make_move()
            # NOTE(review): self.turn is never toggled, so the CPU moves on
            # every iteration regardless -- confirm intended turn handling.
            self.player2.make_random_move()
        # print winner
        if self.player1.enemy_ships_sunk == 5:
            print("Player1 has won")
        else:
            print("Player2 has won")

    def run_game(self) -> None:
        """
        Runs this game of ShootyBoats.
        """
        self.run_ship_placing_stage()
        self.run_battle_stage()
| StarcoderdataPython |
3487819 | '''
Created on Apr 28, 2020
@author: ballance
'''
from vsc.model.field_composite_model import FieldCompositeModel
'''
Created on Apr 28, 2020
@author: ballance
'''
from _io import StringIO
import vsc.model as vm
from vsc.model.constraint_dist_model import ConstraintDistModel
from vsc.model.constraint_expr_model import ConstraintExprModel
from vsc.model.constraint_foreach_model import ConstraintForeachModel
from vsc.model.constraint_solve_order_model import ConstraintSolveOrderModel
from vsc.model.covergroup_model import CovergroupModel
from vsc.model.coverpoint_bin_array_model import CoverpointBinArrayModel
from vsc.model.coverpoint_bin_collection_model import CoverpointBinCollectionModel
from vsc.model.coverpoint_bin_single_range_model import CoverpointBinSingleRangeModel
from vsc.model.coverpoint_model import CoverpointModel
from vsc.model.dist_weight_expr_model import DistWeightExprModel
from vsc.model.field_array_model import FieldArrayModel
from vsc.model.field_scalar_model import FieldScalarModel
from vsc.model.model_visitor import ModelVisitor
from vsc.model.rangelist_model import RangelistModel
from vsc.model.unary_expr_type import UnaryExprType
class ModelPrettyPrinter(ModelVisitor):
    """Visitor that renders a vsc model (fields, constraints, covergroups)
    as SystemVerilog-like source text.

    Usage: ``ModelPrettyPrinter.print(model)`` returns the rendered string.
    State is reset on every ``do_print`` call, so one instance can be reused.
    """
    def __init__(self):
        self.out = StringIO()         # accumulates the rendered text
        self.ind = ""                 # current indentation prefix
        self.print_values = False     # when True, show values of non-rand fields
    def do_print(self, m, print_values=False, show_exp=False):
        """Render model `m` and return the resulting string.

        print_values: print concrete values for non-random fields.
        show_exp:     expand synthetic expressions (e.g. array sums) instead
                      of printing their shorthand.
        """
        self.ind = ""
        self.print_values = print_values
        self.show_exp = show_exp
        self.out = StringIO()
        m.accept(self)
        return self.out.getvalue()
    @staticmethod
    def print(m, print_values=False, show_exp=False):
        """Convenience wrapper: render `m` with a fresh printer instance."""
        p = ModelPrettyPrinter()
        return p.do_print(m, print_values, show_exp)
    def write(self, s):
        # raw write, no indentation or newline
        self.out.write(s)
    def writeln(self, l):
        # write one full line at the current indentation level
        self.out.write(self.ind + l + "\n")
    def inc_indent(self):
        self.ind += " "*4
    def dec_indent(self):
        self.ind = self.ind[4:]
    def visit_composite_field(self, f : FieldCompositeModel):
        """Render a composite field as `name { ...children... }`."""
        name = f.name if f.name is not None else "<anonymous>"
        self.writeln(name + " {")
        self.inc_indent()
        super().visit_composite_field(f)
        self.dec_indent()
        self.writeln("}")
    def visit_constraint_block(self, c:vm.ConstraintBlockModel):
        """Render `constraint <name> { ...statements... }`."""
        self.writeln("constraint " + c.name + " {")
        self.inc_indent()
        for stmt in c.constraint_l:
            stmt.accept(self)
        self.dec_indent()
        self.writeln("}")
    def visit_constraint_dist(self, d : ConstraintDistModel):
        """Render `lhs dist { w1, w2, ... }`."""
        self.write(self.ind)
        d.lhs.accept(self)
        self.write(" dist { ")
        for i in range(len(d.weights)):
            if i > 0:
                self.write(", ")
            d.weights[i].accept(self)
        self.write("}\n")
    def visit_dist_weight(self, w : DistWeightExprModel):
        """Render one dist weight: `[lo:hi] : weight` or `value : weight`."""
        if w.rng_rhs is not None:
            # range form: [lhs:rhs]
            self.write("[")
            w.rng_lhs.accept(self)
            self.write(":")
            w.rng_rhs.accept(self)
            self.write("]")
        else:
            # single-value form
            w.rng_lhs.accept(self)
        self.write(" : ")
        w.weight.accept(self)
    def visit_constraint_expr(self, c:ConstraintExprModel):
        # a bare constraint expression becomes `expr;`
        self.write(self.ind)
        c.e.accept(self)
        self.write(";\n")
    def visit_constraint_foreach(self, f:ConstraintForeachModel):
        """Render `foreach (arr[i]) { ...body... }`."""
        self.write(self.ind + "foreach (")
        f.lhs.accept(self)
        self.write("[i]) {\n")
        self.inc_indent()
        for s in f.constraint_l:
            s.accept(self)
        self.dec_indent()
        self.writeln("}")
    def visit_constraint_if_else(self, c:vm.ConstraintIfElseModel):
        """Render `if (cond) { ... }` with an optional `else { ... }`."""
        self.write(self.ind + "if (")
        c.cond.accept(self)
        self.write(") {\n")
        self.inc_indent()
        c.true_c.accept(self)
        self.dec_indent()
        if c.false_c is not None:
            self.writeln("} else {")
            self.inc_indent()
            c.false_c.accept(self)
            self.dec_indent()
        self.writeln("}")
    def visit_constraint_implies(self, c:vm.ConstraintImpliesModel):
        """Render `cond -> { ...constraints... }`."""
        self.write(self.ind)
        c.cond.accept(self)
        self.write(" -> {")
        for sc in c.constraint_l:
            sc.accept(self)
        self.write("}\n")
    def visit_constraint_solve_order(self, c:ConstraintSolveOrderModel):
        """Render `solve a, b before c, d`."""
        self.write(self.ind)
        self.write("solve ")
        for i, b in enumerate(c.before_l):
            if i > 0:
                self.write(",")
            self.write(b.name)
        self.write(" before ")
        for i, a in enumerate(c.after_l):
            if i > 0:
                self.write(",")
            self.write(a.name)
        self.write("\n")
    def visit_covergroup(self, cg : CovergroupModel):
        """Render a covergroup header and its coverpoints (indented)."""
        self.writeln("covergroup " + cg.name)
        self.inc_indent()
        for cp in cg.coverpoint_l:
            cp.accept(self)
        self.dec_indent()
    def visit_coverpoint(self, cp : CoverpointModel):
        """Render a coverpoint header and its bins (indented)."""
        self.writeln("coverpoint " + cp.name)
        self.inc_indent()
        for b in cp.bin_model_l:
            b.accept(self)
        self.dec_indent()
    def visit_coverpoint_bin_array(self, bn : CoverpointBinArrayModel):
        self.writeln("bin_array " + bn.name)
    def visit_coverpoint_bin_collection(self, bn : CoverpointBinCollectionModel):
        # a collection prints its header, then each contained bin indented
        self.writeln("bin_collection " + bn.name)
        self.inc_indent()
        for b in bn.bin_l:
            b.accept(self);
        self.dec_indent()
    def visit_coverpoint_bin_single_range(self, bn : CoverpointBinSingleRangeModel):
        self.writeln("bin_single_range " + bn.name + " " + str(bn.target_val_low) + " .. " + str(bn.target_val_high))
    def visit_expr_array_subscript(self, s):
        # render `lhs[rhs]`
        s.lhs.accept(self)
        self.write("[")
        s.rhs.accept(self)
        self.write("]")
    def visit_expr_array_sum(self, s):
        # either expand the underlying sum expression or print `arr.sum`
        if self.show_exp:
            s.expr().accept(self)
        else:
            self.write(s.arr.fullname)
            self.write(".sum")
    def visit_expr_bin(self, e:vm.ExprBinModel):
        """Render a parenthesized binary expression `(lhs op rhs)`."""
        if e.lhs is None or e.rhs is None:
            # debugging aid for malformed expressions; accept() below will fail
            print("op: " + str(e.op))
        self.write("(")
        e.lhs.accept(self)
        self.write(" " + vm.BinExprType.toString(e.op) + " ")
        e.rhs.accept(self)
        self.write(")")
    def visit_expr_in(self, e:vm.ExprInModel):
        """Render `lhs in [r1, r2, ...]`."""
        e.lhs.accept(self)
        self.write(" in [")
        for i, r in enumerate(e.rhs.rl):
            if i > 0:
                self.write(", ")
            r.accept(self)
        self.write("]")
    def visit_expr_literal(self, e : vm.ExprLiteralModel):
        self.write(str(int(e.val())))
    def visit_expr_fieldref(self, e : vm.ExprFieldRefModel):
        """Render a field reference; in print_values mode non-random fields
        also show their current value(s)."""
        if self.print_values and hasattr(e.fm, "is_used_rand") and not e.fm.is_used_rand:
            if isinstance(e.fm, FieldArrayModel):
                # render the whole array contents as [v0, v1, ...]
                self.write("[")
                for i, f in enumerate(e.fm.field_l):
                    if i > 0:
                        self.write(", ")
                    self.write(str(int(f.get_val())))
                self.write("]")
            else:
                # scalar: name(value)
                self.write(e.fm.fullname + "(" + str(int(e.fm.get_val())) + ")")
        else:
            self.write(e.fm.fullname)
    def visit_expr_unary(self, e : vm.ExprUnaryModel):
        # render `op(expr)`
        self.write(UnaryExprType.toString(e.op))
        self.write("(")
        e.expr.accept(self)
        self.write(")")
    def visit_expr_range(self, r):
        # render `lhs..rhs`
        r.lhs.accept(self)
        self.write("..")
        r.rhs.accept(self)
    def visit_rangelist(self, r : RangelistModel):
        # NOTE: loop variable `re` is a (low, high) pair, not the regex module
        for re in r.range_l:
            if re[0] == re[1]:
                # collapsed single-value range
                self.write(str(re[0]))
            else:
                self.write(str(re[0]) + ".." + str(re[1]))
    def visit_scalar_field(self, f:FieldScalarModel):
        """Render a scalar field declaration: `[rand] signed|unsigned [W] name`."""
        f_str = ""
        if f.is_used_rand:
            f_str += "rand "
        if f.is_signed:
            f_str += "signed "
        else:
            f_str += "unsigned "
        f_str += "[" + str(f.width) + "] " + f.name
        self.writeln(f_str)
| StarcoderdataPython |
3454045 | <gh_stars>1-10
from keras.models import Model, load_model
from keras.layers import Dense, Dropout, BatchNormalization
from keras.metrics import top_k_categorical_accuracy
from keras.utils import to_categorical
from misc.AttentionWeightedAverage import *
from misc.funcs import *
from textgenrnn import textgenrnn
from textgenrnn.utils import textgenrnn_encode_sequence
import numpy as np
from numpy.random import randint, permutation, seed
from matplotlib import pyplot as plt
import os
import json
#
#
#
class Highlighter:
    """
    Main class used in authority. This class takes care of the loading of textual data,
    saving and loading of models, classification of authors, and rendering visualizations
    of said predictions.
    """
    def __init__(self, maxlen=40, sample_stride=3):
        """
        maxlen:        length (in characters) of the text snippets analyzed.
                       Keep the default of 40 to benefit from textgenrnn
                       transfer learning; changing it breaks saved models.
        sample_stride: step between successive training snippets.
        """
        self.authors = []            # sorted author names
        self.authors_dict = {}       # author name -> class index
        self.num_authors = 0
        self.paths = {}              # author name -> list of .txt paths
        self.texts = []              # joined text per author, ordered like self.authors
        self.texts_dict = {}         # author name -> joined text
        self.vocab = textgenrnn().vocab
        self.maxlen = maxlen
        self.num_samples_per_author = None
        self.sample_stride = sample_stride
        self.encoded_texts = []      # model-ready encodings of training snippets
        self.labels = []             # labels corresponding to the training data

    def build_model(self,
                    dropout_rate=0.5,
                    batch_normalization=True,
                    textgenrnn_weights_path=0,
                    num_authors=None):
        """Define and compile the core highlighter model.

        Wraps a pretrained textgenrnn network and adds an optional
        batch-norm/dropout head plus a softmax classification layer with
        one unit per author.
        """
        if num_authors is not None:
            # FIX: record the caller-supplied class count; previously the
            # argument was accepted but never used, and the softmax layer
            # silently fell back to self.num_authors.
            self.num_authors = num_authors
        if not self.num_authors:
            print('Error: must specify number of authors.')
            return
        # define model
        if type(textgenrnn_weights_path) is str:
            self._tg_rnn = textgenrnn(textgenrnn_weights_path)
            self.vocab = self._tg_rnn.vocab
        else:
            self._tg_rnn = textgenrnn()
        self._tg_model = self._tg_rnn.model
        self._input = self._tg_model.layers[0].input
        self._tg_out = self._tg_model.layers[-2].output
        if batch_normalization is True:
            self._tg_out = BatchNormalization()(self._tg_out)
        if 0 < dropout_rate < 1:
            # FIX: honour the dropout_rate argument (it was hard-coded to 0.5)
            self._tg_out = Dropout(rate=dropout_rate)(self._tg_out)
        self._classification = Dense(units=self.num_authors,
                                     activation='softmax',
                                     name='classification')(self._tg_out)
        self.model = Model(inputs=self._input, outputs=self._classification)
        # compile model
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adamax',
                           metrics=['acc', top_3_acc])

    def conv_text(self, text):
        """Convert the given text into the encoded format required by the model.

        Requires build_model (or load_author_texts) to have run so self.vocab
        is populated.
        """
        return textgenrnn_encode_sequence(text, vocab=self.vocab, maxlen=self.maxlen)[0]

    def load_author_texts(self, path, num_samples_per_author=None, verbose=True, rand_seed=0):
        """
        Load a corpus of several authors, located in `path` (name of folder with data).
        Files must be .txt and folders must have tree structure given by
        ├--path
        |  ├--author_1
        |     ├--writing_1_by_author_1.txt
        |     ├--writing_2_by_author_1.txt
        |     ├-- ...
        |  ├--author_2
        |     ├--writing_1_by_author_2.txt
        |     ├--writing_2_by_author_2.txt
        |     ├-- ...
        |  ├-- ...
        Author names will be read in as they appear in the
        corresponding folder's name.
        Note that "path" must end in a '/' to indicate it is a folder.
        """
        if verbose:
            print('loading text and author data...')
        # index the authors and the paths of their works
        for author in os.listdir(path):
            self.authors += [author]
            self.paths[author] = []
            for filename in os.listdir(path + author):
                if '.txt' in filename:
                    self.paths[author].append(path + author + '/' + filename)
        self.authors.sort()
        self.num_authors = len(self.authors)
        # stores the indices associated to each author
        self.authors_dict = {author: i for i, author in enumerate(self.authors)}
        # store the joined texts associated to each author
        for author in self.authors:
            all_text_by_author = []
            # renamed loop variable: previously shadowed the `path` parameter
            for file_path in self.paths[author]:
                all_text_by_author += [open(file_path, 'r', encoding='latin').read()]
            all_text_by_author = '\n'.join(all_text_by_author)
            self.texts_dict[author] = all_text_by_author
        # list version of texts_dict
        self.texts = [self.texts_dict[author] for author in self.authors]
        if verbose:
            print('done.')
            print('found authors:')
            for author, text in zip(self.authors, self.texts):
                print('\t' + author)
                print('\t\tlength chars in text: {0:8d}'.format(len(text)))
            print()
        # now that we have the authors loaded we can build the model
        if not hasattr(self, 'model'):
            # FIX: pass the class count by keyword; it was previously passed
            # positionally into build_model's dropout_rate parameter.
            self.build_model(num_authors=self.num_authors)
        # we want to have the same amount of samples for each author
        if verbose:
            print('processing text...')
        if num_samples_per_author is not None:
            self.num_samples_per_author = num_samples_per_author
        elif self.num_samples_per_author is None:
            # FIX: previously a second call with the default argument reset
            # self.num_samples_per_author back to None
            author_lengths = [int((len(text) - self.maxlen) / self.sample_stride) + 1
                              for text in self.texts]
            self.num_samples_per_author = min(author_lengths)
        # seed the random number generator BEFORE any sampling so that both
        # the random snippet selection and the shuffle are reproducible
        # (FIX: seeding previously happened after the randint() calls)
        seed(rand_seed)
        self.encoded_texts = []
        self.labels = []
        for author in self.authors:
            text = self.texts_dict[author]
            i = self.authors_dict[author]
            size = len(text)
            num_possible = int((size - self.maxlen) / self.sample_stride) + 1
            # if this author's text yields exactly the target number of
            # strided snippets, take them uniformly
            # FIX: the original compared the raw character count to the
            # sample count, making this branch effectively unreachable
            if num_possible == self.num_samples_per_author:
                for j in range(self.num_samples_per_author):
                    idx = j * self.sample_stride
                    self.encoded_texts.append(text[idx:idx + self.maxlen])
                    self.labels.append(i)
            # otherwise sample random start offsets the same number of times
            else:
                for _ in range(self.num_samples_per_author):
                    idx = randint(0, size - self.maxlen)
                    self.encoded_texts.append(text[idx:idx + self.maxlen])
                    self.labels.append(i)
        # convert text snippets into format required by model
        self.encoded_texts = list(map(
            lambda t: textgenrnn_encode_sequence(t, self.vocab, self.maxlen)[0],
            self.encoded_texts))
        self.encoded_texts = np.array(self.encoded_texts)
        self.labels = np.array(self.labels)
        # randomly shuffle the data
        self.shuffle_idx = list(permutation(self.num_samples_per_author * self.num_authors))
        self.encoded_texts = self.encoded_texts[self.shuffle_idx]
        self.labels = self.labels[self.shuffle_idx]
        # convert labels to one-hot
        self.labels = to_categorical(self.labels, self.num_authors)
        if verbose:
            print('done.')

    def train(self, epochs, validation_split=0.2, verbose=True):
        """Train the classifier model on loaded data."""
        # make sure we have data loaded before training
        # FIX: these attributes are initialised to [] (never None), so the old
        # `is None` check could not detect missing data
        if len(self.encoded_texts) == 0 or len(self.labels) == 0:
            print('No texts loaded -- load them with the load_training_data method.')
            return
        # train the model
        print('training...')
        self.model.fit(x=self.encoded_texts, y=self.labels,
                       epochs=epochs,
                       validation_split=validation_split,
                       verbose=verbose)
        print('done.')

    def highlight(self, text, padding='expand'):
        """
        Scan the given text and return per-character author probabilities.

        padding:
            'expand' -- spread window predictions so the output has the same
                        number of rows as characters in `text`
            'repeat' -- pad by repeating the first/last window predictions
            falsy    -- return the raw per-window predictions unpadded
        Returns -1 on error.
        """
        if not hasattr(self, 'model'):
            print('Error: make sure model is loaded and compiled.')
            return -1
        snippets = []
        for i in range(len(text) - self.maxlen + 1):
            snippets.append(self.conv_text(text[i:i + self.maxlen]))
        snippets = np.array(snippets)
        preds = self.model.predict(snippets)
        if not padding:
            return preds
        if padding == 'expand':
            return expand_filtering(preds, self.maxlen)
        if padding == 'repeat':
            pad_l, pad_r = preds[0], preds[-1]
            # pad with floor(maxlen/2) rows on the left, ceil(maxlen/2) on the
            # right so the output length matches len(text)
            block_l = np.vstack([pad_l for _ in range(int(np.floor(self.maxlen / 2)))])
            block_r = np.vstack([pad_r for _ in range(int(np.ceil(self.maxlen / 2)))])
            preds = np.vstack([block_l, preds])
            preds = np.vstack([preds, block_r])
            return preds
        print('Error: unrecognized padding.')
        return -1

    def predict(self, text):
        """Returns the (average) probability of each author having written the text"""
        highlighting = self.highlight(text)
        return highlighting.mean(axis=0)

    def classify_author(self, text):
        """Tries to predict the author of the given text."""
        highlighting = self.highlight(text)
        avgs = np.mean(highlighting, axis=0)
        author = self.authors[np.argmax(avgs)]
        return author

    def plot_highlights(self, text, authors=None, padding='expand'):
        """
        Renders a plot of the highlighting intensity for each author as the
        text progresses character by character.
        """
        highlighting = self.highlight(text, padding=padding)
        if authors is None:
            authors = self.authors
        # NOTE(review): the plot always draws all of self.authors; the
        # `authors` filter argument is currently unused — confirm intent.
        fig, ax = plt.subplots(1)
        for i in range(len(self.authors)):
            ax.plot(highlighting[:, i], label=self.authors[i])
        ax.legend()
        ax.set_xlabel('character index')
        ax.set_ylabel('probability')
        title = 'Highlighting for Text:\n'
        # FIX: short texts previously left `subtitle` undefined (NameError)
        subtitle = text[:60] + ' ...' if len(text) > 60 else text
        ax.set_title(title + subtitle)
        plt.show()

    def plot_histogram(self, text, authors=None):
        """
        Renders a histogram of the associated probability for each author having
        written the text.
        """
        # make predictions
        probs = self.predict(text)
        # select the authors we want to include in the plot
        if authors is None:
            authors = self.authors
        # NOTE(review): as in plot_highlights, the `authors` argument is not
        # applied as a filter — confirm intent.
        # set up plot in pyplot
        fig, ax = plt.subplots()
        idx = np.arange(self.num_authors)
        ax.bar(idx, probs)
        ax.set_xticks(idx)
        ax.set_xticklabels(self.authors)
        ax.set_xlabel('author')
        ax.set_ylabel('probability')
        title = 'Authorship Probabilities Associated to Text:\n'
        # FIX: short texts previously left `subtitle` undefined (NameError)
        subtitle = text[:60] + ' ...' if len(text) > 60 else text
        ax.set_title(title + subtitle)
        plt.show()

    def save_model(self, filepath_model, filepath_vars=0):
        """
        Saves the model's variables in a JSON and the model architecture/weights
        in a .hdf5 file. `filepath_vars` is skipped unless it is a string.
        """
        try:
            # write model weights and architecture to file
            self.model.save(filepath_model)
        except Exception:
            print('Error saving model file. Check saving path.')
        if type(filepath_vars) is str:
            vars = {}
            vars['authors'] = self.authors
            vars['authors_dict'] = self.authors_dict
            vars['num_authors'] = self.num_authors
            vars['paths'] = self.paths
            vars['maxlen'] = self.maxlen
            vars['num_samples_per_author'] = self.num_samples_per_author
            vars['sample_stride'] = self.sample_stride
            # np.asarray tolerates encoded_texts/labels still being plain
            # lists (i.e. before any data was loaded)
            vars['encoded_texts'] = np.asarray(self.encoded_texts).tolist()
            vars['labels'] = np.asarray(self.labels).tolist()
            vars['vocab'] = self.vocab
            try:
                # write variables to file
                with open(filepath_vars, 'w') as outfile:
                    json.dump(vars, outfile)
            except Exception:
                print('Error saving var file. Check saving path.')

    def load_model(self,
                   filepath_model, filepath_vars=0):
        """
        Load the model's weights (and optionally its variables) from file.
        Returns -1 if the model file cannot be loaded.
        """
        try:
            # load the model from file
            self.model = load_model(filepath_model,
                                    custom_objects={"AttentionWeightedAverage": AttentionWeightedAverage,
                                                    "top_3_acc": top_3_acc})
            # compile the model
            self.model.compile(loss='categorical_crossentropy',
                               optimizer='adamax',
                               metrics=['acc', top_3_acc])
        except Exception:
            print('Error loading model file from specified path.')
            return -1
        if type(filepath_vars) is str:
            try:
                # load the variables from file
                with open(filepath_vars, 'r', encoding='latin') as json_file:
                    vars = json.load(json_file)
                self.authors = vars['authors']
                self.authors_dict = vars['authors_dict']
                self.num_authors = vars['num_authors']
                self.paths = vars['paths']
                # FIX: save_model does not persist texts/texts_dict, so fall
                # back to defaults instead of raising KeyError on files this
                # class wrote itself
                self.texts = vars.get('texts', [])
                self.texts_dict = vars.get('texts_dict', {})
                self.maxlen = vars['maxlen']
                self.num_samples_per_author = vars['num_samples_per_author']
                self.sample_stride = vars['sample_stride']
                self.encoded_texts = np.array(vars['encoded_texts'])
                self.labels = np.array(vars['labels'])
                self.vocab = vars['vocab']
            except FileNotFoundError:
                print('Couldn\'t find model variables. Loading default vocab.')
                self.vocab = textgenrnn().vocab
        if self.num_authors == 0:
            # recover the class count from the loaded model's output shape
            self.num_authors = K.int_shape(self.model.output)[-1]
        if len(self.authors) == 0:
            self.authors = [str(i) for i in range(self.num_authors)]
| StarcoderdataPython |
1919078 | <gh_stars>0
from __future__ import absolute_import
from __future__ import unicode_literals
from mock import patch
from custom.icds_reports.ucr.tests.test_base_form_ucr import BaseFormsTest
class TestAWCMgtForms(BaseFormsTest):
    """UCR data-source tests for the LS home-visit forms.

    Each test patches the user-location lookup to a fixed location id and
    checks the rows emitted by the data source for a stored form fixture.
    """
    ucr_name = "static-icds-cas-static-ls_home_visit_forms_filled"
    @patch('custom.icds_reports.ucr.expressions._get_user_location_id',
           lambda user_id: 'qwe56poiuytr4xcvbnmkjfghwerffdaa')
    def test_awc_visit_form_with_location_entered(self):
        # A form that records the type of visit should surface it in
        # `visit_type_entered` and still count as one home visit.
        self._test_data_source_results(
            'beneficiary_form_with_type_of_visit',
            [{'user_id': 'cee18a35ce<PASSWORD>9<PASSWORD>',
              'location_id': 'qwe56poiuytr4xcvbnmkjfghwerffdaa',
              'doc_id': None,
              'visit_type_entered': 'vhnd_day',
              'home_visit_count': 1,
              'submitted_on': None}])
    @patch('custom.icds_reports.ucr.expressions._get_user_location_id',
           lambda user_id: 'qwe56poiuytr4xcvbnmkjfghwerffdaa')
    def test_awc_visit_form_without_location_entered(self):
        # A form with no visit type should produce an empty
        # `visit_type_entered` but still count as one home visit.
        self._test_data_source_results(
            'beneficiary_form_without_type_of_visit',
            [{'user_id': 'cee1<PASSWORD>9<PASSWORD>',
              'location_id': 'qwe56poiuytr4xcvbnmkjfghwerffdaa',
              'doc_id': None,
              'visit_type_entered': '',
              'home_visit_count': 1,
              'submitted_on': None}])
| StarcoderdataPython |
5156405 | import sys
from pprint import pprint
# read apk
#import androguard.core.bytecodes.apk as apk
#a = apk.APK(sys.argv[1])
import androguard.core.bytecodes.dvm as dvm
from androguard.core.analysis.analysis import *
import inflection
import executor
import descriptors
# XXX must be library for this
def has_field_name(s):
    """Return the Java-style "has" accessor name for a field.

    Example: ``fieldName`` -> ``hasFieldName``.
    """
    return f"has{s[:1].upper()}{s[1:]}"
class Reprotobuf(object):
    """Reconstructs .proto definitions from the MessageNano classes found in
    a compiled Android classes.dex blob.

    Typical pipeline: process_classes() -> structure_packages() ->
    generate_code() -> output().
    """
    def __init__(self, classes_dex):
        """classes_dex: raw bytes of a classes.dex file."""
        self.dvm = dvm.DalvikVMFormat(classes_dex)
        self.vma = uVMAnalysis(self.dvm)
        self.tree = {}    # nested message tree keyed by '$'-split class parts
        self.files = {}   # output .proto filename -> file properties

    @classmethod
    def from_classes_dex(cls, filename):
        """Build a workspace from a classes.dex file on disk."""
        # FIX: read the file named by the argument; this previously ignored
        # `filename` and always read sys.argv[1]
        with open(filename, 'rb') as f:
            classes_dex = f.read()
        return cls(classes_dex)

    def get_proto_classes(self):
        """Return an iterable of the concrete MessageNano subclasses."""
        def is_proto(c):
            # abstract subclasses carry no concrete field/tag information
            return ('MessageNano;' in c.get_superclassname() and
                    'abstract' not in c.get_access_flags_string())
        return filter(is_proto, self.dvm.get_classes())

    def add_class(self, classname, fields):
        """Insert a message into the tree, nesting on '$' separators."""
        # build tree
        parts = classname.split('$')
        node = self.tree
        for part in parts:
            subnodes = node.setdefault('sub', {})
            node = subnodes.setdefault(part, {})
        node['class'] = classname
        node['fields'] = fields

    def process_classes(self):
        """Analyze every MessageNano class and populate the message tree."""
        class_analyzer = MessageNanoAnalyzer(self)
        for proto_cls in self.get_proto_classes():
            name = descriptors.extract_classname(proto_cls.get_name())
            fields = class_analyzer.analyze(proto_cls)
            self.add_class(name, fields)

    def structure_packages(self):
        """Split the message tree into one .proto file per top-level class,
        deriving package and filename from the Java class path."""
        for name in self.tree['sub']:
            # extract package and outer name
            parts = name.split('/')
            filename_part = parts.pop()
            package = '.'.join(parts)
            filename = inflection.underscore(filename_part) + '.proto'
            file_properties = {
                'name': filename,
                'package': filename_part,
                'options': {
                    'java_package': '"%s"' % package,
                    'java_outer_classname': '"%s"' % filename_part,
                },
                'imports': set(),
            }
            # if there's a class at this level, it becomes a top-level
            # message wrapped under its own name
            if 'class' in self.tree['sub'][name]:
                file_properties['options']['java_multiple_files'] = 'true'
                file_properties['messages'] = {
                    'sub': {
                        filename_part: self.tree['sub'][name]
                    }
                }
            else:
                file_properties['messages'] = self.tree['sub'][name]
            # add this file to our list (filenames must be unique)
            assert filename not in self.files
            self.files[filename] = file_properties

    def determine_references(self):
        """
        Given a division of messages into files and packages, we need to
        know how each java class descriptor should be referenced in the proto
        files. In particular, which file to import and how to reference it.
        """
        refs = {}
        for properties in self.files.values():
            filerefs = self.determine_references_for_message_tree(
                properties['messages'], properties['package'])
            assert len(filerefs) > 0
            for r in filerefs.values():
                r['import'] = properties['name']
            refs.update(filerefs)
        return refs

    def determine_references_for_message_tree(self, node, parent=''):
        """Recursively map java class name -> dotted proto reference."""
        refs = {}
        if 'class' in node:
            refs[node['class']] = {'ref': parent}
        if 'sub' not in node:
            return refs
        for subnode in node['sub']:
            subrefs = self.determine_references_for_message_tree(
                node['sub'][subnode], parent + '.' + subnode)
            refs.update(subrefs)
        return refs

    def reference_within_scope(self, ref, scope):
        """Shorten a dotted reference by stripping the common prefix it
        shares with the current scope."""
        if ref == scope:
            return ref
        ref_parts = ref.split('.')
        scope_parts = scope.split('.')
        i = 0
        lim = min(len(ref_parts), len(scope_parts))
        while i < lim and ref_parts[i] == scope_parts[i]:
            i += 1
        return '.'.join(ref_parts[i:])

    def generate_code_for_message_tree(self, node, imports, scope='', indent_level=0):
        """Emit proto text for one message node, recording needed imports."""
        code = ''
        indent = ' ' * (2 * indent_level)
        if 'sub' in node:
            for name in node['sub']:
                code += indent + 'message %s {\n' % name
                code += self.generate_code_for_message_tree(
                    node['sub'][name], imports, scope + '.' + name,
                    indent_level + 1)
                code += indent + '}\n\n'
        if 'fields' in node:
            # emit fields in tag order, resolving message-type references
            fields = sorted(node['fields'].values(), key=lambda f: f['tag'])
            for field in fields:
                if 'ref' in field:
                    classname = field['ref']
                    imports.add(self.refs[classname]['import'])
                    field['type'] = self.refs[classname]['ref']
                code += indent
                code += '%(rule)s ' % field
                code += self.reference_within_scope(field['type'], scope)
                code += ' %(name)s = %(tag)d;\n' % field
        return code

    def generate_code(self):
        """Generate proto source text for every output file."""
        self.refs = self.determine_references()
        for properties in self.files.values():
            properties['code'] = self.generate_code_for_message_tree(
                properties['messages'], properties['imports'],
                properties['package'])

    def output(self):
        """Write each generated .proto file into ./output/."""
        for properties in self.files.values():
            with open('./output/' + properties['name'], 'w') as f:
                f.write('syntax = "proto2";\n\n')
                f.write('package %s;\n' % properties['package'])
                f.write('\n')
                for k, v in properties['options'].items():
                    f.write('option %s = %s;\n' % (k, v))
                f.write('\n')
                for imprt in properties['imports']:
                    # a file never imports itself
                    if imprt == properties['name']:
                        continue
                    f.write('import "%s";\n' % (imprt))
                f.write('\n')
                f.write(properties['code'])
class MessageNanoAnalyzer(object):
    """Derives protobuf field definitions (name, type, rule, tag) from a
    compiled MessageNano subclass by inspecting its Java fields and by
    executing its writeTo() bytecode."""
    def __init__(self, workspace):
        # workspace provides the dex/VM analysis objects (e.g. workspace.vma)
        self.workspace = workspace
    def get_fields_from_class(self, cls):
        """
        Deduce fields by inspecting the fields of the Java class.
        Returns {field_name: {'name', 'descriptor', 'rule', ...type info}}.
        """
        # fetch all the fields; assume 'required' until proven optional
        fields = {}
        for field in cls.get_fields():
            name = field.get_name()
            fields[name] = {
                'name': name,
                'descriptor': field.get_descriptor(),
                'rule': 'required',
            }
        # deduce optional ones from has* fields (a `hasFoo` flag implies
        # `foo` is optional in the original .proto)
        optional = []
        for name in fields:
            if has_field_name(name) in fields:
                optional.append(name)
        # mark the optional fields, and remove the synthetic has* flags
        for name in optional:
            del fields[has_field_name(name)]
            fields[name]['rule'] = 'optional'
        # remove _emptyArray if it exists (MessageNano implementation detail)
        fields.pop('_emptyArray', None)
        # deduce protobuf types from the Java type descriptors
        for properties in fields.values():
            descriptor = properties['descriptor']
            protobuf_type = descriptors.to_protobuf_type(descriptor)
            properties.update(protobuf_type)
        return fields
    def get_tags_from_class(self, cls):
        """Recover each field's wire tag by symbolically executing the
        class's writeTo() method. Returns {} when writeTo is absent."""
        methods = [m for m in cls.get_methods() if m.get_name() == 'writeTo']
        if len(methods) == 0:
            return {}
        method = self.workspace.vma.get_method(methods[0])
        basic_blocks = method.basic_blocks.gets()
        # feed every instruction of every basic block to the executor,
        # which tracks writeTag-style calls to map field -> tag
        e = executor.WriteToExecutor()
        for bb in basic_blocks:
            for inst in bb.get_instructions():
                e.run(inst)
        return e.get_tags()
    def analyze(self, cls):
        """Return the full field map for `cls`, with tags attached.

        Asserts that every deduced field received a tag from writeTo().
        """
        # deduce fields
        fields = self.get_fields_from_class(cls)
        # deduce tags
        tag_map = self.get_tags_from_class(cls)
        for name, tag in tag_map.items():
            assert name in fields
            fields[name]['tag'] = tag
        # check we got a tag for everything
        for properties in fields.values():
            assert 'tag' in properties
        return fields
# main ---------------------------------------------------------
def main():
    """Command-line entry point: reconstruct .proto files from the
    classes.dex file named by the first argument."""
    workspace = Reprotobuf.from_classes_dex(sys.argv[1])
    workspace.process_classes()
    workspace.structure_packages()
    workspace.generate_code()
    workspace.output()


# guard so importing this module does not trigger the conversion
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3255449 | <gh_stars>1-10
import torch.nn as nn
import torch.nn.functional as F
from tkdet.layers import get_norm
from tkdet.layers import make_divisible
from .base import Backbone
from .build import BACKBONE_REGISTRY
__all__ = [
"MobileNetV2",
"mobilenetv2_1_0",
"mobilenetv2_0_75",
"mobilenetv2_0_5",
"mobilenetv2_0_25",
]
class ConvBNReLU(nn.Sequential):
    """Conv2d -> normalization -> ReLU6 building block.

    Uses "same"-style padding for odd kernel sizes and no conv bias
    (the norm layer supplies the affine shift).
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, norm="BN"):
        pad = (kernel_size - 1) // 2
        layers = [
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                pad,
                groups=groups,
                bias=False,
            ),
            get_norm(norm, out_channels),
            nn.ReLU6(inplace=True),
        ]
        super().__init__(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: 1x1 expand -> depthwise ->
    linear 1x1 project, with a residual skip when shape permits."""

    def __init__(self, in_channels, out_channels, stride, expand_ratio, norm="BN"):
        super().__init__()
        assert stride in (1, 2)
        self.stride = stride
        expanded = round(in_channels * expand_ratio)
        # the skip connection is only valid when spatial size and width match
        self.use_res_connect = stride == 1 and in_channels == out_channels
        stages = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            stages.append(ConvBNReLU(in_channels, expanded, 1))
        # depthwise conv followed by a linear (no activation) projection
        stages.append(ConvBNReLU(expanded, expanded, stride=stride, groups=expanded))
        stages.append(nn.Conv2d(expanded, out_channels, 1, bias=False))
        stages.append(get_norm(norm, out_channels))
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = x + out
        return out
class MobileNetV2(Backbone):
    """
    Implement MobileNet v2 (https://arxiv.org/abs/1801.04381).

    Feature maps are recorded per block under string keys "0", "1", ...;
    `out_features` selects which of those (and/or "linear") forward() returns.
    """
    def __init__(
        self,
        multiplier=1.0,
        inverted_residual_setting=None,
        block=InvertedResidual,
        norm="BN",
        out_features=None,
        num_classes=1000
    ):
        super().__init__()
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # default stages from the paper, each row is
            # (expand_ratio t, channels c, repeats n, first-block stride s)
            inverted_residual_setting = [
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1]
            ]
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError(
                f"inverted_residual_setting should be non-empty or a 4-element list."
            )
        # scale channel counts by the width multiplier, rounded to
        # hardware-friendly multiples
        input_channel = make_divisible(input_channel * multiplier)
        last_channel = make_divisible(last_channel * max(1.0, multiplier))
        # stem conv downsamples by 2
        features = [ConvBNReLU(3, input_channel, stride=2, norm=norm)]
        strides = 2  # cumulative stride of the feature map so far
        self._out_feature_strides = {"0": strides}
        self._out_feature_channels = {"0": input_channel}
        index = 1
        for t, c, n, s in inverted_residual_setting:
            output_channel = make_divisible(c * multiplier)
            for i in range(n):
                # only the first block of each stage downsamples
                stride = s if i == 0 else 1
                strides *= stride
                features.append(block(input_channel, output_channel, stride, t, norm))
                input_channel = output_channel
                # record per-block stride/channel metadata
                self._out_feature_strides[str(index)] = strides
                self._out_feature_channels[str(index)] = output_channel
                index += 1
        # final 1x1 conv expanding to last_channel
        features.append(ConvBNReLU(input_channel, last_channel, 1, norm=norm))
        self.features = nn.Sequential(*features)
        self._out_feature_channels[str(index)] = last_channel
        self._out_feature_strides[str(index)] = strides
        if not out_features:
            out_features = ["linear"]
        # classification head is only built when requested
        if "linear" in out_features and num_classes is not None:
            self.classifier = nn.Sequential(
                nn.Dropout(0.2),
                nn.Linear(last_channel, num_classes)
            )
        self._out_features = out_features
    def forward(self, x):
        """Return a dict {feature_name: tensor} for each requested output."""
        outputs = {}
        for idx, layer in enumerate(self.features):
            x = layer(x)
            if str(idx) in self._out_features:
                outputs[str(idx)] = x
        if "linear" in self._out_features:
            # global average pool -> flatten -> classifier head
            x = F.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
            x = self.classifier(x)
            outputs["linear"] = x
        return outputs
def get_mobilenet_v2(multiplier, cfg):
    """Build a MobileNetV2 backbone with the given width multiplier,
    taking the output features and norm type from the config."""
    out_features = cfg.MODEL.BACKBONE.OUT_FEATURES
    norm = cfg.MOBILENET_V2.NORM
    return MobileNetV2(multiplier, norm=norm, out_features=out_features)
@BACKBONE_REGISTRY.register("MobileNet-V2-1.0")
def mobilenetv2_1_0(cfg):
    """MobileNet-V2 backbone with width multiplier 1.0."""
    return get_mobilenet_v2(1.0, cfg)
@BACKBONE_REGISTRY.register("MobileNet-V2-0.75")
def mobilenetv2_0_75(cfg):
    """MobileNet-V2 backbone with width multiplier 0.75."""
    return get_mobilenet_v2(0.75, cfg)
@BACKBONE_REGISTRY.register("MobileNet-V2-0.5")
def mobilenetv2_0_5(cfg):
    """MobileNet-V2 backbone with width multiplier 0.5."""
    return get_mobilenet_v2(0.5, cfg)
@BACKBONE_REGISTRY.register("MobileNet-V2-0.25")
def mobilenetv2_0_25(cfg):
    """MobileNet-V2 backbone with width multiplier 0.25."""
    return get_mobilenet_v2(0.25, cfg)
| StarcoderdataPython |
4961504 | <filename>features/pages/main_page.py
from selenium.webdriver.common.by import By
from features.pages.base_page import Page
def compare_data_with_expected(expected, real):
    """Assert that *real* equals *expected*, failing with a descriptive message."""
    assert expected == real, f"Expected '{expected}', but got '{real}'"
class MainPage(Page):
    """Page object for the Instagram main (home feed) screen."""

    USER_ICON_LIST = (By.ID, "com.instagram.android:id/username")
    TAB_BAR = (By.ID, "com.instagram.android:id/tab_bar")
    LINER_BAR = (By.CLASS_NAME, "android.widget.FrameLayout")

    def verify_main_page_is_open(self):
        """Verify the story icons and the five bottom-bar tabs are present."""
        story_icons = self.find_elements(self.USER_ICON_LIST)
        compare_data_with_expected(expected=5, real=len(story_icons))
        compare_data_with_expected(expected="Your Story", real=story_icons[0].text)
        tab_bar = self.find_element(self.TAB_BAR)
        tabs = tab_bar.find_elements(*self.LINER_BAR)
        compare_data_with_expected(expected=5, real=len(tabs))
        expected_labels = ("Home", "Search and Explore", "Camera", "Activity", "Profile")
        for tab, label in zip(tabs, expected_labels):
            compare_data_with_expected(expected=label, real=tab.get_attribute("content-desc"))
| StarcoderdataPython |
11226349 | '''Module for executable menu.'''
from collections import deque
import re
import webbrowser
from config import CMD_PROMPTS, OUTPUTS, YES, NO, HEADERS
class Action:
    '''Superclass for menu actions.

    Holds the shared io/service handles, the per-action prompt set, and
    common helpers for listing items and showing item details.
    '''
    def __init__(self, io, item_service, action=None):
        # io: console read/write abstraction; item_service: item repository
        self._io = io
        self._item_service = item_service
        # `action` keys into CMD_PROMPTS; actions without prompts pass None
        if action is None:
            self._cmds = []
        else:
            self._cmds = CMD_PROMPTS[action]
    def perform(self):
        """Performs the action and returns a boolean value
        depending on whether the app should be running after
        execution.

        The base implementation handles unknown commands.
        """
        self._io.write(OUTPUTS['unknown command'])
        return True
    def _get_info(self, prompt, error_msg):
        """Internal method to read and write info from/to the console.

        Re-prompts until the user enters a non-empty value.
        """
        adding = True
        while adding:
            info = self._io.read(prompt)
            if not info:
                self._io.write(error_msg)
            else:
                adding = False
        return info
    def _list(self):
        """Writes all items to the console, tabulated.
        Truncates over 20 character long fields.
        return:
            ids: list: list of item ids
        """
        items = self._item_service.list_by_type_alphabetically()
        if items:
            # truncate each field to 20 chars (with an ellipsis) for display
            items = deque([
                [field if len(field) <= 20 else f"{field[:20]}..." for field in item]
                for item in items])
            # field index 1 is the item's identifier
            ids = [item[1] for item in items]
            items.appendleft(HEADERS)
            self._io.write(items, True)
        else:
            ids = []
            self._io.write(OUTPUTS["empty list"])
        return ids
    def _show_details(self, item):
        """Prints detailed info of an item.
        Open url of an item in a new browser tab.
        """
        if item:
            # render each (field, value) pair as a table row
            info = [[key, val] for key, val in item.items()]
            self._io.write(info, True)
            url = ''
            # info[0][1] holds the item type; blogs keep their url in info[4]
            if info[0][1] == 'blog':
                if 'http' not in info[4][1]:
                    # prepend a scheme so webbrowser treats it as absolute
                    url = 'https://' + info[4][1]
                else:
                    url = info[4][1]
                webbrowser.open(url)
            # videos keep their url in info[3]
            if info[0][1] == 'video':
                if 'http' not in info[3][1]:
                    url = 'https://' + info[3][1]
                else:
                    url = info[3][1]
                webbrowser.open(url)
        else:
            self._io.write(OUTPUTS['broken input'])
class Add(Action):
    '''Superclass for add actions.

    `action` selects both the prompt set (CMD_PROMPTS[action]) and the
    item type passed to the item service.
    '''
    def __init__(self, io, item_service, action):
        super().__init__(io, item_service, action)
        self._action = action
    def perform(self):
        '''Adds an item based on user input.

        Prompts for each configured field, then asks the service to create
        the item. Reports duplicates. Returns True to keep the app running.
        '''
        item = []
        for cmd in self._cmds:
            # each cmd is a (prompt, error_msg) pair
            item.append(self._get_info(*cmd))
        added = self._item_service.create_item(self._action, item)
        if not added:
            self._io.write(OUTPUTS["already in list"])
        else:
            self._io.write(OUTPUTS["added"])
        return True
class AddBook(Add):
    '''Subclass for adding books; uses the 'book' prompt set.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service, 'book')
class AddBlog(Add):
    '''Subclass for adding blog posts; uses the 'blog' prompt set.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service, 'blog')
class AddVideo(Add):
    '''Subclass for adding videos; uses the 'video' prompt set.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service, 'video')
class List(Action):
    '''Action for listing item information.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service)
    def perform(self):
        '''Print the list header followed by every stored item.

        Always returns True so the application keeps running.
        '''
        self._io.write(OUTPUTS['list'])
        self._list()
        return True
class Delete(Action):
"""Action for deleting an item."""
def __init__(self, io, item_service):
super().__init__(io, item_service, 'delete')
def perform(self):
'''Performs the delete action.'''
self._io.write(OUTPUTS['list'])
items = self._list()
if items:
prompt, error_msg = self._cmds[0]
item = self._get_info(prompt, error_msg)
if item not in items:
self._io.write(OUTPUTS['item not found'])
else:
self._delete_item(item, items)
return True
def _delete_item(self, item, items):
'''Deletes an item.
args:
item: str: identifier for an item
items: list: list of items currently in the repository.
'''
for i in range(len(items)):
if items[i] == item:
self._confirm(item)
def _confirm(self, item):
'''Confirms the deletion of the item.'''
while True:
choice = self._io.read(OUTPUTS['confirm'])
if choice.upper() == YES:
self._item_service.delete_item(item)
self._io.write(OUTPUTS['deleting'])
return
if choice.upper() == NO:
self._io.write(OUTPUTS['not deleted'])
return
class Clear(Action):
    '''Action for clearing all items and creating an empty document.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service, 'clear')
    def perform(self):
        '''Confirms the clear action.

        Lists the current items, then asks once for a yes/no answer.
        Any other answer aborts with an "unknown command" notice.
        Always returns True so the application keeps running.
        '''
        self._io.write(OUTPUTS['list'])
        choice = self._io.read(OUTPUTS['confirm_clearing'])
        if choice.upper() == YES:
            self._item_service.clear()
            self._io.write(OUTPUTS['clearing'])
        elif choice.upper() == NO:
            self._io.write(OUTPUTS['not cleared'])
        else:
            # Unlike Delete._confirm this does not re-prompt on bad input.
            self._io.write(OUTPUTS['unknown command'])
        return True
class Details(Action):
    '''Action for showing the details of an item.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service, 'details')
    def perform(self):
        '''Lists all items and then fetches the details.

        Asks for an item id; unknown ids print a notice. Found items
        are printed in full and, for blogs/videos, their URL is opened
        in the browser (see Action._show_details). Always returns True
        so the application keeps running.
        '''
        ids = self._list()
        found_item = None
        if ids:
            prompt, error_msg = self._cmds[0]
            item_id = self._get_info(prompt, error_msg)
            if item_id not in ids:
                self._io.write(OUTPUTS['item not found'])
            else:
                found_item = self._item_service.find_by_id(item_id)
                self._io.write(OUTPUTS['details results'])
                self._show_details(found_item)
        return True
class Search(Action):
    '''Action for searching specific items.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service, 'search')
    def perform(self):
        '''Performs a search with a search word provided by the user.

        Prints the matching items (fields truncated to 20 characters)
        or a not-found / empty-list notice. Always returns True so the
        application keeps running.
        '''
        self._io.write(OUTPUTS['search help'])
        items = self._item_service.list_by_type_alphabetically()
        if items:
            results = self._search(items)
            if results:
                self._io.write(OUTPUTS['search results'])
                # Truncate long fields so the printed table stays aligned.
                results = deque([
                    [field if len(field) <= 20 else f"{field[:20]}..." for field in item]
                    for item in results])
                results.appendleft(HEADERS)
                self._io.write(results, True)
            else:
                self._io.write(OUTPUTS['item not found'])
        else:
            self._io.write(OUTPUTS["empty list"])
        return True
    def _search(self, items):
        '''Internal method for performing a search.

        Case-insensitively matches the user's input against every field
        except the type and id columns (item[2:]).

        NOTE(review): the input is passed to re.findall as a raw regex;
        an invalid pattern such as "[" raises re.error -- consider
        re.escape() if literal matching is intended.
        '''
        results = deque()
        prompt, error_msg = self._cmds[0]
        search_word = str(self._get_info(prompt, error_msg))
        for item in items:
            result = re.findall(search_word, str(item[2:]), re.IGNORECASE)
            if result:
                results.append(item)
        return results
class Quit(Action):
    '''Action that stops the application loop.'''
    def __init__(self, io, item_service):
        super().__init__(io, item_service)
    def perform(self):
        '''Print the farewell message and signal the main loop to stop.'''
        self._io.write(OUTPUTS['quit'])
        return False
| StarcoderdataPython |
11321206 | <gh_stars>0
import numpy as np
import pickle
import tensorflow as tf
#init random seed
np.random.seed(5)
print(tf.__version__)
print("#### load matrix from pickle")
print()
print("#### build item information matrix of citeulike-a by bag of word")
# find vocabulary_size = 8000
# Map every tag id in tags.dat to a dense 0-based column index.
tag_id_to_index = {}
with open(r"last.fm/tags.dat", encoding="latin1") as tag_file:
    lines = tag_file.readlines()[1:]  # skip the header line
    tag_size = len(lines)
    for index,line in enumerate(lines):
        tag_id = int(line.strip().split("\t")[0])
        tag_id_to_index[tag_id] = index
# find item_size = 16980
# Map every artist id in artists.dat to a dense 0-based row index.
artist_id_to_index = {}
with open(r"last.fm/artists.dat", encoding="latin1") as artists_file:
    lines = artists_file.readlines()[1:]  # skip the header line
    artist_size = len(lines)
    for index,line in enumerate(lines):
        artist_id = int(line.strip().split("\t")[0])
        artist_id_to_index[artist_id] = index
# initialize item_infomation_matrix (16980 , 8000)
artist_infomation_matrix = np.zeros((artist_size, tag_size))
# build item_infomation_matrix
# Bag-of-tags: count how often each tag was applied to each known artist.
with open(r"last.fm/user_taggedartists.dat", encoding="latin1") as artist_tag_file:
    lines = artist_tag_file.readlines()[1:]
    for line in lines:
        artist_id = int(line.strip().split("\t")[1])
        tag_id = int(line.strip().split("\t")[2])
        if artist_id in artist_id_to_index:
            artist_infomation_matrix[artist_id_to_index[artist_id]][tag_id_to_index[tag_id]] += 1
print("#### build rating matrix last.fm")
with open(r"last.fm/user_artists.dat", encoding="latin1") as rating_file:
rating_file_new = open("last.fm/ratings.dat", "w", encoding="latin1")
lines = rating_file.readlines()[1:]
for line in lines:
user = int(line.strip().split("\t")[0])
artist_id = int(line.strip().split("\t")[1])
if artist_id in artist_id_to_index:
rating_file_new.write("{user}::{artist}::{rate}::{timestamp}\n"
.format(user=user, artist=artist_id_to_index[artist_id], rate=1, timestamp=0))
rating_file_new.flush()
rating_file_new.close()
# clean rating
# Keep only users with at least 20 ratings, re-number the surviving users
# with dense indices, shuffle each user's lines and write them tab
# separated to clean_ratings.dat. Relies on ratings.dat being grouped by
# user (it is written that way above).
with open(r"last.fm/ratings.dat", encoding="latin1") as rating_file:
    clean_rating_file = open("last.fm/clean_ratings.dat", "w", encoding="latin1")
    _user = "x"          # sentinel: no real user id equals "x"
    _user_count = 0      # ratings seen for the current user
    write_lines = []     # pending (re-numbered) lines for the current user
    lines = rating_file.readlines()
    _user_index = 0      # next dense user index to assign
    for line in lines:
        cur_user = line.split("::")[0]
        if cur_user == _user:
            _user_count += 1
            split_line = line.split("::")
            split_line[0] = str(_user_index)
            line = split_line[0] + "\t" + split_line[1] + "\t" + split_line[2] + "\t" + \
                split_line[3]
            write_lines += [line]
        else:
            print(cur_user, _user, _user_count)
            # Flush the previous user only when they pass the 20-rating cut.
            if _user_count >= 20:
                _user_index += 1
                np.random.shuffle(write_lines)
                clean_rating_file.write("".join(write_lines))
                clean_rating_file.flush()
            else:
                None  # NOTE(review): no-op; users below the cut are dropped
            _user = cur_user
            _user_count = 1
            split_line = line.split("::")
            split_line[0] = str(_user_index)
            line = split_line[0] + "\t" + split_line[1] + "\t" + split_line[2] + "\t" + \
                split_line[3]
            write_lines = [line]
    # Flush the final user after the loop.
    if _user_count >= 20:
        np.random.shuffle(write_lines)
        clean_rating_file.write("".join(write_lines))
        clean_rating_file.flush()
    clean_rating_file.close()
    # NOTE(review): +1 accounts for the last flushed user, whose index was
    # never incremented inside the loop -- verify against the file contents.
    user_size = _user_index + 1
# split train \ val rating
# Two protocols: p=1 -> leave-one-out (HR/NDCG eval); p=-10 -> the slices
# become [:10] train / [10:] val (precision/recall eval). For every user a
# line of ~100 negative (non-interacted) artist ids is also written.
for p in [1, -10]:
    with open(r"last.fm/clean_ratings.dat", encoding="latin1") as clean_rating_file:
        clean_rating_tr_file = open("last.fm/clean_ratings_tr_p{p}.dat".format(p=p), "w", encoding="latin1")
        clean_rating_val_file = open("last.fm/clean_ratings_val_p{p}.dat".format(p=p), "w", encoding="latin1")
        clean_rating_neg_file = open("last.fm/clean_ratings_neg_p{p}.dat".format(p=p), "w", encoding="latin1")
        lines = clean_rating_file.readlines()
        _user = lines[0].split("\t")[0]
        write_lines = []
        # Candidate negatives: all artists, shuffled; positives get removed.
        negs = np.arange(artist_size)
        np.random.shuffle(negs)
        negs = list(negs)
        for line in lines:
            cur_user = line.split("\t")[0]
            if cur_user == _user:
                # Keep the line only once per artist and drop the artist
                # from the negative candidates.
                if int(line.split("\t")[1]) in negs:
                    write_lines += [line]
                    negs.remove(int(line.split("\t")[1]))
            else:
                print(cur_user, _user)
                # NOTE(review): negs[:99] + negs[100] skips negs[99] --
                # looks like an off-by-one; verify the intended 100 negatives.
                neg_line = "".join(["{}\t".format(cur_user)] + ["{}\t".format(neg) for neg in negs[:99]] + [
                    "{}\n".format(negs[100])])
                clean_rating_tr_file.write("".join(write_lines[:-p]))
                clean_rating_tr_file.flush()
                clean_rating_val_file.write("".join(write_lines[-p:]))
                clean_rating_val_file.flush()
                clean_rating_neg_file.write(neg_line)
                clean_rating_neg_file.flush()
                _user = cur_user
                write_lines = [line]
                negs = np.arange(artist_size)
                np.random.shuffle(negs)
                negs = list(negs)
        # Flush the final user after the loop.
        clean_rating_tr_file.write("".join(write_lines[:-p]))
        clean_rating_tr_file.flush()
        clean_rating_tr_file.close()
        clean_rating_val_file.write("".join(write_lines[-p:]))
        clean_rating_val_file.flush()
        clean_rating_val_file.close()
        neg_line = "".join(
            ["{}\t".format(cur_user)] + ["{}\t".format(neg) for neg in negs[:99]] + ["{}\n".format(negs[100])])
        clean_rating_neg_file.write(neg_line)
        clean_rating_neg_file.flush()
        clean_rating_neg_file.close()
    # Densify the training split into a user x artist 0/1 matrix.
    rating_matrix = np.zeros((user_size, artist_size))
    with open(r"last.fm/clean_ratings_tr_p{p}.dat".format(p=p), encoding="latin1") as clean_rating_tr_file:
        lines = clean_rating_tr_file.readlines()
        for line in lines:
            u_i = int(line.split("\t")[0])
            m_i = int(line.split("\t")[1])
            rating_matrix[u_i][m_i] = 1
    with open(r'last.fm/rating_matrix_p{p}.pickle'.format(p=p), 'wb') as handle:
        pickle.dump(rating_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Persist the artist-by-tag content matrix built above.
with open(r'last.fm/item_infomation_matrix.pickle', 'wb') as handle:
    pickle.dump(artist_infomation_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
from shutil import copyfile
# Rename the splits to the NCF-style naming convention expected by the
# training code: p=1 -> HR/NDCG evaluation, p=-10 -> precision/recall.
copyfile("last.fm/clean_ratings_tr_p1.dat", "last.fm/last.fm.hr-ndcg.train.rating")
copyfile("last.fm/clean_ratings_val_p1.dat", "last.fm/last.fm.hr-ndcg.test.rating")
copyfile("last.fm/clean_ratings_neg_p1.dat", "last.fm/last.fm.hr-ndcg.test.negative")
copyfile("last.fm/clean_ratings_tr_p-10.dat", "last.fm/last.fm.precision-recall.train.rating")
copyfile("last.fm/clean_ratings_val_p-10.dat", "last.fm/last.fm.precision-recall.test.rating")
copyfile("last.fm/clean_ratings_neg_p-10.dat", "last.fm/last.fm.precision-recall.test.negative")
| StarcoderdataPython |
5084453 | #!/usr/bin/env python
# Copyright (c) 2013, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the aframeve copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the aframeve copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# this script exports xls-files from a canmatrix-object
# xls-files are the can-matrix-definitions displayed in Excel
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import *
import sys
import os.path
import codecs
import xlwt
import logging
from canmatrix.xls_common import *
logger = logging.getLogger('root')
from .canmatrix import *
import xlrd
# Font Size : 8pt * 20 = 160
#font = 'font: name Arial Narrow, height 160'
font = 'font: name Verdana, height 160'
# Pre-built xlwt cell styles shared by dump(). "first_frame" variants add a
# top border so each frame block is visually separated in the sheet.
# NOTE(review): xlwt is imported unconditionally above, so this guard is
# always true here -- presumably kept for builds where the import is optional.
if xlwt is not None:
    sty_header = xlwt.easyxf(font + ', bold on; align: vertical center, horizontal center',
                             'pattern: pattern solid, fore-colour rose')
    sty_norm = xlwt.easyxf(font + ', colour black')
    sty_first_frame = xlwt.easyxf(font + ', colour black; borders: top thin')
    sty_white = xlwt.easyxf(font + ', colour white')
    # BUMatrix-Styles (board-unit send/receive matrix columns)
    sty_green = xlwt.easyxf('pattern: pattern solid, fore-colour light_green')
    sty_green_first_frame = xlwt.easyxf(
        'pattern: pattern solid, fore-colour light_green; borders: top thin')
    sty_sender = xlwt.easyxf('pattern: pattern 0x04, fore-colour gray25')
    sty_sender_first_frame = xlwt.easyxf(
        'pattern: pattern 0x04, fore-colour gray25; borders: top thin')
    sty_sender_green = xlwt.easyxf(
        'pattern: pattern 0x04, fore-colour gray25, back-colour light_green')
    sty_sender_green_first_frame = xlwt.easyxf(
        'pattern: pattern 0x04, fore-colour gray25, back-colour light_green; borders: top thin')
def writeBuMatrix(buList, sig, frame, worksheet, row, col, firstframe):
    """Write one row of the board-unit send/receive matrix.

    For every board unit in buList, writes "s" (sends), "r" (receives),
    "r/s" (both) or an empty cell at (row, col), alternating cell colour
    per column. sig may be None (frame without signals). Returns the
    first free column after the matrix.
    """
    # first-frame - style with borders:
    if firstframe == sty_first_frame:
        norm = sty_first_frame
        sender = sty_sender_first_frame
        norm_green = sty_green_first_frame
        sender_green = sty_sender_green_first_frame
    # consecutive-frame - style without borders:
    else:
        norm = sty_norm
        sender = sty_sender
        norm_green = sty_green
        sender_green = sty_sender_green
    # iterate over boardunits:
    for bu in buList:
        # every second Boardunit with other style
        if col % 2 == 0:
            locStyle = norm
            locStyleSender = sender
        # every second Boardunit with other style
        else:
            locStyle = norm_green
            locStyleSender = sender_green
        # write "s" "r" "r/s" if signal is sent, recieved or send and recived
        # by boardunit
        if sig and bu in sig.receiver and bu in frame.transmitter:
            worksheet.write(row, col, label="r/s", style=locStyleSender)
        elif sig and bu in sig.receiver:
            worksheet.write(row, col, label="r", style=locStyle)
        elif bu in frame.transmitter:
            worksheet.write(row, col, label="s", style=locStyleSender)
        else:
            worksheet.write(row, col, label="", style=locStyle)
        col += 1
    # loop over boardunits ends here
    return col
def writeExcelLine(worksheet, row, col, rowArray, style):
    """Write rowArray into consecutive cells of *row* starting at *col*.

    Every value is written with the same xlwt style. Returns the first
    free column after the written cells.
    """
    next_col = col
    for cell_value in rowArray:
        worksheet.write(row, next_col, label=cell_value, style=style)
        next_col += 1
    return next_col
def dump(db, file, **options):
    """Export a CanMatrix *db* to an xls workbook written to *file*.

    One row per signal value (for value tables) or per signal, preceded
    by the frame columns and the board-unit send/receive matrix.
    Options: additionalAttributes / additionalFrameAttributes (comma
    separated attribute expressions appended as extra columns) and
    xlsMotorolaBitFormat (passed through to getSignal).
    """
    head_top = ['ID', 'Frame Name', 'Cycle Time [ms]', 'Launch Type', 'Launch Parameter', 'Signal Byte No.', 'Signal Bit No.',
                'Signal Name', 'Signal Function', 'Signal Length [Bit]', 'Signal Default', ' Signal Not Available', 'Byteorder']
    head_tail = ['Value', 'Name / Phys. Range', 'Function / Increment Unit']
    # Extra per-signal columns, evaluated as "sig.<expr>" below.
    if "additionalAttributes" in options and len(options["additionalAttributes"]) > 0:
        additionalSignalCollums = options["additionalAttributes"].split(",")
    else:
        additionalSignalCollums = []#["attributes['DisplayDecimalPlaces']"]
    # Extra per-frame columns, evaluated as "frame.<expr>" below.
    if "additionalFrameAttributes" in options and len(options["additionalFrameAttributes"]) > 0:
        additionalFrameCollums = options["additionalFrameAttributes"].split(",")
    else:
        additionalFrameCollums = []#["attributes['DisplayDecimalPlaces']"]
    if 'xlsMotorolaBitFormat' in options:
        motorolaBitFormat = options["xlsMotorolaBitFormat"]
    else:
        motorolaBitFormat = "msbreverse"
    workbook = xlwt.Workbook(encoding='utf8')
    # wsname = os.path.basename(filename).replace('.xls', '')
    # worksheet = workbook.add_sheet('K-Matrix ' + wsname[0:22])
    worksheet = workbook.add_sheet('K-Matrix ')
    rowArray = []
    col = 0
    # write frameardunits in first row:
    buList = []
    for bu in db.boardUnits:
        buList.append(bu.name)
    # Header layout: frame/signal columns, board-unit matrix, value
    # columns, then the optional extra attribute columns.
    rowArray += head_top
    head_start = len(rowArray)
    rowArray += buList
    for col in range(0,len(rowArray)):
        worksheet.col(col).width = 1111
    tail_start = len(rowArray)
    rowArray += head_tail
    additionalFrame_start = len(rowArray)
    for col in range(tail_start, len(rowArray)):
        worksheet.col(col).width = 3333
    for additionalCol in additionalFrameCollums:
        rowArray.append("frame." + additionalCol)
        col += 1
    for additionalCol in additionalSignalCollums:
        rowArray.append("signal." + additionalCol)
        col += 1
    writeExcelLine(worksheet, 0, 0, rowArray,sty_header)
    # set width of selected Cols
    worksheet.col(1).width = 5555
    worksheet.col(3).width = 3333
    worksheet.col(7).width = 5555
    worksheet.col(8).width = 7777
    worksheet.col(head_start).width = 1111
    worksheet.col(head_start + 1).width = 5555
    # Frames sorted by numeric id; complex multiplexers are skipped.
    frameHash = {}
    logger.debug("DEBUG: Length of db.frames is %d" % len(db.frames))
    for frame in db.frames:
        if frame.is_complex_multiplexed:
            logger.error("export complex multiplexers is not supported - ignoring frame " + frame.name)
            continue
        frameHash[int(frame.id)] = frame
    # set row to first Frame (row = 0 is header)
    row = 1
    # iterate over the frames
    for idx in sorted(frameHash.keys()):
        frame = frameHash[idx]
        framestyle = sty_first_frame
        # sort signals by start bit (zero-padded for stable string sort):
        sigHash = {}
        for sig in frame.signals:
            sigHash["%02d" % int(sig.getStartbit()) + sig.name] = sig
        # set style for first line with border
        sigstyle = sty_first_frame
        additionalFrameInfo = []
        for frameInfo in additionalFrameCollums:
            try:
                temp = eval("frame." + frameInfo)
            except:
                temp = ""
            additionalFrameInfo.append(temp)
        # iterate over signals
        rowArray = []
        if len(sigHash) == 0:  # Frames without signals
            rowArray += getFrameInfo(db, frame)
            for item in range(5, head_start):
                rowArray.append("")
            tempCol = writeExcelLine(worksheet, row, 0, rowArray, framestyle)
            tempCol = writeBuMatrix(buList, None, frame, worksheet, row, tempCol , framestyle)
            rowArray = []
            for col in range(tempCol, additionalFrame_start):
                rowArray.append("")
            rowArray += additionalFrameInfo
            for i in additionalSignalCollums:
                rowArray.append("")
            writeExcelLine(worksheet, row, tempCol, rowArray, framestyle)
            row += 1
            continue
        # iterate over signals
        for sig_idx in sorted(sigHash.keys()):
            sig = sigHash[sig_idx]
            # if not first Signal in Frame, set style
            if sigstyle != sty_first_frame:
                sigstyle = sty_norm
            if sig.values.__len__() > 0:  # signals with valuetable
                valstyle = sigstyle
                # iterate over values in valuetable: one sheet row per value
                for val in sorted(sig.values.keys()):
                    rowArray = getFrameInfo(db, frame)
                    frontcol = writeExcelLine(worksheet, row, 0, rowArray, framestyle)
                    if framestyle != sty_first_frame:
                        worksheet.row(row).level = 1
                    col = head_start
                    col = writeBuMatrix(buList, sig, frame, worksheet, row, col, framestyle)
                    # write Value
                    (frontRow, backRow) = getSignal(db, sig, motorolaBitFormat)
                    writeExcelLine(worksheet, row, frontcol, frontRow, sigstyle)
                    backRow += additionalFrameInfo
                    for item in additionalSignalCollums:
                        try:
                            temp = eval("sig." + item)
                        except:
                            temp = ""
                        backRow.append(temp)
                    writeExcelLine(worksheet, row, col + 2, backRow, sigstyle)
                    writeExcelLine(worksheet, row, col, [val, sig.values[val]], valstyle)
                    # no min/max here, because min/max has same col as values...
                    # next row
                    row += 1
                    # set style to normal - without border
                    sigstyle = sty_white
                    framestyle = sty_white
                    valstyle = sty_norm
                # loop over values ends here
            # no valuetable available
            else:
                rowArray = getFrameInfo(db, frame)
                frontcol = writeExcelLine(worksheet, row, 0, rowArray, framestyle)
                if framestyle != sty_first_frame:
                    worksheet.row(row).level = 1
                col = head_start
                col = writeBuMatrix(
                    buList, sig, frame, worksheet, row, col, framestyle)
                (frontRow,backRow) = getSignal(db,sig,motorolaBitFormat)
                writeExcelLine(worksheet, row, frontcol, frontRow, sigstyle)
                # Physical range goes into the "Name / Phys. Range" column.
                if float(sig.min) != 0 or float(sig.max) != 1.0:
                    backRow.insert(0,str("%g..%g" %(sig.min,sig.max)))
                else:
                    backRow.insert(0, "")
                backRow.insert(0,"")
                # NOTE(review): this loop concatenates the int `col` to
                # "sig.", which always raises TypeError (swallowed by the
                # bare except) and `temp` is never used -- looks like dead
                # code superseded by the loop below; verify and remove.
                for item in additionalSignalCollums:
                    try:
                        temp = eval("sig." + col)
                    except:
                        temp = ""
                backRow += additionalFrameInfo
                for item in additionalSignalCollums:
                    try:
                        temp = eval("sig." + item)
                    except:
                        temp = ""
                    backRow.append(temp)
                writeExcelLine(worksheet, row, col, backRow, sigstyle)
                # next row
                row += 1
                # set style to normal - without border
                sigstyle = sty_white
                framestyle = sty_white
            # reset signal-Array
            signals = []
        # loop over signals ends here
    # loop over frames ends here
    # frozen headings instead of split panes
    worksheet.set_panes_frozen(True)
    # in general, freeze after last heading row
    worksheet.set_horz_split_pos(1)
    worksheet.set_remove_splits(True)
    # save file
    workbook.save(file)
def load(file, **options):
    """Import a CanMatrix from an xls file object (as written by dump()).

    Detects the column layout from the header row, reads the board units
    from the matrix columns, then parses frames, signals and value tables
    row by row. Option xlsMotorolaBitFormat selects how Motorola start
    bits are interpreted ("msb", "msbreverse" (default) or "lsb").
    """
    if 'xlsMotorolaBitFormat' in options:
        motorolaBitFormat = options["xlsMotorolaBitFormat"]
    else:
        motorolaBitFormat = "msbreverse"
    wb = xlrd.open_workbook(file_contents=file.read())
    sh = wb.sheet_by_index(0)
    db = CanMatrix()
    # Defines not imported...
    # db.addBUDefines("NWM-Stationsadresse", 'HEX 0 63')
    # db.addBUDefines("NWM-Knoten", 'ENUM "nein","ja"')
    db.addFrameDefines("GenMsgCycleTime", 'INT 0 65535')
    db.addFrameDefines("GenMsgDelayTime", 'INT 0 65535')
    db.addFrameDefines("GenMsgCycleTimeActive", 'INT 0 65535')
    db.addFrameDefines("GenMsgNrOfRepetitions", 'INT 0 65535')
    # db.addFrameDefines("GenMsgStartValue", 'STRING')
    launchTypes = []  # distinct send types seen, collected for the define
    # db.addSignalDefines("GenSigStartValue", 'HEX 0 4294967295')
    db.addSignalDefines("GenSigSNA", 'STRING')
    # eval search for correct collums: map header captions to column numbers
    index = {}
    for i in range(sh.ncols):
        value = sh.cell(0, i).value
        if value == "ID":
            index['ID'] = i
        elif "Frame Name" in value:
            index['frameName'] = i
        elif "Cycle" in value:
            index['cycle'] = i
        elif "Launch Type" in value:
            index['launchType'] = i
        elif "Launch Parameter" in value:
            index['launchParam'] = i
        elif "Signal Byte No." in value:
            index['startbyte'] = i
        elif "Signal Bit No." in value:
            index['startbit'] = i
        elif "Signal Name" in value:
            index['signalName'] = i
        elif "Signal Function" in value:
            index['signalComment'] = i
        elif "Signal Length" in value:
            index['signalLength'] = i
        elif "Signal Default" in value:
            index['signalDefault'] = i
        elif "Signal Not Ava" in value:
            index['signalSNA'] = i
        elif "Value" in value:
            index['Value'] = i
        elif "Name / Phys" in value:
            index['ValueName'] = i
        elif "Function /" in value:
            index['function'] = i
        elif "Byteorder" in value:
            index['byteorder'] = i
    # Board-unit matrix columns sit between the signal columns and "Value".
    if "byteorder" in index:
        index['BUstart'] = index['byteorder'] + 1
    else:
        index['BUstart'] = index['signalSNA'] + 1
    index['BUend'] = index['Value']
    # BoardUnits:
    for x in range(index['BUstart'], index['BUend']):
        db.boardUnits.add(BoardUnit(sh.cell(0, x).value))
    # initialize:
    frameId = None
    signalName = ""
    newBo = None
    for rownum in range(1, sh.nrows):
        # ignore empty row
        if sh.cell(rownum, index['ID']).value.__len__() == 0:
            break
        # new frame detected
        if sh.cell(rownum, index['ID']).value != frameId:
            sender = []
            # new Frame
            frameId = sh.cell(rownum, index['ID']).value
            frameName = sh.cell(rownum, index['frameName']).value
            cycleTime = sh.cell(rownum, index['cycle']).value
            launchType = sh.cell(rownum, index['launchType']).value
            dlc = 8
            launchParam = sh.cell(rownum, index['launchParam']).value
            try:
                launchParam = str(int(launchParam))
            except:
                launchParam = "0"
            # Trailing "h" marks a hex id, "xh" an extended hex id.
            if frameId.endswith("xh"):
                newBo = Frame(frameName, Id=int(frameId[:-2], 16), dlc=dlc, extended = True)
            else:
                newBo = Frame(frameName, Id=int(frameId[:-1], 16), dlc=dlc)
            db.frames.addFrame(newBo)
            # eval launctype
            if launchType is not None:
                newBo.addAttribute("GenMsgSendType", launchType)
                if launchType not in launchTypes:
                    launchTypes.append(launchType)
            # eval cycletime
            try:
                cycleTime = int(cycleTime)
            except:
                cycleTime = 0
            newBo.addAttribute("GenMsgCycleTime", str(int(cycleTime)))
        # new signal detected
        if sh.cell(rownum, index['signalName']).value != signalName and len(sh.cell(rownum, index['signalName']).value)>0:
            # new Signal
            receiver = []
            startbyte = int(sh.cell(rownum, index['startbyte']).value)
            startbit = int(sh.cell(rownum, index['startbit']).value)
            signalName = sh.cell(rownum, index['signalName']).value
            signalComment = sh.cell(
                rownum, index['signalComment']).value.strip()
            signalLength = int(sh.cell(rownum, index['signalLength']).value)
            signalDefault = sh.cell(rownum, index['signalDefault']).value
            signalSNA = sh.cell(rownum, index['signalSNA']).value
            multiplex = None
            # "Mode Signal:" marks the multiplexor; "Mode <n>:" a muxed signal.
            if signalComment.startswith('Mode Signal:'):
                multiplex = 'Multiplexor'
                signalComment = signalComment[12:]
            elif signalComment.startswith('Mode '):
                mux, signalComment = signalComment[4:].split(':', 1)
                multiplex = int(mux.strip())
            if "byteorder" in index:
                signalByteorder = sh.cell(rownum, index['byteorder']).value
                if 'i' in signalByteorder:
                    is_little_endian = True
                else:
                    is_little_endian = False
            else:
                is_little_endian = True  # Default Intel
            is_signed = False
            if signalName != "-":
                # "s"/"r" cells in the matrix columns define tx/rx relations.
                for x in range(index['BUstart'], index['BUend']):
                    if 's' in sh.cell(rownum, x).value:
                        newBo.addTransmitter(sh.cell(0, x).value.strip())
                    if 'r' in sh.cell(rownum, x).value:
                        receiver.append(sh.cell(0, x).value.strip())
                # if signalLength > 8:
                newSig = Signal(signalName,
                                startBit=(startbyte - 1) * 8 + startbit,
                                signalSize=signalLength,
                                is_little_endian=is_little_endian,
                                is_signed=is_signed,
                                receiver=receiver,
                                multiplex=multiplex)
                # else:
                #    newSig = Signal(signalName, (startbyte-1)*8+startbit, signalLength, is_little_endian, is_signed, 1, 0, 0, 1, "", receiver, multiplex)
                if not is_little_endian:
                    # motorola: reinterpret the start bit per the option
                    if motorolaBitFormat == "msb":
                        newSig.setStartbit(
                            (startbyte - 1) * 8 + startbit, bitNumbering=1)
                    elif motorolaBitFormat == "msbreverse":
                        newSig.setStartbit((startbyte - 1) * 8 + startbit)
                    else:  # motorolaBitFormat == "lsb"
                        newSig.setStartbit(
                            (startbyte - 1) * 8 + startbit,
                            bitNumbering=1,
                            startLittle=True)
                newBo.addSignal(newSig)
                newSig.addComment(signalComment)
        # Per-row value/scale parsing (runs for every sheet row).
        function = sh.cell(rownum, index['function']).value
        value = str(sh.cell(rownum, index['Value']).value)
        valueName = sh.cell(rownum, index['ValueName']).value
        if valueName == 0:
            valueName = "0"
        elif valueName == 1:
            valueName = "1"
        test = valueName
        #.encode('utf-8')
        factor = 0
        unit = ""
        factor = sh.cell(rownum, index['function']).value
        # "Function / Increment Unit" is either "<factor> <unit>" or a unit.
        if type(factor).__name__ == "unicode" or type(factor).__name__ == "str":
            factor = factor.strip()
            if " " in factor and factor[0].isdigit():
                (factor, unit) = factor.strip().split(" ", 1)
                factor = factor.strip()
                unit = unit.strip()
                newSig.unit = unit
                try:
                    newSig.factor = float(factor)
                except:
                    logger.warn(
                        "Some error occurred while decoding scale: Signal: %s; \"%s\"" %
                        (signalName, sh.cell(
                            rownum, index['function']).value))
            else:
                unit = factor.strip()
                newSig.unit = unit
                newSig.factor = 1
        # "min..max" in the name column defines the physical range,
        # otherwise a non-empty name is a value-table entry.
        if ".." in test:
            (mini, maxi) = test.strip().split("..", 2)
            unit = ""
            try:
                newSig.offset = float(mini)
                newSig.min = float(mini)
                newSig.max = float(maxi)
            except:
                newSig.offset = 0
        elif valueName.__len__() > 0:
            if value.strip().__len__() > 0:
                value = int(float(value))
                newSig.addValues(value, valueName)
            maxi = pow(2, signalLength) - 1
            newSig.max = float(maxi)
        else:
            newSig.offset = 0
    # Post-processing: derive receivers/DLC and register the send types.
    for frame in db.frames:
        frame.updateReceiver()
        frame.calcDLC()
    launchTypeEnum = "ENUM"
    for launchType in launchTypes:
        if len(launchType) > 0:
            launchTypeEnum += ' "' + launchType + '",'
    db.addFrameDefines("GenMsgSendType", launchTypeEnum[:-1])
    db.setFdType()
    return db
| StarcoderdataPython |
9601825 | <filename>fn_pagerduty/tests/test_resilient_common.py
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
import pytest
from fn_pagerduty.lib import resilient_common
def test_merge_two_dicts():
    """merge_two_dicts combines two disjoint dicts into one with all pairs."""
    a = {"k1": "v1"}
    b = {"k2": "v2"}
    expected_result = {
        "k1": "v1",
        "k2": "v2"
    }
    result = resilient_common.merge_two_dicts(a, b)
    # Compare the full mapping: the old loop only checked key membership,
    # so wrong values or extra keys would have passed unnoticed.
    assert result == expected_result
    # The merge must not mutate its inputs.
    assert a == {"k1": "v1"}
    assert b == {"k2": "v2"}
| StarcoderdataPython |
1702217 | <gh_stars>0
# This script will query Twitter for the latest tweets using a given hashtag
# Based on each run it will commit the last analyzed tweet to disk, so we
# can look for all newer tweets after that.
# GOTCHAS: If 150 new tweets come in since the last analyzed tweet, we may
# have a gap in tweets we can review. This problem doesn't exist until we make
# our hashtags popular though.
import logging
import os
from configparser import ConfigParser
from pprint import pprint
import tweepy
from brain import Brain
# Read Twitter credentials and the hashtag list from config.ini located
# next to this script.
cfg = ConfigParser()
cfg.read(f"{os.path.dirname(os.path.realpath(__file__))}/config.ini")
consumer_key = cfg["twitter"]["consumer_key"]
consumer_secret = cfg["twitter"]["consumer_secret"]
access_token = cfg["twitter"]["access_token"]
access_token_secret = cfg["twitter"]["access_token_secret"]
# Hashtags to scan, comma separated in the config file.
TAGS = [htag.strip() for htag in cfg["twitter"]["hashtags"].split(",")]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
API = tweepy.API(auth)
# On-disk memory of the last analyzed tweet per tag (see module docstring).
LAST_TWEET_FILE = (
    f"{os.path.dirname(os.path.realpath(__file__))}/last_known_retweet.txt"
)
BRAIN = Brain(LAST_TWEET_FILE)
FORMAT = "%(asctime)-15s:%(levelname)s:%(pathname)s:%(funcName)s:%(lineno)d: %(message)s"
logging.basicConfig(format=FORMAT, datefmt="%Y-%m-%d:%H:%M:%S")
logger = logging.getLogger(__name__)
def _is_tweet_toplevel(tweet):
"""is_tweet_toplevel detects if a tweet is a top level tweet.
Top level tweets are identified by not being a response to another tweet,
or being a retweet of another tweet.
Args:
tweet ([Status]): A tweepy Status object
Returns:
bool
"""
reply_to = tweet.in_reply_to_status_id
retweet_of = getattr(tweet, "retweeted_status", None)
# If the tweet is a reply to another tweet, or a retweet of a different
# tweet, it is not top level
if reply_to or retweet_of:
return False
return True
def _retweet(tweet):
tweet.retweet()
return None
def _get_sinceid_for_tag(tag):
    # Return the id of the last analyzed tweet for this tag, or None when
    # the brain has no record yet (search will then start from scratch).
    return BRAIN[tag].get("id_str", None)
def _set_tag_to_tweet(tag, tweet):
    # Remember this tweet (as its JSON dict) as the last analyzed one
    # for the tag; persisted only when _save_brain() is called.
    BRAIN[tag] = tweet
def _save_brain():
    # Flush the in-memory brain state to LAST_TWEET_FILE on disk.
    BRAIN.save_file()
def retweet_tags():
    """Retweet new top-level tweets for every configured hashtag.

    For each tag in TAGS, search Twitter for tweets newer than the last
    one recorded in BRAIN, retweet the top-level ones (skipping replies
    and retweets) and persist the newest analyzed tweet after each step
    so the next run resumes from there.
    """
    for tag in TAGS:
        # Use the configured module logger instead of print so output goes
        # through the logging setup done at import time.
        logger.info("Now searching for tag: %s", tag)
        last_tweet_id = _get_sinceid_for_tag(tag)
        public_tweets = API.search(q=tag, count=500, since_id=last_tweet_id)
        # Process oldest first so the saved "last tweet" ends up the newest.
        for tweet in reversed(public_tweets):
            logger.debug("%s: %d characters", tweet.author.name, len(tweet.text))
            if _is_tweet_toplevel(tweet):
                try:
                    _retweet(tweet)
                    logger.info("Retweeted tweet %s", tweet.id)
                except Exception:
                    # Best effort: a failed retweet (e.g. already retweeted)
                    # must not stop the scan; log the traceback instead of
                    # silently printing and discarding it.
                    logger.exception("Could not retweet tweet %s", tweet.id)
            # Persist progress after every tweet so a crash loses at most one.
            logger.debug("Updating brainfile to tweet id: %s", tweet.id)
            _set_tag_to_tweet(tag, tweet._json)
            _save_brain()
    logger.info("Loop done, good-bye!")
#################
# class Retweeter:
# def __init__(self, tweepyapi, exact_tags=None, like_tags=None, brain=None):
# self.api = tweepyapi
# self.exact_tags = exact_tags
# self.like_tags = like_tags
# if self.like_tags:
# logger.warning("Retweeting like tags is currently not supported")
# def search_for_string(self, string, since_id=None):
# result = self.api.search(q=string, count=500, since_id=since_id)
# return reversed([Tweet(tweet) for tweet in result])
# class Tweet(tweepy.Tweet):
# def __init__(self, tweet):
# self.tweet = tweet
# def functional_section():
# myrt = Retweeter(api, exact_tags=['1', '2'])
# mybrain = Brain(LAST_TWEET_FILE)
# for tag in ["#ValorantLFG", "#ValorantLFM"]:
# myrt.search_for_string(tag, since_id=mybrain[tag].get("id_str", None)
# ###
# # myretweeter = retweeter('#ValorantLFG', '#ValorantLFM')
# # retweet_tags(*tags)
| StarcoderdataPython |
3289390 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, SubmitField
from wtforms.validators import Required, Length, EqualTo
from wtforms import ValidationError
from flask.ext.babel import lazy_gettext as _
| StarcoderdataPython |
6446985 | <gh_stars>1-10
__all__ = []

from .resnet import ResNetTorch
from .vgg import VGGTorch
from .alexnet import AlexNetTorch
from .squeezenet import SqueezeNetTorch
from .densenet import DenseNetTorch
from .mobilenet import MobileNetV2Torch
from .resnext import ResNeXtTorch
from .seblocks import SEBasicBlockTorch, SEBottleneckTorch, \
    SEBottleneckXTorch
from .unet import UNetTorch, LinkNetTorch

# Re-export the model classes as this package's public API.
__all__ += [
    "AlexNetTorch",
    "DenseNetTorch",
    "LinkNetTorch",
    "MobileNetV2Torch",
    "ResNetTorch",
    "ResNeXtTorch",
    "SEBasicBlockTorch",
    "SEBottleneckTorch",
    "SEBottleneckXTorch",
    "SqueezeNetTorch",
    "UNetTorch",
    "VGGTorch",
]
2432 | <filename>yue/core/explorer/ftpsource.py
from ftplib import FTP,error_perm, all_errors
import posixpath
from io import BytesIO,SEEK_SET
from .source import DataSource
import sys
import re
# Pattern for (ssh|ftp)://[user[:password]@]host[:port]/path
# Raw string so the escapes are not interpreted by Python itself.
# FIX: the host group is now [^:\/]+ (was [^:]+): the old greedy group
# swallowed everything up to the *last* slash when no port was present,
# so "ftp://host/a/b" parsed as hostname="host/a", path="b".
reftp = re.compile(r'(ssh|ftp)\:\/\/(([^@:]+)?:?([^@]+)?@)?([^:\/]+)(:[0-9]+)?\/(.*)')


def parseFTPurl(url):
    """Parse an ssh:// or ftp:// URL into its components.

    Returns a dict with keys: mode ('ssh' or 'ftp'), username, password,
    hostname, path, and port (defaulted to 22 for ssh and 21 for ftp when
    the URL does not specify one).  Raises ValueError for anything that
    does not match the expected form.
    """
    m = reftp.match(url)
    if m:
        g = m.groups()
        result = {
            "mode": g[0],
            "username": g[2] or "",
            "password": g[3] or "",
            "hostname": g[4] or "",
            "port": int(g[5][1:]) if g[5] else 0,
            "path": g[6] or "/",
        }
        if result['port'] == 0:
            # BUG FIX: the original compared against the bare name `ssh`
            # (a NameError at runtime); compare against the string 'ssh'.
            if result['mode'] == 'ssh':
                result['port'] = 22
            else:
                result['port'] = 21  # ftp port default
        return result
    raise ValueError("invalid: %s" % url)
def utf8_fix(s):
    """Replace every non-ASCII character in *s* with its two-digit
    uppercase hex code so the result is plain ASCII (see FTPSource's
    docstring for why ftplib chokes on non-ASCII command strings)."""
    out = []
    for ch in s:
        code = ord(ch)
        out.append(ch if code < 128 else "%02X" % code)
    return ''.join(out)
class FTPWriter(object):
    """File-like write target: data is buffered in memory and uploaded
    to the server with a single STOR command when the writer is closed."""

    def __init__(self, ftp, path):
        super(FTPWriter, self).__init__()
        self.ftp = ftp
        self.path = path
        self.file = BytesIO()

    def write(self, data):
        return self.file.write(data)

    def seek(self, pos, whence=SEEK_SET):
        return self.file.seek(pos, whence)

    def tell(self):
        return self.file.tell()

    def close(self):
        # Rewind, then push the whole buffer to the server in one STOR.
        self.file.seek(0)
        self.ftp.storbinary("STOR " + utf8_fix(self.path), self.file)

    def __enter__(self):
        return self

    def __exit__(self, typ, val, tb):
        # Only upload on a clean exit; a raised exception skips the STOR.
        if typ is None:
            self.close()
class FTPReader(object):
    """File-like read source: the remote file is fetched eagerly into an
    in-memory buffer via RETR at construction time."""

    def __init__(self, ftp, path):
        super(FTPReader, self).__init__()
        self.ftp = ftp
        self.path = path
        self.file = BytesIO()
        # Download the whole file up front, then rewind for reading.
        self.ftp.retrbinary("RETR " + utf8_fix(self.path), self.file.write)
        self.file.seek(0)

    def read(self, n=None):
        return self.file.read(n)

    def seek(self, pos, whence=SEEK_SET):
        return self.file.seek(pos, whence)

    def tell(self):
        return self.file.tell()

    def close(self):
        self.file.close()

    def __enter__(self):
        return self

    def __exit__(self, typ, val, tb):
        if typ is None:
            self.close()
class FTPSource(DataSource):
    """DataSource backed by a remote FTP server (POSIX-style paths).

    There is some sort of problem with utf-8/latin-1 and ftplib:
    storbinary must accept a STRING, since it builds a cmd and adds
    the CRLF to the input argument using the plus operator.
    The command fails when given unicode text (ord > 127) and also
    fails when given a byte string.  Hence utf8_fix() is applied to
    paths before they are embedded in FTP commands.
    """
    # TODO: turn this into a directory generator
    # which first loads the directory, then loops over
    # loaded items.

    # TODO: on windows we need a way to view available
    # drive letters

    def __init__(self, host, port, username="", password=""):
        # Connect and authenticate immediately; failures propagate.
        super(FTPSource, self).__init__()
        self.ftp = FTP()
        self.ftp.connect(host, port)
        self.ftp.login(username, password)
        self.hostname = "%s:%d" % (host, port)

    def root(self):
        """Root of the remote tree (always '/')."""
        return "/"

    def close(self):
        """Quit the FTP session; errors are logged, not raised."""
        try:
            self.ftp.quit()
        except all_errors as e:
            sys.stderr.write("Error Closing FTP connection\n")
            sys.stderr.write("%s\n" % e)
        super().close()

    def fix(self, path):
        # Sanitize a path for use inside an FTP command string.
        return utf8_fix(path)

    def join(self, *args):
        return posixpath.join(*args)

    def breakpath(self, path):
        # Split on both separators and drop empty components.
        return [x for x in path.replace("/", "\\").split("\\") if x]

    def relpath(self, path, base):
        return posixpath.relpath(path, base)

    def normpath(self, path, root=None):
        # Relative paths are resolved against *root* before normalizing.
        if root and not path.startswith("/"):
            path = posixpath.join(root, path)
        return posixpath.normpath(path)

    def listdir(self, path):
        """List names in a remote directory (NLST)."""
        return self.ftp.nlst(path)

    def parent(self, path):
        # TODO: if path is C:\\ return empty string ?
        # empty string returns drives
        p, _ = posixpath.split(path)
        return p

    def move(self, oldpath, newpath):
        """Rename/move a remote file or directory."""
        self.ftp.rename(oldpath, newpath)

    def delete(self, path):
        """Remove a remote file or (empty) directory, best-effort."""
        # todo support removing directory rmdir()
        path = utf8_fix(path)
        if self.exists(path):
            if self.isdir(path):
                try:
                    self.ftp.rmd(path)
                except Exception as e:
                    print("ftp delete error: %s" % e)
            else:
                try:
                    self.ftp.delete(path)
                except Exception as e:
                    print("ftp delete error: %s" % e)

    def open(self, path, mode):
        """Open a remote file; only binary modes 'rb' and 'wb' exist."""
        if mode == "wb":
            return FTPWriter(self.ftp, path)
        elif mode == "rb":
            return FTPReader(self.ftp, path)
        raise NotImplementedError(mode)

    def exists(self, path):
        # Implemented by listing the parent directory -- one round trip
        # per call.
        path = utf8_fix(path)
        p, n = posixpath.split(path)
        lst = set(self.listdir(p))
        return n in lst

    def isdir(self, path):
        # SIZE only succeeds for regular files, so a None/failed size is
        # taken to mean "directory".
        path = utf8_fix(path)
        try:
            return self.ftp.size(path) is None
        except error_perm:
            # TODO: to think about more later,
            # under my use-case, I'm only asking if a path is a directory
            # if I Already think it exists. Under the current FTP impl
            # ftp.size() fails for various reasons unless the file exists
            # and is an accessable file. I can infer that a failure to
            # determine the size means that the path is a directory,
            # but this does not hold true under other use cases.
            # I can't cache listdir calls, but if I could, then I could
            # use that to determine if the file exists
            return True  # self.exists( path )

    def mkdir(self, path):
        """Create a directory, creating one missing parent if needed."""
        # this is a really ugly quick and dirty solution
        path = utf8_fix(path)
        if not self.exists(path):
            p = self.parent(path)
            try:
                if not self.exists(p):
                    self.ftp.mkd(p)
                self.ftp.mkd(path)
            except Exception as e:
                print("ftp mkd error: %s" % e)

    def split(self, path):
        return posixpath.split(path)

    def splitext(self, path):
        return posixpath.splitext(path)

    def stat(self, path):
        """Minimal stat: only size and isDir are real; times/mode are 0."""
        try:
            size = self.ftp.size(path)
        except error_perm:
            size = None
        result = {
            "isDir": size is None,
            "isLink": False,
            "mtime": 0,
            "ctime": 0,
            "size": size or 0,
            "name": self.split(path)[1],
            "mode": 0
        }
        return result

    def stat_fast(self, path):
        # not fast for this file system :( -- still does a SIZE round trip.
        try:
            size = self.ftp.size(path)
        except error_perm:
            size = None
        result = {
            "name": self.split(path)[1],
            "size": size or 0,
            "isDir": size is None,
            "isLink": False,
        }
        return result

    def chmod(self, path, mode):
        # No SITE CHMOD support; intentionally a no-op.
        print("chmod not implemented")

    def getExportPath(self, path):
        # Prefix with "host:port" so exported paths identify the server.
        return self.hostname + path
| StarcoderdataPython |
4882085 | from DSAE import Discriminative_SAE
import DSAE.Pre_process as Pre_process
import DSAE.To_full as To_full
import DSAE.Dropout as Dropout
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error
from scipy.stats import pearsonr
from sklearn.metrics.pairwise import cosine_similarity
import argparse
# Command-line configuration for the imputation run.  --input_raw of the
# literal string 'None' tells main() to simulate dropout from the ground
# truth instead of loading a raw matrix.
parser = argparse.ArgumentParser()
parser.add_argument('--input_true', type=str, default='data/test.csv')
parser.add_argument('--input_raw', type=str, default='None')
parser.add_argument('--outputdir', type=str, default='data')
parser.add_argument('--dim1', type=int, default=600)
parser.add_argument('--dim2', type=int, default=256)
parser.add_argument('--epoch1', type=int, default=3000)
parser.add_argument('--epoch2', type=int, default=1000)
parser.add_argument('--learning_rate', type=float, default=4e-3)
parser.add_argument('--batch', type=int, default=64)
parser.add_argument('--print_step', type=int, default=200)
args = parser.parse_args()
def main(data1, data2, outdir):
    """Run the DSAE imputation pipeline and report quality metrics.

    data1: CSV path of the ground-truth matrix.
    data2: CSV path of the raw matrix, or the string 'None' to simulate
           dropout from the ground truth via Dropout.main.
    outdir: directory that receives impute.csv.

    NOTE(review): hyper-parameters are read from the module-level argparse
    `args`, not passed in -- this function is not reusable as a library call.
    """
    ######################## Read Data ########################
    data_T = pd.read_csv(data1, index_col=0)
    if(data2 == 'None'):
        # No raw matrix supplied: simulate dropout from the ground truth.
        data_raw = Dropout.main(data_T, outdir)
    else:
        data_raw = pd.read_csv(data2, index_col=0)
    data_loss = data_raw
    # Row-row cosine similarity, used as the model's adjacency matrix.
    adj = cosine_similarity(data_raw.values)
    print(adj)
    ######################## Data Preprocessing ######################
    data_raw_process, row, col, data_loss_part = Pre_process.normalize(data_raw, data_loss) # 500x3000 → 500x1344
    ######################## Imputation ######################
    model = Discriminative_SAE(dims = [args.dim1, args.dim2],
                               activations = ['sigmoid', 'relu'],
                               epoch = [args.epoch1, args.epoch2],
                               loss = 'rmse',
                               lr = args.learning_rate,
                               noise = None,
                               batch_size = args.batch,
                               print_step = args.print_step,
                               Adj = adj)
    model.fit(data_raw_process, data_loss_part)
    predict = model.predict(data_raw_process)
    impute_part = pd.DataFrame(predict, index=row, columns=col)
    # Merge the imputed sub-matrix back into the full-size matrix.
    impute = To_full.getAll(impute_part, data_raw)
    impute.to_csv(outdir + '/impute.csv')
    # Report reconstruction quality against the ground truth.
    print("------------------------- The metrics of this {}x{}--------------------------- ".format(data_T.values.shape[0], data_T.values.shape[1]))
    print("Mean Absolute Error: MAE = {0:.3f}".format( mean_absolute_error(data_T, impute) ))
    print("Mean square error: MSE = {0:.3f}".format( mean_squared_error(data_T, impute) ** 0.5 ))
    print("Pearson correlation coefficient: PCC = {0:.3f}".format( pearsonr(data_T.values.reshape(-1), impute.values.reshape(-1))[0] ))
main(args.input_true, args.input_raw, args.outputdir) | StarcoderdataPython |
3366675 | """ Takes user input and prints results """
import getpass
from apscheduler.schedulers.background import BackgroundScheduler
import boiling
import consomme
def bool_to_private_text(private_bool):
    """Map a privacy flag to its display text.

    Returns 'Private' when *private_bool* is truthy, otherwise 'Public'.
    (The original docstring incorrectly said 'public' for both cases.)
    """
    if private_bool:
        return "Private"
    return "Public"
def check_insta(list_current_state, username, email, password):
    """ Checks current is_private against old is_private.

    list_current_state and username are single-element lists so the
    scheduled job can share and mutate state across invocations.
    On a state change the new state is stored and an email is sent.
    """
    try:
        new_state = boiling.is_name_private(username[0])
        if new_state != list_current_state[0]:
            list_current_state[0] = new_state
            consomme.send_email(bool_to_private_text(new_state), email, password)
            print("State is now: " + bool_to_private_text(new_state))
        else:
            print("No change. Checking again in <interval>.")
    except TypeError:
        # NOTE(review): presumably is_name_private returns a non-bool for an
        # unknown username, which surfaces here as a TypeError -- confirm
        # against boiling's implementation.
        print("Check if a valid instagram username was used.")
def start_checks():
    """Prompt for the account/interval/credentials, then run periodic
    privacy checks on a background scheduler until the user quits.

    While running, typing 'state' prints the current privacy state and
    'username' prints the monitored account; an empty line exits.
    """
    # username and the current state live in one-element lists so the
    # scheduled check_insta job can update them in place.
    username = [input("Input a valid instagram username: ")]
    intervallength = input("Interval length (in minutes) between checks: ")
    email = input("Input gmail: ")
    # BUG FIX: this line was truncated ("password = <PASSWORD>()"); read the
    # password without echoing via the getpass module imported above.
    password = getpass.getpass()
    list_current_state = [boiling.is_name_private(username[0])]
    kwargs = {'username': username,
              'list_current_state': list_current_state,
              'email': email,
              'password': password}
    sched = BackgroundScheduler()
    sched.start()
    print("Checks have begun. \nPress ENTER to stop script. ")
    print("State is initially: " + bool_to_private_text(list_current_state[0]))
    check_insta(list_current_state, username, email, password)
    sched.add_job(check_insta, 'interval', minutes=int(intervallength), kwargs=kwargs)
    #sched.print_jobs()
    line = input()
    while line:
        if line == "state":
            print("\t" + bool_to_private_text(list_current_state[0]))
        if line == "username":
            # BUG FIX: username is a one-element list; printing the list
            # itself raised TypeError (str + list).
            print("\t" + username[0])
        line = input()
# print(boiling.is_name_private('thechristinap'))
start_checks()
| StarcoderdataPython |
4847467 | <reponame>lchx1010/pxf
import os
import shutil
import unittest2 as unittest
from tinctest.lib.system import TINCSystem, TINCSystemException
class TINCSystemTests(unittest.TestCase):
    """Tests for TINCSystem.make_dirs: creation, duplicate creation with
    and without ignore_exists_error, and rejection of relative paths."""

    def test_make_dirs(self):
        # A missing directory is created successfully.
        test_dir = os.path.join(os.path.dirname(__file__), 'test_mkdirs')
        if os.path.exists(test_dir):
            shutil.rmtree(test_dir)
        self.assertFalse(os.path.exists(test_dir))
        TINCSystem.make_dirs(test_dir)
        self.assertTrue(os.path.exists(test_dir))

    def test_make_dirs_existing(self):
        # Creating an already-existing directory raises OSError by default.
        test_dir = os.path.join(os.path.dirname(__file__), 'test_mkdirs')
        if os.path.exists(test_dir):
            shutil.rmtree(test_dir)
        self.assertFalse(os.path.exists(test_dir))
        TINCSystem.make_dirs(test_dir)
        self.assertTrue(os.path.exists(test_dir))
        # This should fail
        with self.assertRaises(OSError) as cm:
            TINCSystem.make_dirs(test_dir)

    def test_make_dirs_existing_ignore(self):
        # With ignore_exists_error=True the duplicate creation is silent.
        test_dir = os.path.join(os.path.dirname(__file__), 'test_mkdirs')
        if os.path.exists(test_dir):
            shutil.rmtree(test_dir)
        self.assertFalse(os.path.exists(test_dir))
        TINCSystem.make_dirs(test_dir)
        self.assertTrue(os.path.exists(test_dir))
        # This should not fail and ignore existing error
        TINCSystem.make_dirs(test_dir, ignore_exists_error = True)

    def test_make_dirs_relative_path(self):
        # Relative paths are rejected with TINCSystemException.
        test_dir = 'test/output'
        with self.assertRaises(TINCSystemException) as cm:
            TINCSystem.make_dirs(test_dir)
| StarcoderdataPython |
3266524 | <reponame>pashakondratyev/ParseBook<filename>parse.py<gh_stars>1-10
import sys
from threads.threads import Threads, Thread, Message
import json, html
# HTML markers that delimit a conversation thread and an individual
# message in the Facebook "messages" HTML export.
THREAD_TAG = '<div class="thread">'
MESSAGE_TAG = '<div class="message">'
def main(messages_path, out_path):
    """Read the exported messages HTML, parse every thread, and write the
    result to *out_path* as pretty-printed JSON."""
    with open(messages_path) as fp:
        messages_html = fp.read()
    threads = parse_html(messages_html)
    with open(out_path, "w") as fp:
        json.dump(threads, fp, indent=2)
def parse_html(msgs_html):
    """Parse the whole messages HTML dump into a JSON-serialisable dict.

    Walks the document thread by thread (THREAD_TAG) and message by
    message (MESSAGE_TAG) using raw string offsets, then flattens every
    Thread object into a plain dict of people/messages/size.
    """
    threads = Threads()
    # The <h1> holds the account owner's name.
    h1_open = msgs_html.find("<h1>") + len("<h1>")
    h1_close = msgs_html.find("</h1>", h1_open)
    threads.owner = html.unescape( msgs_html[h1_open:h1_close].strip() )
    next_thread = msgs_html.find(THREAD_TAG, h1_close) + len(THREAD_TAG)
    while (next_thread < len(msgs_html) ):
        thread_index = next_thread
        # Locate the start of the following thread (or end of document)
        # so we know where this thread's messages stop.
        next_thread = msgs_html.find(THREAD_TAG, thread_index)
        next_thread = next_thread + len(THREAD_TAG) if next_thread != -1 else len(msgs_html)
        thread = get_thread_for_people(msgs_html, thread_index, threads)
        next_msg = msgs_html.find(MESSAGE_TAG, thread_index) + len(MESSAGE_TAG)
        while (next_msg < next_thread):
            msg_index = next_msg
            next_msg = get_message(msgs_html, msg_index, thread)
            next_msg = next_msg + len(MESSAGE_TAG) if next_msg != -1 else next_thread
        # --- end of thread ---
        messages_to_list(thread)
    # --- end of all threads ---
    # Replace each Thread object with a plain dict so json.dump works.
    for people in threads.threads:
        thread = threads[people]
        threads[people] = {
            'people': thread.people,
            'messages': thread.message_list,
            'size': len(thread.message_list)
        }
    return threads.__dict__
def messages_to_list(thread):
    """Drain the thread's linked list of Message objects into
    thread.message_list as plain dicts, then reset the linked list.

    thread.size counts the messages currently chained on thread.messages;
    after conversion both size and the chain are reset for the next batch.
    """
    message = thread.messages
    start = len(thread.message_list)
    # Pre-size the destination slots, then fill them in chain order.
    thread.message_list.extend( [None] * thread.size )
    for i in range(start, start + thread.size):
        thread.message_list[i] = {
            'sender': message.sender,
            'created_at': message.created_at,
            'content': message.content
        }
        message = message.next_msg
    thread.size = 0
    thread.messages = Message()
def get_thread_for_people(msgs_html, start, threads):
    """Return (creating if needed) the Thread keyed by the participant
    list that appears between *start* and the first MESSAGE_TAG.

    Participant names are stripped of any '@domain' suffix and sorted so
    the same set of people always maps to the same key.
    """
    end = msgs_html.find(MESSAGE_TAG, start)
    people = html.unescape( msgs_html[start:end].strip() ).split(', ')
    people = [p.split('@')[0] for p in people]
    people.sort()
    people = " ".join(people)
    if not threads[people]: threads[people] = Thread(people)
    return threads[people]
def get_message(msgs_html, start, thread):
    """Parse one message starting at *start* and prepend it to *thread*.

    Returns the offset of the next MESSAGE_TAG (or -1 at the end, which
    the caller translates into the thread boundary).
    """
    next_msg = msgs_html.find(MESSAGE_TAG, start)
    # NOTE(review): when next_msg is -1 this slice drops the final
    # character of the document -- harmless for trailing markup, but
    # worth confirming against a real export.
    msg_html = msgs_html[start:next_msg].strip().strip("\n")
    sender = get_tag(msg_html, '<span class="user">', '</span>')
    created_at = get_tag(msg_html, '<span class="meta">', '</span>')
    content = get_tag(msg_html, '<p>', '</p>')
    # Messages appear newest-first in the export; prepending restores
    # chronological order in the thread.
    thread.prepend_message( Message(sender, created_at, content) )
    thread.size += 1
    return next_msg
def get_tag(string, tag_open, tag_close):
    """Return the HTML-unescaped, whitespace-stripped text between the
    first occurrence of *tag_open* and the following *tag_close*."""
    begin = string.find(tag_open) + len(tag_open)
    end = string.find(tag_close, begin)
    return html.unescape(string[begin:end].strip())
# Entry point: expects the input HTML path and the output JSON path.
if not len(sys.argv) == 3:
    print("ERROR need in and out path")
    sys.exit()

main(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
1753272 | <reponame>godsgift/gdohs<gh_stars>0
##########################################################################
#
# IMPORTS
#
##########################################################################
import unittest
import re
import time
import app
from pymongo import MongoClient
##########################################################################
#
# CLASSES
#
##########################################################################
##########################################################################
# Class Name: gdohs_tests
#
# Function Names: test_create_savefile()
# test_checkWhitespace()
# test_string_split_res
#
# Parameters: unittest.TestCase
#
# Posted Data: None
#
# Return Value: None
#
# Description:
# Tests most of the helper functions that can be tested. The tests are
# all done in the manner of passing in the correct expected values
# and passing in the incorrect values. This way, even if we were to
# change the function, as long as we are expecting a specific value,
# we are able to check whether the test passes or not.
#
##########################################################################
class gdohs_tests(unittest.TestCase):
    """Unit tests for the helper functions in app.py.

    Each helper is exercised with both valid and invalid input, so the
    tests stay meaningful even if the implementations change, as long as
    the expected return values and exceptions hold.
    """

    def test_create_savefile(self):
        """create_savefile accepts only 'video' or 'image', returning a
        timestamped path for each; any other media kind raises."""
        stamp = time.strftime("%Y-%m-%d,%I%M%S")
        expected_video = "videos/" + stamp + ".h264"
        expected_image = "motion-images/" + stamp + ".jpeg"
        self.assertEqual(app.create_savefile("video"), expected_video)
        self.assertEqual(app.create_savefile("image"), expected_image)
        with self.assertRaises(Exception):
            app.create_savefile("GIF")

    def test_checkWhitespace(self):
        """checkWhitespace is True for strings without spaces and False
        for strings containing one."""
        self.assertTrue(app.checkWhitespace("TEST"), True)
        self.assertFalse(app.checkWhitespace("TE ST"), False)

    def test_string_split_res(self):
        """string_split_res parses '<width>x<height>' into an int pair
        and raises ValueError for anything malformed."""
        self.assertEqual(app.string_split_res("640x480"), (640, 480))
        with self.assertRaises(ValueError):
            app.string_split_res("freexere")
app.string_split_res("freexere")
##########################################################################
#
# MAIN
#
##########################################################################
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
3499082 | """
Encoding and decoding classes for Python 2 client/server communication.
This module is used by both the Python 2 server and the Python 3 client, with
some modification to handle object references.
The encoding supports basic types and data structures. Everything else will
be encoded as an opaque object reference.
The basic encoding algorithm involves recursively iterating over the members of
each container type. For each object traversed, the encoder adds the object
to an internal cache. If an object is seen again, it is encoded as a pointer
to the previous occurrence. The decoder traverses the encoding in the same
order and maintains a similar cache which is used to translate cache pointers.
This simple algorithm runs into trouble when dealing with tuples containing
circular references. During decoding, a tuple's members must be decoded
before the tuple can be created, since the tuple is immutable after
instantiation. But this would create a problem if we encounter a cache pointer
to the tuple before the tuple had been instantiated.
To resolve this issue, we must modify the simple preorder traversal initially
described. When we encounter a mutable collection (list or dict)*, we
initially create an empty placeholder for the collection and come back to it
later once we have traversed all other reachable objects. This ensures that by
the time we encounter a cache pointer the cached object is guaranteed to be
initialized.
For example, suppose we have the following tuple `T`::
T = ([T], 1)
When encoding, we initially add `T` to the cache. Then we encounter the list
`[T]`. We create a placeholder in the encoding and remember it for later.
Next we encode `1`. Finally, we return to `[T]` and recur into the list.
Since `T` is in the cache, we encode the nested occurrence as a cache pointer.
When decoding, we begin decoding the elements of `T`. When we get to the
encoding of `[T]`, we create an empty list and continue, remembering our place
for later. Then we decode `1` and initialize `T` to `([], 1)`, storing it in
the cache. Next we return to the encoded list update the list with its decoded
contents. When we get to the cache reference for `T`, we can look it up in the
cache with no problems since it has already been initialized.
*It is not necessary to do the placeholder procedure for sets, even though they
are mutable, because any circularly-referential data structure must contain a
mutable object, which makes it unhashable.
"""
# Possible improvements:
#
# - Is the complexity really worth it? How likely are circular references
# anyway?
#
# - Could detect and error out instead, or document as a limitation
#
# - Current algorithm requires encoding/decoding to occur in consistent order
# within session. Can we avoid this?
#
# - Is there a way to avoid incurring the costs of caching when not needed?
# maybe a two-pass algorithm that checks before encoding?
import base64
import collections
import sys
# Version-dependent aliases so the same encoder/decoder code runs under
# both Python 2 and Python 3.
PYTHON_VERSION = sys.version_info[0]

if PYTHON_VERSION == 2:
    _long = long  # noqa
    _bytes = str
    _unicode = unicode  # noqa
    _range = xrange  # noqa
    _items = dict.iteritems
elif PYTHON_VERSION == 3:
    _long = int
    _bytes = bytes
    _unicode = str
    _range = range
    _items = dict.items
else:
    raise Exception("Unsupported Python version: {}".format(PYTHON_VERSION))

# Types encoded by value (scalars, strings, ranges) versus by recursive
# traversal (containers); anything outside _supported_types is encoded
# as an opaque reference via _enc_ref().
_value_types = frozenset({
    type(None), type(NotImplemented), type(Ellipsis), bool, int, _long, float,
    complex, _bytes, _unicode, bytearray, _range
})
_container_types = frozenset({slice, list, tuple, set, frozenset, dict})
_supported_types = _value_types | _container_types
class EncodingDepth(object):
    """Symbolic depth values controlling how far an object is converted.

    REF encodes the object purely as a reference, SHALLOW converts only
    the top level to a value, and DEEP recurses through the whole
    structure.
    """
    REF = 0
    SHALLOW = 1
    DEEP = -1
class BaseEncodingSession(object):
    """ Base encoder for Python 2 client and server.

    Subclasses must implement _enc_ref() to encode objects that fall
    outside the supported basic types.
    """

    def __init__(self):
        self.session = {}
        self.deferred = collections.deque()

    def encode(self, obj, depth=EncodingDepth.DEEP):
        """ Encode an object. """
        data = self._enc(obj, depth)
        # Fill in placeholders for mutable containers only after the main
        # traversal, so cache pointers never precede initialization (see
        # the module docstring).
        while self.deferred:
            self.deferred.popleft()()
        return data

    def _enc(self, obj, depth):
        """ Encode one object, recursing through `depth` more levels. """
        t = type(obj)
        if depth and any(t is s for s in _supported_types):
            if t in _container_types:
                # For container types, we include the depth in the cache key.
                # This means that if encoding to a finite depth, a given
                # container object will be encoded separately at each depth
                # where it occurs.
                key = id(obj), max(depth, -1)
            else:
                key = id(obj)
            if key in self.session:
                return dict(type='cached', index=self.session[key][0])

            # Store cached objects to prevent garbage collection
            # This ensures that ids uniquely map to objects over the life of
            # the session.  We can't use a WeakKeyDictionary to avoid this
            # because most builtin types do not support weak references.
            self.session[key] = len(self.session), obj

            # Singleton objects
            if obj is None:
                return dict(type='None')
            elif obj is NotImplemented:
                return dict(type='NotImplemented')
            elif obj is Ellipsis:
                return dict(type='Ellipsis')

            # Numerical types
            elif t is bool:
                return dict(type='bool', value=obj)
            elif t is int or t is _long:
                return dict(type='int', value=obj)
            elif t is float:
                return dict(type='float', value=obj)
            elif t is complex:
                return dict(type='complex', real=obj.real, imag=obj.imag)

            # String types
            elif t is _bytes:
                return self._enc_bdata('bytes', obj)
            elif t is _unicode:
                return self._enc_bdata('unicode', obj.encode('utf8'))
            elif t is bytearray:
                return self._enc_bdata('bytearray', obj)

            # Range and slice
            elif t is _range:
                return self._enc_range(obj)
            elif t is slice:
                return dict(type='slice',
                            start=self._enc(obj.start, depth-1),
                            stop=self._enc(obj.stop, depth-1),
                            step=self._enc(obj.step, depth-1))

            # Container types
            elif t is list:
                # Mutable container: emit a placeholder now, fill it after
                # the rest of the traversal (handles circular references).
                d = dict(type='list', items=Placeholder)
                self.deferred.append(
                    lambda: d.update(items=self._enc_items(obj, depth-1)))
                return d
            elif t is tuple:
                return dict(type='tuple', items=self._enc_items(obj, depth-1))
            elif t is set:
                return dict(type='set', items=self._enc_items(obj, depth-1))
            elif t is frozenset:
                return dict(type='frozenset',
                            items=self._enc_items(obj, depth-1))
            elif t is dict:
                d = dict(type='dict', items=Placeholder)
                self.deferred.append(
                    lambda: d.update(items=[self._enc_kv(key, value, depth-1)
                                            for key, value in _items(obj)]))
                return d
            else:
                # Should never happen
                raise AssertionError("Unexpected type: {}".format(t.__name__))

        # Encode as reference
        return self._enc_ref(obj)

    def _enc_bdata(self, type_, data):
        """ Encode binary data. """
        return dict(type=type_, data=base64.b64encode(data).decode('ascii'))

    def _enc_items(self, itr, depth):
        """ Encode a collection of items. """
        return [self._enc(item, depth) for item in itr]

    if PYTHON_VERSION == 2:
        def _enc_range(self, range_):
            """ Encode a range object. """
            # Python 2 xrange has no start/stop/step attributes; recover
            # them from its __reduce__ form.
            start, stop, step = range_.__reduce__()[1]
            return dict(type='range', start=start, stop=stop, step=step)
    else:
        def _enc_range(self, range_):
            """ Encode a range object. """
            return dict(type='range', start=range_.start, stop=range_.stop,
                        step=range_.step)

    def _enc_kv(self, key, value, depth):
        """ Encode a dict key-value pair. """
        return dict(key=self._enc(key, depth), value=self._enc(value, depth))

    def _enc_ref(self, obj):
        """ Encode an object as a reference. """
        # Implemented by client/server subclasses.
        # BUG FIX: this previously raised `NotImplemented()`; NotImplemented
        # is a sentinel value, not an exception type, so calling it produced
        # a TypeError instead of the intended "abstract method" error.
        raise NotImplementedError()
class BaseDecodingSession(object):
    """ Base decoder for Python 2 client and server.

    Subclasses must implement _dec_ref() to resolve object references.
    """

    def __init__(self):
        self.session = []
        self.deferred = collections.deque()

    def decode(self, data):
        """ Decode an encoded object. """
        obj = self._dec(data)
        # Populate mutable containers deferred during the main traversal,
        # mirroring the encoder's ordering (see the module docstring).
        while self.deferred:
            self.deferred.popleft()()
        return obj

    def _dec(self, data):
        """ Decode an encoded object. """
        dtype = data['type']

        if dtype == 'ref':
            return self._dec_ref(data)
        if dtype == 'cached':
            # By construction the referenced object is initialized before
            # any pointer to it is decoded.
            assert self.session[data['index']] is not Placeholder
            return self.session[data['index']]

        # Reserve this object's cache slot up front so nested decoding
        # assigns subsequent indices in encoder order.
        cache_index = len(self.session)
        self.session.append(Placeholder)

        def _cache(obj):
            self.session[cache_index] = obj
            return obj

        # Singleton objects
        if dtype == 'None':
            return _cache(None)
        elif dtype == 'NotImplemented':
            return _cache(NotImplemented)
        elif dtype == 'Ellipsis':
            return _cache(Ellipsis)

        # Numeric types
        elif dtype in ('bool', 'int', 'float'):
            return _cache(data['value'])
        elif dtype == 'complex':
            return _cache(complex(real=data['real'], imag=data['imag']))

        # String types
        elif dtype == 'bytes':
            return _cache(self._dec_bdata(data))
        elif dtype == 'unicode':
            return _cache(self._dec_bdata(data).decode('utf8'))
        elif dtype == 'bytearray':
            return _cache(bytearray(self._dec_bdata(data)))

        # Range and slice
        elif dtype == 'range':
            return _cache(_range(data['start'], data['stop'], data['step']))
        elif dtype == 'slice':
            return _cache(slice(self._dec(data['start']),
                                self._dec(data['stop']),
                                self._dec(data['step'])))

        # Container types
        elif dtype == 'list':
            # Mutable container: cache an empty list immediately, extend it
            # after the rest of the traversal (handles circular references).
            lst = _cache([])
            self.deferred.append(lambda: lst.extend(self._dec_items(data)))
            return lst
        elif dtype == 'tuple':
            return _cache(tuple(self._dec_items(data)))
        elif dtype == 'set':
            return _cache(set(self._dec_items(data)))
        elif dtype == 'frozenset':
            return _cache(frozenset(self._dec_items(data)))
        elif dtype == 'dict':
            dct = _cache({})
            self.deferred.append(
                lambda: dct.update(self._dec_dict_items(data)))
            return dct
        else:
            # BUG FIX: dtype is a string, which has no __name__ attribute;
            # the original `dtype.__name__` raised AttributeError instead of
            # this TypeError.
            raise TypeError("Invalid data type: {}".format(dtype))

    def _dec_bdata(self, data):
        """ Decode base64-encoded binary data. """
        return base64.b64decode(data['data'].encode('ascii'))

    def _dec_items(self, data):
        """ Decode a sequence of encoded items lazily. """
        return (self._dec(item) for item in data['items'])

    def _dec_dict_items(self, data):
        """ Decode encoded key-value pairs lazily. """
        return ((self._dec(kv['key']), self._dec(kv['value']))
                for kv in data['items'])

    def _dec_ref(self, data):
        """ Decode an object reference. """
        # Implemented by client/server subclasses.
        # BUG FIX: previously raised `NotImplemented()` (a TypeError at
        # runtime, since NotImplemented is not callable); raise the proper
        # NotImplementedError instead.
        raise NotImplementedError()
class PlaceholderType(object):
    """Type of the `Placeholder` singleton that marks a not-yet-
    initialized slot during encoding/decoding."""
    __slots__ = ()
    # Deliberately unhashable: a placeholder must never end up as a dict
    # key or set member.
    __hash__ = None


Placeholder = PlaceholderType()
| StarcoderdataPython |
6405235 | <reponame>gaufung/CodeBase
import time
import threading
def consumer(cond):
    """Block on *cond* until a producer signals that the resource is
    available, then report it.  (The typo in the message is preserved
    because it is part of the program's observable output.)"""
    name = threading.current_thread().name
    with cond:
        cond.wait()
        print('{}: resource is avaiable to the consumer'.format(name))
    return
def producer(cond):
    """Announce that the resource is ready and wake every thread waiting
    on *cond*."""
    t = threading.current_thread()
    with cond:
        print('{}: Making resource available'.format(t.name))
        # FIX: notifyAll() is the deprecated camelCase alias; notify_all()
        # is the supported spelling.
        cond.notify_all()
    return
# Demo: two consumers block on the shared condition; the producer started
# last wakes them both.
condition = threading.Condition()
c1 = threading.Thread(target=consumer,args=(condition,))
c2 = threading.Thread(target=consumer, args=(condition,))
p = threading.Thread(target=producer, args=(condition,))

c1.start()
time.sleep(1)   # stagger the starts so both consumers reach wait()
c2.start()
time.sleep(2)
p.start()
9749635 | <filename>logger/asynx.py
from functools import wraps
from multiprocessing import Process, get_context
from multiprocessing.queues import Queue
from threading import Thread
import time
from multiprocessing import Lock
class BlockedQueue(Queue):
    """multiprocessing Queue whose blocking behaviour is fixed at
    construction time rather than per call.

    The block/timeout arguments accepted by put()/get() are ignored on
    purpose; the values chosen in __init__ always win.  get() returns
    None instead of blocking when the queue looks empty.
    """

    def __init__(self, maxsize=-1, block=True, timeout=None):
        self.block = block
        self.timeout = timeout
        super().__init__(maxsize, ctx=get_context())

    def put(self, obj, block=True, timeout=None):
        super().put(obj, block=self.block, timeout=self.timeout)

    def get(self, block=True, timeout=None):
        if not self.empty():
            return super().get(block=self.block, timeout=self.timeout)
        return None
def _execute(queue, f, *args, **kwargs):
try:
queue.put(f(*args, **kwargs))
except Exception as e:
queue.put(e)
def threaded(timeout=None, block=True):
    """Decorator: run the wrapped function in a Thread and return whatever
    the shared one-slot queue yields (None when non-blocking and no result
    is ready yet)."""
    def decorator(func):
        # One queue per decorated function, shared by every call.
        queue = BlockedQueue(1, block, timeout)

        @wraps(func)
        def wrapper(*args, **kwargs):
            worker = Thread(target=_execute, args=(queue, func) + args, kwargs=kwargs)
            worker.start()
            return queue.get()

        return wrapper
    return decorator
def processed(timeout=None, block=True):
    """Decorator: run the wrapped function in a separate Process and return
    whatever the shared one-slot queue yields (None when non-blocking and
    no result is ready yet)."""
    def decorator(func):
        # One queue per decorated function, shared by every call.
        queue = BlockedQueue(1, block, timeout)

        @wraps(func)
        def wrapper(*args, **kwargs):
            worker = Process(target=_execute, args=(queue, func) + args, kwargs=kwargs)
            worker.start()
            return queue.get()

        return wrapper
    return decorator
def async_call(async_api=Thread, timeout=None, block=True):
    """Generic form of threaded()/processed(): run the wrapped function via
    *async_api* (Thread or Process) and return the first value available
    from the shared one-slot queue."""
    def decorator(func):
        queue = BlockedQueue(1, block, timeout)

        @wraps(func)
        def wrapper(*args, **kwargs):
            args = (queue, func) + args
            # BUG FIX: the original stored this in a variable literally
            # named `async`, which is a reserved keyword from Python 3.7
            # onwards and makes the whole module fail to parse.
            runner = async_api(target=_execute, args=args, kwargs=kwargs)
            runner.start()
            return queue.get()

        return wrapper
    return decorator
def scheduled(period, delay=None, loop_count=None):
    """Decorator: invoke the wrapped function every *period* seconds after
    an optional initial *delay*.  A positive *loop_count* stops the loop
    after that many calls; otherwise it runs forever."""
    initial_delay = delay or 0
    max_calls = loop_count or 0

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            time.sleep(initial_delay)
            calls_made = 0
            while True:
                tick = time.time()
                if max_calls > 0:
                    if calls_made == max_calls:
                        break
                    calls_made += 1
                func(*args, **kwargs)
                elapsed = time.time() - tick
                if elapsed < period:
                    # Sleep off the remainder of the period.
                    time.sleep(period - elapsed)

        return wrapper
    return decorator
# Module-wide default lock shared by every function decorated without an
# explicit lock argument.
simple_lock = Lock()


def synchronized(lock=simple_lock):
    """Decorator: serialise calls to the wrapped function under *lock*
    (the shared module-level lock by default)."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with lock:
                return func(*args, **kwargs)

        return wrapper
    return decorator
if __name__ == '__main__':
    # Ad-hoc demo exercising each decorator (and stacked combinations).

    @threaded(block=False)
    def test1(x):
        time.sleep(x)
        print("test 1")

    @processed(block=False)
    def test2(x):
        time.sleep(x)
        print("test 2")

    @threaded(block=False)
    @scheduled(period=2, loop_count=3)
    def test3(x):
        time.sleep(x)
        print("test 3")

    @threaded()
    @scheduled(period=1, loop_count=2)
    @processed()
    def test_pow(x):
        print(x * x)

    @threaded()
    @synchronized()
    def lock_test_a():
        print('lock_test_a')

    @async_call(Thread)
    @synchronized()
    def lock_test_b():
        print('lock_test_b')

    test3(0)
    test1(2)
    test2(1)
    test_pow(5)
    lock_test_a()
    lock_test_b()
11305687 | from src.gtk_helper import GtkHelper
class InfobarWarningsViewModel:
    """View-model for a warnings infobar.

    Exposes the message text and visibility; the GtkHelper decorators
    marshal every widget access (synchronously for the getter, deferred
    for the setters) -- presumably onto the GTK main thread; confirm
    against GtkHelper.
    """

    def __init__(self, infobar, message):
        # infobar: the containing widget; message: the widget holding the
        # warning text (supports get_text/set_text).
        self._infobar = infobar
        self._message = message

    @property
    @GtkHelper.invoke_func_sync
    def message(self):
        """Current warning text."""
        return self._message.get_text()

    @message.setter
    @GtkHelper.invoke_func
    def message(self, value):
        # Exceptions render as "TypeName: message"; everything else is
        # stringified as-is.  Setting a message also shows the bar.
        if isinstance(value, Exception):
            self._message.set_text('{}: {}'.format(type(value).__name__, str(value)))
        else:
            self._message.set_text(str(value))
        self.show(True)

    @GtkHelper.invoke_func
    def show(self, show):
        """Show or hide the infobar."""
        self._infobar.show() if show else self._infobar.hide()
| StarcoderdataPython |
11372802 | <filename>tfsrep/lib/assets.py
import plotly.graph_objs as go
import plotly.offline as offline
from tfsrep.lib.template import Template
class Assets:
    """Builds report assets (HTML status boxes and plotly bar charts).

    `data` is expected to expose `epics`, `stories` and `features`
    objects, each carrying per-field `<field_name>_counts` dicts keyed by
    status (inferred from the getattr calls below -- confirm upstream).
    """
    def __init__(self, config, logger, data):
        self.config = config
        self.logger = logger
        self.data = data
    def generate(self):
        """Populate team/epics/stories/features attributes, then drop inputs."""
        self.set_team()
        setattr(self, "epics", self.get_boxes(self.data.epics))
        setattr(self, "stories", self.get_boxes(self.data.stories))
        setattr(self, "features", self.get_boxes(self.data.features))
        self.close()
        return self
    def close(self):
        # Drop the heavyweight inputs so the returned object carries only
        # the generated assets.
        del self.config
        del self.logger
        del self.data
    def set_team(self):
        """Record the team name (the TFS project) on the instance."""
        self.logger.info('Set Team')
        self.logger.debug('Team: ' + self.config.api_project)
        team = {}
        team['name'] = self.config.api_project
        setattr(self, "team", team)
    def get_boxes(self, items):
        """Build boxes + a bar chart for every mapped field present on `items`.

        Returns a dict keyed by field name, plus 'chart_list' holding the
        plotly div ids of all generated charts.
        """
        parent = {}
        parent['chart_list'] = []
        for x in self.config.field_map:
            d = {}
            if hasattr(items, self.config.field_map[x]['field_name'] + '_counts'):
                data = getattr(items, self.config.field_map[x]['field_name'] + '_counts')
                self.generate_boxes(d, data)
                parent[self.config.field_map[x]['field_name']] = d
                # The plotly markup is a quoted div; token [1] is its id.
                parent['chart_list'].append(d['bar_chart'].split('"')[1])
        return parent
    def generate_boxes(self, obj, data):
        """Render one status box per (status, count) pair plus a bar chart.

        `data` maps status names to counts; box backgrounds cycle through
        indices 0..4 of config.box_color_list.
        """
        obj['box_text'] = ''
        x = []
        y = []
        j = 0
        for key, value in data.items():
            x.append(key)
            y.append(value)
            box = Template(self.config, self.logger)
            box.get("status_box.html")
            box.render(title=key, number=value, background_color=self.config.box_color_list[j])
            obj['box_text'] = obj['box_text'] + box.text
            # Cycle the colour index: 0,1,2,3,4,0,...
            j = 0 if j == 4 else j + 1
        obj['bar_chart'] = offline.plot([go.Bar(x=x, y=y)], output_type='div', validate=False, show_link=False, auto_open=False, include_plotlyjs=False)
        return obj
| StarcoderdataPython |
11351228 | <gh_stars>1-10
import unittest
from htmlcomp import *
from htmlcomp.elements import *
@component
def RedBox(*children, **attributes):
    """Function component: a red-background div wrapping *children*."""
    return div(*children, style="background-color: red;", **attributes)
class OrderedList(Element):
    """Class component: renders a comma-separated `items` attribute as an <ol>.

    parse_items / default_attributes / transform are hooks consumed by the
    htmlcomp Element machinery; note they intentionally take no self/cls.
    """
    def parse_items(items):
        # Attribute string "a,b,c" -> ["a", "b", "c"].
        return items.split(",")
    def default_attributes():
        return dict(_class={"list", "fancy"})
    def transform(*, items, **attributes):
        # Expand into an <ol> with one <li> per parsed item.
        return ol(*[li(item) for item in items], **attributes)
class TestHtmlComp(unittest.TestCase):
    """End-to-end tests for the htmlcomp element tree.

    Covers parse/str/repr round-trips, element construction, the
    attribute and child protocols, function/class components, and tree
    normalisation.
    """
    def _test_html(self, html):
        # Round-trip invariants: parse -> str -> parse and
        # parse -> repr -> eval must both reproduce the same tree.
        root = Element.parse(html)
        self.assertEqual(root, Element.parse(str(root)))
        self.assertEqual(root, eval(repr(root)))
    def test_div(self):
        self._test_html('<div id="greeting" class="apple banana">Hello, <strong>world</strong>!</div>')
    def test_void_element(self):
        self._test_html('<div>Look, <img src="apple.png" alt="Photo of a green apple"> apple!</div>')
    def test_erroneous_self_closing_void_element(self):
        # XML-style self-closing void tags parse the same as plain ones.
        self.assertEqual(
            Element.parse('<div>Oops, <img src="banana.png"/> I did it again!</div>'),
            fragment(div("Oops, ", img(src="banana.png"), " I did it again!"))
        )
    def test_explicit_element_name(self):
        # Element("p", ...) and the p(...) shorthand are equivalent.
        element_text = "The quick brown fox jumps over the lazy dog"
        element_id = "pangram"
        self.assertEqual(
            Element("p", element_text, id=element_id),
            p(element_text, id=element_id)
        )
    def test_callable(self):
        # Calling an element appends children / merges attributes in
        # place and returns the same instance.
        animals = ul(li("cat"))
        new_animals = animals(li("dog"), id="animals")
        self.assertIs(animals, new_animals)
        animals(li("fish"))
        self.assertEqual(
            ul(li("cat"), li("dog"), li("fish"), id="animals"),
            animals
        )
    def test_attribute(self):
        # `in` / del on string keys operate on attributes.
        element = div()
        self.assertFalse("id" in element)
        element(id="foo")
        self.assertTrue("id" in element)
        del element["id"]
        self.assertFalse("id" in element)
    def test_children(self):
        # Sequence protocol over children: len, slicing, deletion,
        # negative indices.
        element = p(strong("Lorem ipsum"), " ", em("dolor sit amet"), ".")
        self.assertEqual(len(element), 4)
        self.assertEqual(element[1:5:2], [" ", "."])
        del element[2]
        self.assertEqual(len(element), 3)
        self.assertEqual(element[2], ".")
        self.assertEqual(element[-1], ".")
    def test_function_component(self):
        element = RedBox(p("some text"), "stuff", blockquote("That's pretty rad!"), id="content")
        self.assertEqual(
            Element.parse(str(element)),
            fragment(div(id="content", style="background-color: red;")(
                p("some text"),
                "stuff",
                blockquote("That's pretty rad!"),
            ))
        )
    def test_class_component(self):
        element = Element.parse('<orderedlist items="alpha,beta,gamma" id="greek-letters"/>')
        self.assertEqual(
            Element.parse(str(element)),
            Element.parse("".join([
                '<ol id="greek-letters" class="list fancy">',
                '<li>alpha</li>',
                '<li>beta</li>',
                '<li>gamma</li>',
                '</ol>'
            ]))
        )
        # Unexpected children make rendering fail ...
        element = Element.parse('<orderedlist items="hi">oops!</orderedlist>')
        with self.assertRaises(TypeError):
            str(element)
        # ... until they are removed.
        element[0][:] = []
        self.assertEqual(
            Element.parse(str(element)),
            fragment(ol(_class={"list", "fancy"})(li("hi")))
        )
    def test_shallow_normalize(self):
        # Shallow: merges text and unwraps fragments one level deep only.
        element = div(
            "a",
            fragment("b", "c", div(), fragment("d"), "e"),
            "f",
            div(),
            "",
            div(),
        )
        element.shallow_normalize()
        self.assertEqual(
            element,
            div(
                "abc",
                div(),
                fragment("d"),
                "ef",
                div(),
                div(),
            )
        )
    def test_normalize(self):
        # Deep: recursively unwraps fragments, merges adjacent text and
        # drops empty strings.
        element = div(
            "a",
            fragment(
                "b",
                fragment(
                    "",
                    "c",
                    div(),
                    "d",
                    "",
                ),
                "e",
                fragment(
                    "f",
                    div(),
                    "",
                    div(),
                ),
                "",
                div(),
                "g"
            ),
            "h"
        )
        element.normalize()
        self.assertEqual(
            element,
            div(
                "abc",
                div(),
                "def",
                div(),
                div(),
                div(),
                "gh"
            )
        )
    def test_mismatched_tags(self):
        with self.assertRaises(ParseError):
            Element.parse("<div></p>")
    def test_extra_closing_tag(self):
        with self.assertRaises(ParseError):
            Element.parse("<p></p></div>")
    def test_contains(self):
        # `in` tests attribute names (str keys only); other types raise.
        element = p("hello", _class=set(), id="greeting")
        self.assertTrue("_class" in element)
        self.assertFalse("class" in element)
        self.assertTrue("id" in element)
        self.assertFalse("hello" in element)
        with self.assertRaises(TypeError):
            0 in element
        with self.assertRaises(TypeError):
            [] in element
    def test_getitem(self):
        # int/slice keys index children; str keys index attributes.
        element = p("hello", "there", _class=set(), id="greeting")
        self.assertEqual(element[0], "hello")
        self.assertEqual(element[1], "there")
        self.assertEqual(element[-1], "there")
        with self.assertRaises(IndexError):
            element[2]
        with self.assertRaises(IndexError):
            element[-3]
        self.assertEqual(element[-1::-1], ["there", "hello"])
        self.assertEqual(element["_class"], set())
        self.assertEqual(element["id"], "greeting")
        with self.assertRaises(KeyError):
            element["class"]
        with self.assertRaises(TypeError):
            element[None]
| StarcoderdataPython |
6641366 | <filename>api/tests/namespaces/v1/test_tags.py<gh_stars>1-10
"""This module contains unit tests for tag resource."""
from functools import partial
import json
import pytest
from pipwatch_api.namespaces.v1.tags import tag_representation_structure
from tests.namespaces.v1.conftest import get_model_repr
from tests.utils import JSONResponse
# Representation builders pre-bound to the tag model: a populated example
# payload and an all-defaults ("empty") payload.
get_tag_repr = partial(get_model_repr, model=tag_representation_structure, id=1, name="test_tag")
get_tag_repr_empty = partial(get_model_repr, model=tag_representation_structure)
def test_default_get_returns_all_tags(app_client, default_store_fixture, mocker) -> None:
    """Endpoint should return list of all tags."""
    expected_response = [get_tag_repr(), get_tag_repr(id=2)]
    default_store_fixture.read_all.return_value = expected_response
    # Swap the real datastore for the fixture before hitting the endpoint.
    mocker.patch("pipwatch_api.namespaces.v1.tags.DefaultStore", return_value = default_store_fixture)
    response: JSONResponse = app_client.get("/api/v1/tags/")
    assert response.status_code == 200
    assert response.content_type == "application/json"
    assert response.json == expected_response
# Each row: (request payload, request content type,
#            (expected status, expected content type, expected body)).
post_create_test_data = [
    (json.dumps(get_tag_repr()), "application/json", (201, "application/json", get_tag_repr())),
    ("some random text", "text", (400, "application/json", get_tag_repr_empty())),
]
@pytest.mark.parametrize("payload, content_type, asserts", post_create_test_data)
def test_post_creates_a_new_tag(payload, content_type, asserts, app_client, default_store_fixture, mocker) -> None:
    """Endpoint should create new tag and return it."""
    # asserts = (status, content type, body) expected from the endpoint.
    default_store_fixture.create.return_value = asserts[2]
    mocker.patch("pipwatch_api.namespaces.v1.tags.DefaultStore", return_value = default_store_fixture)
    response: JSONResponse = app_client.post("/api/v1/tags/", data=payload, content_type=content_type)
    assert response.status_code == asserts[0]
    assert response.content_type == asserts[1]
    assert response.json == asserts[2]
# Each row: (document id, mocked store response,
#            (expected status, expected content type, expected body)).
get_id_test_data = [
    (1, get_tag_repr(), (200, "application/json", get_tag_repr())),
    (2, None, (404, "application/json", get_tag_repr_empty()))
]
@pytest.mark.parametrize("document_id, mock_response, asserts", get_id_test_data)
def test_get_id_returns_document(document_id, mock_response, asserts, app_client,
                                 default_store_fixture, mocker) -> None:
    """Endpoint should get a tag with given id."""
    default_store_fixture.read.return_value = mock_response
    mocker.patch("pipwatch_api.namespaces.v1.tags.DefaultStore", return_value = default_store_fixture)
    response: JSONResponse = app_client.get("/api/v1/tags/{0}".format(document_id))
    # The store must be queried with exactly the requested id.
    default_store_fixture.read.assert_called_with(document_id)
    assert response.status_code == asserts[0]
    assert response.content_type == asserts[1]
    assert response.json == asserts[2]
# Each row: (document id, mocked store response (-1 = store never called),
#            (payload body, content type), (status, content type, body)).
put_test_data = [
    (1, get_tag_repr(), (get_tag_repr(), "application/json"), (200, "application/json", get_tag_repr())),
    (2, -1, ("test", "text"), (400, "application/json", get_tag_repr_empty())),
    (32, None, (get_tag_repr(id=-21), "application/json"), (404, "application/json", get_tag_repr_empty()))
]
@pytest.mark.parametrize("document_id, mock_response, payload, asserts", put_test_data)
def test_put_updates_document(document_id, mock_response, payload,
                              asserts, app_client, default_store_fixture, mocker) -> None:
    """Endpoint should update tag with given id."""
    default_store_fixture.update.return_value = mock_response
    mocker.patch("pipwatch_api.namespaces.v1.tags.DefaultStore", return_value = default_store_fixture)
    response: JSONResponse = app_client.put("/api/v1/tags/{0}".format(document_id),
                                            data=json.dumps(payload[0]),
                                            content_type=payload[1])
    # mock_response == -1 marks the bad-content-type case, where the
    # request is rejected before the store is touched.
    if mock_response != -1:
        default_store_fixture.update.assert_called_with(document_id, payload[0])
    assert response.status_code == asserts[0]
    assert response.content_type == asserts[1]
    assert response.json == asserts[2]
def test_delete_tag(app_client, default_store_fixture, mocker) -> None:
    """Endpoint should delete a tag and answer 204 No Content."""
    mocker.patch("pipwatch_api.namespaces.v1.tags.DefaultStore", return_value = default_store_fixture)
    document_id = 1
    response: JSONResponse = app_client.delete("/api/v1/tags/{}".format(document_id))
    assert response.status_code == 204
    assert response.content_type == "application/json"
382520 | """Module for working with Illumina FASTQ files."""
import os
import gzip
from dtoolsid.utils import is_file_extension_in_list
def parse_fastq_title_line(fastq_title_line):
    """Parse an Illumina FASTQ title ("@...") line into a metadata dict.

    Expects the Casava 1.8+ layout:
    ``@instrument:run:flowcell:lane:tile:x:y read:filtered:control:index``.
    Returns a dict keyed by field name with values cast to the proper
    type (ints for numeric fields, bool for the chastity-filter flag).
    """
    def illumina_bool(flag):
        # Illumina encodes the chastity-filter flag as a literal "Y"/"N".
        if flag == "Y":
            return True
        if flag == "N":
            return False
        raise(ValueError)

    field_casts = (
        ("instrument", str),
        ("run_number", int),
        ("flowcell_id", str),
        ("lane", int),
        ("tile", int),
        ("x_pos", int),
        ("y_pos", int),
        ("read", int),
        ("is_filtered", illumina_bool),
        ("control_number", int),
        ("index_sequence", str),
    )

    assert fastq_title_line[0] == '@'
    halves = fastq_title_line[1:].split(" ")
    assert len(halves) == 2
    raw_values = halves[0].split(":") + halves[1].split(":")
    assert len(raw_values) == len(field_casts)
    parsed = {}
    for (field_name, cast), raw in zip(field_casts, raw_values):
        parsed[field_name] = cast(raw)
    return parsed
def extract_metadata_from_fastq_file_object(fh):
    """Read the first line of an open FASTQ handle and parse its title.

    Works for handles opened in text or binary mode: a bytes line is
    decoded as UTF-8, a str line is used as-is.
    """
    title_line = fh.readline().strip()
    # Binary handles yield bytes, which carry a .decode method.
    if hasattr(title_line, "decode"):
        title_line = title_line.decode('utf-8')
    return parse_fastq_title_line(title_line)
def extract_metadata_from_fastq_file(filename):
    """Parse the first-record title metadata from a FASTQ file.

    Tries the file as plain text first; a UnicodeDecodeError is taken to
    mean the file is gzip-compressed, and it is reopened through gzip.
    """
    try:
        with open(filename) as fh:
            metadata = extract_metadata_from_fastq_file_object(fh)
    except UnicodeDecodeError:
        # Binary input: reopen via gzip and parse the (bytes) first line.
        with gzip.open(filename, 'rb') as fh:
            metadata = extract_metadata_from_fastq_file_object(fh)
    return metadata
def create_illumina_metadata_overlay(dataset):
    """Create overlay derived from Illumina FQ metadata, and write it to
    dataset.

    Only items with a .fq or .fq.gz extension get metadata extracted;
    other identifiers are left unset in the overlay.
    """
    illumina_metadata_overlay = dataset.empty_overlay()
    for identifier in dataset.identifiers:
        abspath = dataset.abspath_from_identifier(identifier)
        if is_file_extension_in_list(abspath, ['fq', 'fq.gz']):
            metadata = extract_metadata_from_fastq_file(abspath)
            illumina_metadata_overlay[identifier] = metadata
    dataset.persist_overlay(
        "illumina_metadata",
        illumina_metadata_overlay
    )
| StarcoderdataPython |
180529 | <reponame>omk42/a<filename>Arrays/find_triplets.py
# Python3 program to count triplets with
# sum smaller than a given value
# Function to count triplets with sum smaller
# than a given value
def countTriplets(arr, n, sum):
    """Count index triplets i<j<k in `arr` whose element sum is < `sum`.

    NOTE: sorts `arr` in place, and the parameter `sum` shadows the
    builtin (kept for caller compatibility).  O(n^2) two-pointer sweep
    over the sorted array.
    """
    arr.sort()
    total = 0
    # Fix the smallest element, then sweep the remaining range with two
    # pointers.
    for first in range(n - 2):
        lo = first + 1
        hi = n - 1
        while lo < hi:
            if arr[first] + arr[lo] + arr[hi] < sum:
                # arr is sorted, so every index in (lo, hi] also forms a
                # valid triplet with (first, lo).
                total += hi - lo
                lo += 1
            else:
                # Sum too large: shrink from the right.
                hi -= 1
    return total
# Driver program
if __name__ == '__main__':
    arr = [5, 3, 4, 7,1]
    n = len(arr)
    sum = 12
    # Expected output: 4 (number of triplets summing below 12).
    print(countTriplets(arr, n, sum))
| StarcoderdataPython |
4935255 | <reponame>ztang4/codetest
import plotly.graph_objects as go
import numpy as np
import dash_html_components as html
import dash_core_components as dcc
import plotly.graph_objects as go
import dash
# Styling constants for the 3-D hexapod-body figure.
# NOTE(review): GROUND_COLOR is defined but never used in this excerpt.
BODY_COLOR = "#8e44ad"
BODY_OUTLINE_WIDTH = 10
AXIS_ZERO_LINE_COLOR = "#ffa801"
GROUND_COLOR = "rgb(240, 240, 240)"
PAPER_BG_COLOR = "white"
LEGENDS_BG_COLOR = "rgba(255, 255, 255, 0.5)"
LEGEND_FONT_COLOR = "#34495e"
# Two traces sharing the same vertex lists: a filled mesh for the body
# and a scatter3d outline drawn over it.
data = [
    {'color': 'rgba(244,22,100,0.6)',
     'opacity': 0.9,
     'type': 'mesh3d',
     'x': [100.0, 100.0, -100.0, -100.0, -100.0, 100.0, 100.0],
     'y': [0.0, 100.0, 100.0, 0.0, -100.0, -100.0, 0.0],
     'z': [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
    },
    {
        "line": {"color": BODY_COLOR, "opacity": 1, "width": BODY_OUTLINE_WIDTH},
        "name": "body",
        "showlegend": True,
        "type": "scatter3d",
        "uid": "1f821e07-2c02-4a64-8ce3-61ecfe2a91b6",
        "x": [100.0, 100.0, -100.0, -100.0, -100.0, 100.0, 100.0],
        "y": [0.0, 100.0, 100.0, 0.0, -100.0, -100.0, 0.0],
        "z": [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0],
    }
]
# Full plotly figure dict fed to dcc.Graph below.
# NOTE(review): the scene configures xaxis/yaxis but no zaxis -- confirm
# whether that is intentional.
HEXAPOD_FIGURE = {
    "data": data,
    "layout": {
        "paper_bgcolor": PAPER_BG_COLOR,
        "hovermode": "closest",
        "legend": {
            "x": 0,
            "y": 0,
            "bgcolor": LEGENDS_BG_COLOR,
            "font": {"family": "courier", "size": 12, "color": LEGEND_FONT_COLOR},
        },
        "margin": {"b": 20, "l": 10, "r": 10, "t": 20},
        "scene": {
            "aspectmode": "manual",
            "aspectratio": {"x": 1, "y": 1, "z": 1},
            "camera": {
                "center": {
                    "x": 0.0348603742736399,
                    "y": 0.16963779995083,
                    "z": -0.394903376555686,
                },
                "eye": {
                    "x": 0.193913968006015,
                    "y": 0.45997575676993,
                    "z": -0.111568465000231,
                },
                "up": {"x": 0, "y": 0, "z": 1},
            },
            "xaxis": {
                "nticks": 1,
                "range": [-600, 600],
                "zerolinecolor": AXIS_ZERO_LINE_COLOR,
                "showbackground": False,
            },
            "yaxis": {
                "nticks": 1,
                "range": [-600, 600],
                "zerolinecolor": AXIS_ZERO_LINE_COLOR,
                "showbackground": False,
            }
        }
    }
}
# Earlier minimal-figure variant, kept for reference:
# HEXAPOD_FIGURE = {
#     "data": data,
#     'layout': {'template': '...'}
# }
# Single-graph Dash app rendering the figure above.
app = dash.Dash(__name__)
app.layout = html.Div(
    children=[
        html.Div(className='eight columns div-for-charts bg-grey',
                 children=[
                     dcc.Graph(id='timeseries', figure=HEXAPOD_FIGURE)
                 ])
    ]
)
if __name__ == '__main__':
    app.run_server(debug=True)
6419066 | <reponame>DomGonthier/PecheFantome
import GGlib
import mysql.connector
import gpxpy
#import gpxpy.gpx # needed if installed via : apt install python3-gpxpy
from xml.etree import ElementTree as ET
#############################################################
# ****** ******* ** ****** **** ** ******** **********
# **////**/**////** **** /*////** /**/** /**/**///// /////**///
# ** // /** /** **//** /* /** /**//** /**/** /**
# /** /******* ** //** /****** /** //** /**/******* /**
# /** /**///** **********/*//// **/** //**/**/**//// /**
# //** **/** //** /**//////**/* /**/** //****/** /**
# //****** /** //**/** /**/******* /** //***/******** /**
# ////// // // // // /////// // /// //////// //
#
#
# __ __
# / <` '> \
# ( / @ @ \ )
# \(_ _\_/_ _)/
# (\ `-/ \-' /)
# "===\ /==="
# .==')___(`==.
# ' .=' `=.
#
##############################################################
def get_elements(element_name,element_description):
    """Query reported trap/pot gear from the crabnet DB and return it as GPX.

    Returns a GPX document (XML string) named `element_name`, described by
    `element_description`, with one waypoint per reported trap.
    """
    # SECURITY(review): database credentials are hard-coded here; move
    # them to environment variables or a config file.
    db = mysql.connector.connect(
        host="cidco.ca",
        user="crabnet",
        password="<PASSWORD>$",
        database="crabnet"
    )
    cursor = db.cursor()
    cursor.execute("SELECT * FROM crabnet.dfo_engins WHERE type='CASIER/CAGE - TRAP/POT'")
    result = cursor.fetchall()
    gpx = gpxpy.gpx.GPX()
    gpx.name = element_name
    gpx.description = element_description
    # For every reported trap ("casier"), emit one GPX waypoint.
    # NOTE(review): columns 6/7 are assumed to be longitude/latitude and
    # column 2 the description -- confirm against the dfo_engins schema.
    for trap in result:
        longitude = trap[6]
        latitude = trap[7]
        waypoint = gpxpy.gpx.GPXWaypoint()
        waypoint.longitude = longitude
        waypoint.latitude = latitude
        waypoint.name = "Casier {}".format(trap[0])
        waypoint.description = trap[2]
        gpx.waypoints.append(waypoint)
    areas = gpx.to_xml()
    return areas
def extract_coordinates(areas):
    """Extract [longitude, latitude] pairs from a GPX XML string.

    The root's first child (document metadata) is skipped; every
    following child is expected to carry 'lat' and 'lon' attributes, as
    produced by get_elements().
    """
    root = ET.fromstring(areas)
    coordinates = []
    for waypoint in list(root)[1:]:
        attrs = waypoint.attrib
        coordinates.append([float(attrs['lon']), float(attrs['lat'])])
    return coordinates
def export_to(areas,export_format,filename):
    """Write the GPX string `areas` to `filename` in the requested format.

    Supported formats: "gpx" (raw XML), "shapefile" and "csv" (both via
    GGlib, after extracting coordinates).  Unknown formats print an error
    and write nothing.
    """
    if export_format == "gpx":
        with open(filename+".gpx", "w") as f:
            f.write(areas)
    elif export_format == "shapefile":
        liste_coordinates = extract_coordinates(areas)
        GGlib.ecriture_points_2shp(liste_coordinates,filename)
    elif export_format == "csv":
        liste_coordinates = extract_coordinates(areas)
        GGlib.write_points_2csv(liste_coordinates,filename)
    else:
        error = "error: not a valid format ! \n options are : gpx, shapefile, csv"
        print(error)
| StarcoderdataPython |
1794160 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from osc_lib.tests import utils
from tripleoclient import exceptions as ex
from tripleoclient.workflows import base
class TestBaseWorkflows(utils.TestCommand):
    """Tests for base.wait_for_messages over a mocked Mistral + websocket.

    Payloads may carry the execution id either flat ('execution_id') or
    nested ('execution': {'id': ...}); both shapes are covered, plus
    timeout/closed-socket errors and cross-execution filtering.
    """
    def test_wait_for_messages_success(self):
        """Terminal messages for the watched execution are yielded as-is."""
        payload_a = {
            'status': 'ERROR',
            'execution_id': 2,
            'root_execution_id': 1
        }
        payload_b = {
            'status': 'ERROR',
            'execution_id': 1,
            'root_execution_id': 1
        }
        mistral = mock.Mock()
        websocket = mock.Mock()
        websocket.wait_for_messages.return_value = iter([payload_a, payload_b])
        execution = mock.Mock()
        execution.id = 1
        messages = list(base.wait_for_messages(mistral, websocket, execution))
        self.assertEqual([payload_a, payload_b], messages)
        # A terminal message means no fallback poll of Mistral is needed.
        self.assertFalse(mistral.executions.get.called)
        websocket.wait_for_messages.assert_called_with(timeout=None)
    def test_wait_for_messages_timeout(self):
        """A websocket timeout propagates after a Mistral state check."""
        mistral = mock.Mock()
        websocket = mock.Mock()
        websocket.wait_for_messages.side_effect = ex.WebSocketTimeout
        execution = mock.Mock()
        execution.id = 1
        messages = base.wait_for_messages(mistral, websocket, execution)
        self.assertRaises(ex.WebSocketTimeout, list, messages)
        self.assertTrue(mistral.executions.get.called)
        websocket.wait_for_messages.assert_called_with(timeout=None)
    def test_wait_for_messages_connection_closed(self):
        """A closed websocket propagates after a Mistral state check."""
        mistral = mock.Mock()
        websocket = mock.Mock()
        websocket.wait_for_messages.side_effect = ex.WebSocketConnectionClosed
        execution = mock.Mock()
        execution.id = 1
        messages = base.wait_for_messages(mistral, websocket, execution)
        self.assertRaises(ex.WebSocketConnectionClosed, list, messages)
        self.assertTrue(mistral.executions.get.called)
        websocket.wait_for_messages.assert_called_with(timeout=None)
    def test_wait_for_messages_different_execution(self):
        """Messages from other executions are filtered out."""
        payload_a = {
            'status': 'RUNNING',
            'execution_id': 'aaaa',
            'root_execution_id': 'aaaa'
        }
        payload_b = {
            'status': 'RUNNING',
            'execution_id': 'bbbb',
            'root_execution_id': 'bbbb'
        }
        mistral = mock.Mock()
        mistral.executions.get.return_value.state = 'RUNNING'
        websocket = mock.Mock()
        websocket.wait_for_messages.return_value = iter([payload_a, payload_b])
        execution = mock.Mock()
        execution.id = 'aaaa'
        messages = list(base.wait_for_messages(mistral, websocket, execution))
        # Assert only payload_a was returned
        self.assertEqual([payload_a], messages)
        websocket.wait_for_messages.assert_called_with(timeout=None)
    def test_backwards_compat_wait_for_messages_success(self):
        """The legacy nested 'execution' payload shape is still accepted."""
        payload_a = {
            'status': 'ERROR',
            'execution': {'id': 2,
                          'root_execution_id': 1}
        }
        payload_b = {
            'status': 'ERROR',
            'execution': {'id': 1,
                          'root_execution_id': 1}
        }
        mistral = mock.Mock()
        websocket = mock.Mock()
        websocket.wait_for_messages.return_value = iter([payload_a, payload_b])
        execution = mock.Mock()
        execution.id = 1
        messages = list(base.wait_for_messages(mistral, websocket, execution))
        self.assertEqual([payload_a, payload_b], messages)
        self.assertFalse(mistral.executions.get.called)
        websocket.wait_for_messages.assert_called_with(timeout=None)
    def test_backwards_compatible_call_with_different_execution(self):
        """Cross-execution filtering also works for the legacy shape."""
        payload_a = {
            'status': 'RUNNING',
            'execution': {'id': 'aaaa',
                          'root_execution_id': 'aaaa'}
        }
        payload_b = {
            'status': 'RUNNING',
            'execution': {'id': 'bbbb',
                          'root_execution_id': 'bbbb'}
        }
        mistral = mock.Mock()
        mistral.executions.get.return_value.state = 'RUNNING'
        websocket = mock.Mock()
        websocket.wait_for_messages.return_value = iter([payload_a, payload_b])
        execution = mock.Mock()
        execution.id = 'aaaa'
        messages = list(base.wait_for_messages(mistral, websocket, execution))
        # Assert only payload_a was returned
        self.assertEqual([payload_a], messages)
        websocket.wait_for_messages.assert_called_with(timeout=None)
    def test_wait_for_messages_execution_complete(self):
        """When the socket drains mid-run, the final state is polled from
        Mistral and its output appended as the last message."""
        payload_a = {
            'status': 'RUNNING',
            'execution_id': 'aaaa',
            'root_execution_id': 'aaaa'
        }
        payload_b = {
            'status': 'SUCCESS',
            'execution_id': 'aaaa',
            'root_execution_id': 'aaaa'
        }
        mistral = mock.Mock()
        mistral.executions.get.return_value.state = 'SUCCESS'
        mistral.executions.get.return_value.output = json.dumps(payload_b)
        websocket = mock.Mock()
        websocket.wait_for_messages.return_value = iter([payload_a])
        execution = mock.Mock()
        execution.id = 'aaaa'
        messages = list(base.wait_for_messages(mistral, websocket, execution))
        # Assert only payload_b was returned
        self.assertEqual([payload_a, payload_b], messages)
        mistral.executions.get.assert_called_with('aaaa')
        websocket.wait_for_messages.assert_called_with(timeout=None)
| StarcoderdataPython |
6520341 | <reponame>bbrzycki/evolution-project
import uuid
import math
def pure_replication(organism, population_dict, world, position_hash_table=None):
    """
    Replace organism with two organism with similar parameters.
    Essentially, only differences in parameters are organism id,
    ancestry, age, and water / food levels.

    Returns the two children plus the parent's death record.
    NOTE(review): population_dict, world and position_hash_table are
    unused here -- presumably kept for API symmetry with other
    replication strategies; confirm before removing.
    """
    new_organism_list = []
    # Generate new organisms
    for i in range(2):
        child = organism.get_child()
        # Each child gets floor(parent_level / 2); with an odd level one
        # unit is lost in the split.
        if organism.drinking_type is not None:
            child.update_parameter('water_current',
                                   math.floor(organism.water_current / 2),
                                   in_place=True)
        if organism.eating_type is not None:
            child.update_parameter('food_current',
                                   math.floor(organism.food_current / 2),
                                   in_place=True)
        new_organism_list.append(child)
    # The parent dies of 'replication'; its die() result is returned
    # alongside the children.
    new_organism_list.append(organism.die('replication'))
    return new_organism_list
| StarcoderdataPython |
11252350 | <filename>Cours-4/Programmes-Python/M3-6.py
maliste = [2, 1, -5, 8, 7, 3, 4]
i = 0
somme = 0 # starts at 0 (running total)
# Walk the list by index, accumulating the sum of its elements.
while i < len(maliste):
    somme = somme + maliste[i]
    i = i + 1
print("La somme des éléments de ma liste est", somme)
# here 2 + 1 + (-5) + 8 + 7 + 3 + 4 = 20
5005484 | import pytest
from iocage.lib.ioc_destroy import IOCDestroy
from iocage.lib.ioc_list import IOCList
# Destroying jails needs root privileges and an iocage-activated zpool;
# these markers gate the test accordingly.
require_root = pytest.mark.require_root
require_zpool = pytest.mark.require_zpool
@require_root
@require_zpool
def test_destroy():
    """Destroy the two jails created earlier in this test suite."""
    # Map jail names to their UUIDs and dataset paths.
    jails, paths = IOCList("uuid").list_datasets()
    uuid = jails["newtest"]
    uuid_short = jails["newtest_short"]
    path = paths["newtest"]
    path_short = paths["newtest_short"]
    IOCDestroy(uuid, "newtest", path).destroy_jail()
    IOCDestroy(uuid_short, "newtest_short", path_short).destroy_jail()
    # Reaching this point without an exception is the success criterion.
    assert True == True
| StarcoderdataPython |
1646442 | """
Copyright 2019 <NAME>
ARIN
REST API Documentation
https://www.arin.net/resources/whoisrws/whois_api.html
Web Query
https://whois.arin.net/ui/advanced.jsp
"""
| StarcoderdataPython |
# NOTE(review): auto-generated IronPython/.NET stub for the CLR type
# System.Windows.AttachedPropertyBrowsableForTypeAttribute; the embedded
# docstrings describe the CLR API.  Do not edit by hand.
class AttachedPropertyBrowsableForTypeAttribute(AttachedPropertyBrowsableAttribute,_Attribute):
    """
    Specifies that an attached property is browsable only for elements that derive from a specified type.
    AttachedPropertyBrowsableForTypeAttribute(targetType: Type)
    """
    def Equals(self,obj):
        """
        Equals(self: AttachedPropertyBrowsableForTypeAttribute,obj: object) -> bool
        Determines whether the current
        System.Windows.AttachedPropertyBrowsableForTypeAttribute�.NET Framework
        attribute is equal to a specified object.
        obj: The System.Windows.AttachedPropertyBrowsableForTypeAttribute to compare to the
        current System.Windows.AttachedPropertyBrowsableForTypeAttribute.
        Returns: true if the specified System.Windows.AttachedPropertyBrowsableForTypeAttribute
        is equal to the current
        System.Windows.AttachedPropertyBrowsableForTypeAttribute; otherwise,false.
        """
        pass
    def GetHashCode(self):
        """
        GetHashCode(self: AttachedPropertyBrowsableForTypeAttribute) -> int
        Returns the hash code for this
        System.Windows.AttachedPropertyBrowsableForTypeAttribute�.NET Framework
        attribute.
        Returns: An unsigned 32-bit integer value.
        """
        pass
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod
    def __new__(self,targetType):
        """ __new__(cls: type,targetType: Type) """
        pass
    def __ne__(self,*args):
        pass
    TargetType=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the base type that scopes the use of the attached property where this�.NET Framework attribute�applies.
    Get: TargetType(self: AttachedPropertyBrowsableForTypeAttribute) -> Type
    """
    TypeId=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets a unique type identifier for this System.Windows.AttachedPropertyBrowsableForTypeAttribute�.NET Framework attribute.
    Get: TypeId(self: AttachedPropertyBrowsableForTypeAttribute) -> object
    """
| StarcoderdataPython |
1784847 | from . import good, fail
# Dummy smoke checks for the pattern grammar: good(pattern, value)
# asserts the value matches, fail(pattern, value) asserts it does not.
# NOTE(review): the semantics of '*', '#', '|' and ',' come from this
# package's pattern language -- confirm against its docs.
good('*', 0)
good('*', [1])
good('*', None)
fail('#', None)
good('*|#', None)
fail('*,#', None)
328870 | from .anagram import Anagram | StarcoderdataPython |
1771470 | from numpy import *
from matplotlib.pyplot import *
# Load 2-D points, one row per point.
x=loadtxt('cluster_test.txt')
N=x.shape[0]
M=zeros((N,N))
# critical distance: points closer than dc belong to the same cluster
dc=7
# Symmetric pairwise Euclidean distance matrix (upper triangle computed,
# mirrored into the lower triangle).
for i in range(N):
    for j in range(i+1, N):
        M[j, i] = M[i, j] = (sum((x[i, :]-x[j, :])**2))**0.5
print('matrix of distances is ready')
# l1: points settled into the current cluster; l2: current frontier;
# l3: next frontier; res: finished clusters; Q: unassigned point indices.
l1=[]
l2=[]
l3=[]
res=[]
Q=range(N)
# we need change 0 in diagonal matrix then we can found min value of matrix
# (set the diagonal to the global max so self-distances never win argmin)
maxi=M.max()
for i in range(N):
    M[i,i]=maxi
# NNS algorithm: seed each cluster with the globally closest unassigned
# pair, then flood-fill all points within dc of the frontier.
fv=True
while fv:
    minim = M[Q[0], Q[1]]
    iz = Q[0]
    jz = Q[1]
    for i in Q:
        for j in Q:
            if M[i, j] < minim:
                iz = i
                jz = j
                minim = M[i, j]
    l1 = []
    l2 = [iz, jz]
    l3 = []
    f = True
    while f:
        # Gather every unassigned point within dc of the frontier.
        for el in l2:
            for i in Q:
                if M[el, i] < dc:
                    if (i not in l1) and (i not in l2) and (i not in l3):
                        l3.append(i)
        if not l3:
            f = False
        l1.extend(l2)
        l2 = l3
        l3 = []
    res.append(l1)
    Q = [el for el in Q if el not in l1]
    if not Q:
        fv = False
# NOTE(review): only the LAST cluster found (l1 after the loop) is
# plotted, not all clusters in res -- confirm that is intended.
x2 = x.take(l1,0)
plot(x2[:,0], x2[:,1], '.')
grid()
show()
# savefig('NNS_cluster.png')
| StarcoderdataPython |
1623255 | # modified according to https://github.com/zhixuhao/unet
import keras
import numpy as np
import os
# import glob
import skimage.io as io
import skimage.transform as trans
from keras import backend as K
from keras.applications.vgg19 import VGG19
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator
def unet(pretrained_weights = None,input_size = (224,224,1)):
    """Build the classic U-Net segmentation model.

    Contracting path: five double-conv stages (64..1024 filters) with 2x2
    max-pooling, plus dropout on the two deepest stages.  Expanding path:
    2x2 up-sampling with skip concatenations back down to 64 filters, a
    2-filter conv, and a final 1x1 sigmoid conv producing a single-channel
    mask.  Compiled with Adam(1e-4) and binary cross-entropy; optionally
    loads `pretrained_weights` before returning.
    """
    inputs = Input(input_size)
    # --- contracting path ---
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # --- bottleneck ---
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    # --- expanding path with skip connections ---
    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
    merge6 = concatenate([drop4,up6], axis = 3)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
    merge7 = concatenate([conv3,up7], axis = 3)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
    merge8 = concatenate([conv2,up8], axis = 3)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
    merge9 = concatenate([conv1,up9], axis = 3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    # 1x1 sigmoid conv -> per-pixel binary mask probability.
    conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
    model = Model(input = inputs, output = conv10)
    model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
    #model.summary()
    if(pretrained_weights):
        model.load_weights(pretrained_weights)
    return model
def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "grayscale",
                    mask_color_mode = "grayscale",image_save_prefix  = "image",mask_save_prefix  = "mask",
                    flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (224,224),seed = 1):
    '''
    can generate image and mask at the same time
    use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
    if you want to visualize the results of generator, set save_to_dir = "your path"
    '''
    # Two parallel ImageDataGenerators with identical augmentation config;
    # the shared `seed` below keeps image and mask transforms in lockstep.
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        train_path,
        classes = [image_folder],
        class_mode = None,
        color_mode = image_color_mode,
        target_size = target_size,
        batch_size = batch_size,
        save_to_dir = save_to_dir,
        save_prefix  = image_save_prefix,
        seed = seed)
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes = [mask_folder],
        class_mode = None,
        color_mode = mask_color_mode,
        target_size = target_size,
        batch_size = batch_size,
        save_to_dir = save_to_dir,
        save_prefix  = mask_save_prefix,
        seed = seed)
    train_generator = zip(image_generator, mask_generator)
    # Normalise each paired batch before yielding it to model.fit.
    for (img,mask) in train_generator:
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        yield (img,mask)
def adjustData(img, mask, flag_multi_class, num_class):
    """Normalize an image batch and convert its mask batch to training labels.

    Multi-class mode: channel 0 of the mask holds integer class ids, which
    are expanded to one-hot vectors and the spatial dims are flattened.
    Binary mode: both arrays are scaled to [0, 1] (image only when its max
    exceeds 1) and the mask is thresholded at 0.5.
    """
    if flag_multi_class:
        img = img / 255
        # Keep only the label channel of the mask.
        if len(mask.shape) == 4:
            mask = mask[:, :, :, 0]
        else:
            mask = mask[:, :, 0]
        one_hot = np.zeros(mask.shape + (num_class,))
        for class_id in range(num_class):
            # Set the one-hot bit everywhere the mask equals this class id.
            one_hot[mask == class_id, class_id] = 1
        # Collapse the two spatial axes into one (batch, H*W, num_class).
        mask = np.reshape(
            one_hot,
            (one_hot.shape[0],
             one_hot.shape[1] * one_hot.shape[2],
             one_hot.shape[3]))
    elif np.max(img) > 1:
        img = img / 255
        mask = mask / 255
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
    return (img, mask)
def testGenerator(test_path, num_image=30, target_size=(224, 224), flag_multi_class=False, as_gray=True):
    """Yield test images one at a time, normalized and resized for inference.

    Reads ``<test_path>/0.png`` .. ``<test_path>/<num_image - 1>.png``.

    :param test_path: directory containing the numbered PNG test images
    :param num_image: how many images to yield (files 0..num_image-1)
    :param target_size: (height, width) the images are resized to
    :param flag_multi_class: when False a trailing channel axis is added
    :param as_gray: load images as grayscale
    :return: generator of arrays shaped (1, H, W[, 1])
    """
    # BUG FIX: the loop was hard-coded to range(588), silently ignoring
    # the num_image parameter; honor the caller's argument instead.
    for i in range(num_image):
        img = io.imread(os.path.join(test_path, "%d.png" % i), as_gray=as_gray)
        img = img / 255
        img = trans.resize(img, target_size)
        if not flag_multi_class:
            # Add the single-channel axis the network expects.
            img = np.reshape(img, img.shape + (1,))
        # Add the batch axis.
        img = np.reshape(img, (1,) + img.shape)
        yield img
def labelVisualize(num_class, color_dict, img):
    """Map a class-index image to an RGB visualization.

    Each pixel whose value equals class ``i`` is painted with
    ``color_dict[i]`` (an RGB triple in 0-255); the result is scaled
    to [0, 1].
    """
    if len(img.shape) == 3:
        # Drop the channel axis; labels live in channel 0.
        img = img[:, :, 0]
    colored = np.zeros(img.shape + (3,))
    for class_id in range(num_class):
        colored[img == class_id, :] = color_dict[class_id]
    return colored / 255
def saveResult(save_path, npyfile, flag_multi_class=False, num_class=2):
    """Write each prediction as ``<index>_predict.png`` inside ``save_path``.

    Multi-class predictions are colorized via labelVisualize / COLOR_DICT;
    binary predictions are saved as their first channel.
    """
    for index, prediction in enumerate(npyfile):
        if flag_multi_class:
            img = labelVisualize(num_class, COLOR_DICT, prediction)
        else:
            img = prediction[:, :, 0]
        io.imsave(os.path.join(save_path, "%d_predict.png" % index), img)
| StarcoderdataPython |
4881310 | import speech_recognition as sr
from TTSEngine import TTSEngine
def listen():
    """Capture one utterance from the default microphone.

    Returns the text recognized by Google's speech API (US English), or
    ``None`` when the audio could not be understood.
    """
    rec = sr.Recognizer()
    mic = sr.Microphone()
    # Treat up to 2 seconds of silence as part of the same phrase.
    rec.pause_threshold = 2
    with mic as source:
        print('Listening...')
        audio = rec.listen(source)
    try:
        return rec.recognize_google(audio, language='en-us')
    except sr.UnknownValueError:
        # Speech was unintelligible; callers treat None as "try again".
        # (Was `as err` with the binding never used.)
        return None
def wait_for(param, engine: TTSEngine):
    """Speak ``param`` through ``engine``, then listen until something
    non-empty is recognized and return that text."""
    engine.say(param)
    while True:
        heard = listen()
        if heard:
            return heard
| StarcoderdataPython |
1994695 | import re
from datetime import datetime
def parameters_in_request_json(request_json, parameter_list):
    """Return True when every name in ``parameter_list`` is a key of
    ``request_json`` (vacuously True for an empty list)."""
    return all(name in request_json for name in parameter_list)
def valid_user_params(request_json):
    """Validate the optional "email_address" and "password" fields.

    Absent fields are considered valid; a present field must match its
    pattern. Password rule: 4-15 characters containing at least one digit.

    :param request_json: dict-like request payload
    :return: True when every present field is valid
    """
    # Raw strings for regexes (idiomatic; the escaped "\\." form compiled
    # to the identical pattern but is harder to read).
    email_regex = re.compile(r"[^@]+@[^@]+\.[^@]+")
    password_regex = re.compile(r"^(?=.*\d).{4,15}$")
    if "email_address" in request_json:
        if not email_regex.match(request_json["email_address"]):
            return False
    if "password" in request_json:
        if not password_regex.match(request_json["password"]):
            return False
    return True
def validate_experience_params(request_json):
    """Validate the optional "start_date" and "end_date" fields.

    A present field must parse with the MM-DD-YYYY format; absent fields
    are considered valid.

    :param request_json: dict-like request payload
    :return: True when every present date field parses
    """
    # Deduplicated: the original repeated an identical try/except block
    # for each field name.
    for key in ("start_date", "end_date"):
        if key in request_json:
            try:
                datetime.strptime(request_json[key], "%m-%d-%Y")
            except ValueError:
                return False
    return True
| StarcoderdataPython |
4939657 | <reponame>check-spelling/drizzlepac
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 ai :
"""
Functions to compute and apply infinitesimal rotations to correct small shifts
and rotations in spherical coordinates
Uses formulae from `Budavari & Lubow (2012, ApJ, 761, 188) <http://adsabs.harvard.edu/abs/2012ApJ...761..188B>`_
Created by <NAME> from IDL versions, 2017 January 12
Added getxyshiftrot function for X,Y pixel coords, 2017 June 15
Path
----
HLApipeline/regression_testing/infrot.py
Dependencies
------------
None.
Inputs
------
None.
Classes and Functions
---------------------
"""
import numpy as np
def getxyshiftrot(x1,y1,x2,y2,xref=None,yref=None):
    """
    Compute xshift, yshift, rotation to shift/rotate positions in
    first set of arrays to match those in second set. Positions are in pixels for
    an assumed small field.
    Calls getinfrot to do the fit.
    Usage: xshift, yshift, rotation = getxyshiftrot(x1,y1,x2,y2)
    Important notes:
    - Input arrays x1, y1, x2, and y2 positions are in pixels.
    - Position lists must be matched with the same number of entries.
    :param x1: Input array #1, X components.
    :param y1: Input array #1, Y components.
    :param x2: Input array #2, X components.
    :param y2: Input array #2, Y components.
    :param xref: Position of X reference pixel. If not specified, default value = mean(x1)
    :param yref: Position of y reference pixel. If not specified, default value = mean(y1)
    :type x1: numpy.ndarray
    :type y1: numpy.ndarray
    :type x2: numpy.ndarray
    :type y2: numpy.ndarray
    :type xref: float
    :type yref: float
    :return: X and Y shifts in pixels and rotation angle in degrees
    """
    x1 = np.asarray(x1)
    y1 = np.asarray(y1)
    x2 = np.asarray(x2)
    y2 = np.asarray(y2)
    if x1.ndim != 1 or y1.ndim != 1 or x2.ndim != 1 or y2.ndim != 1:
        raise ValueError("x1,y1,x2,y2 must be 1-D arrays")
    n = x1.size
    if y1.size != n or x2.size != n or y2.size != n:
        raise ValueError("x1,y1,x2,y2 must be matching-length 1-D arrays")
    # scale values to fake RA & Dec with range 0.05 degrees (approximate HST camera field)
    if xref is None: xref = x1.mean()
    if yref is None: yref = y1.mean()
    pxsize = x1.max() - x1.min()
    pysize = y1.max() - y1.min()
    # NOTE(review): pscale divides by the larger span of the first list;
    # a degenerate list where all x1 and all y1 are equal would divide by
    # zero — callers presumably pass spread-out positions. TODO confirm.
    pscale = 0.05 / max(pxsize,pysize)
    # RA is mapped with the sign flipped relative to pixel X ((xref - x1));
    # Dec follows pixel Y directly. The inverse mapping below undoes this.
    ra1 = (xref - x1) * pscale
    ra2 = (xref - x2) * pscale
    dec1 = (y1 - yref) * pscale
    dec2 = (y2 - yref) * pscale
    # Solve for the infinitesimal rotation on the fake sphere.
    omega = getinfrot(ra1,dec1,ra2,dec2)
    # convert to degrees
    omega = omega * (180/np.pi)
    # Convert the rotation vector components back to pixel shifts; the
    # third/second components correspond to the RA/Dec offsets introduced
    # by the scaling above, hence the negation and division by pscale.
    xshift = -omega[2]/pscale
    yshift = -omega[1]/pscale
    rotation = omega[0]
    return (xshift, yshift, rotation)
def getinfrot(ra1, dec1, ra2, dec2, radians=False):
    """
    Compute 3-element infinitesimal rotation vector to shift/rotate positions in
    first set of arrays to match those in second set.
    Uses formulae from `Budavari & Lubow (2012, ApJ, 761, 188) <http://adsabs.harvard.edu/abs/2012ApJ...761..188B>`_
    Usage: omega = getinfrot(ra1,dec1,ra2,dec2)
    Important Notes:
    - Input arrays ra1, dec1, ra2, and dec2 positions are in degrees
    - Position lists must be matched with the same number of entries.
    :param ra1: Input array #1, right ascension components.
    :param dec1: Input array #1, declination components.
    :param ra2: Input array #2, right ascension components.
    :param dec2: Input array #2, declination components.
    :param radians: If True, ra/dec values are in radians instead of degrees. Default value = False
    :type ra1: numpy.ndarray
    :type dec1: numpy.ndarray
    :type ra2: numpy.ndarray
    :type dec2: numpy.ndarray
    :type radians: Boolean
    :return: 3-element infinitesimal rotation vector omega such that r1(shifted) = r1 + cross(omega,r1)
    """
    ra1 = np.asarray(ra1)
    dec1 = np.asarray(dec1)
    ra2 = np.asarray(ra2)
    dec2 = np.asarray(dec2)
    if ra1.ndim != 1 or dec1.ndim != 1 or ra2.ndim != 1 or dec2.ndim != 1:
        raise ValueError("ra1,dec1,ra2,dec2 must be 1-D arrays")
    n = ra1.size
    if dec1.size != n or ra2.size != n or dec2.size != n:
        raise ValueError("ra1,dec1,ra2,dec2 must be matching-length 1-D arrays")
    # Unit vectors on the sphere: c = targets (set 2), r = sources (set 1).
    c = radec2xyz(ra2,dec2,radians=radians)
    r = radec2xyz(ra1,dec1,radians=radians)
    # Least-squares normal equations for omega: right-hand side is the sum
    # of cross products, system matrix is n*I - sum(r r^T).
    b = np.cross(r,c)
    a = -vdyadicp(r,r)
    if b.ndim == 2:
        # Multiple positions: accumulate over the point index.
        b = b.sum(axis=0)
        a = a.sum(axis=0)
    # Add n to the diagonal to complete n*I - sum(r r^T).
    np.fill_diagonal(a, a.diagonal() + n)
    # Solve a @ omega = b for the rotation vector.
    return np.dot(np.linalg.inv(a), b)
def applyomega(rain, decin, omega, radians=False):
    """
    Apply the 3-element infinitesimal rotation vector computed by getinfrot()
    to a set of RA and Dec positions.
    Usage: raout, decout = applyomega(rain, decin, omega)
    :param rain: Input right ascension positions in degrees (or radians)
    :param decin: Input declination positions in degrees (or radians)
    :param omega: 3-element infinitesimal rotation vector
    :param radians: If True, ra/dec values are in radians instead of degrees. Default value = False
    :return: the RA and Dec output positions in degrees (or radians)
    """
    # Rotate the unit vectors by the first-order formula r' = r + omega x r.
    positions = radec2xyz(rain, decin, radians=radians)
    rotated = positions + np.cross(omega, positions)
    return xyz2radec(rotated, radians=radians)
def radec2xyz(ra, dec, radians=False):
    """
    Convert RA, Dec to Cartesian (x, y, z) coordinates
    Usage: xyz = radec2xyz(ra, dec)
    Important Notes:
    - inputs *ra* and *dec* must match in shape
    :param ra: Input right ascension positions in degrees
    :param dec: Input declination positions in degrees
    :param radians: If True, ra/dec values are in radians instead of degrees. Default value = False
    :type ra: numpy.ndarray
    :type dec: numpy.ndarray
    :type radians: Boolean
    :return: [\\*,3] array with normalized cartesian coordinates
    """
    ra = np.asarray(ra)
    dec = np.asarray(dec)
    s = ra.shape
    if s != dec.shape:
        raise ValueError("ra,dec must be same-shape arrays")
    if not radians:
        # convert degrees to radians (new arrays; inputs are not mutated)
        dtor = np.pi/180
        ra = ra * dtor
        dec = dec * dtor
    c = np.empty(s + (3,), dtype=float)
    cdec = np.cos(dec)
    # BUG FIX: index the last axis with '...' so inputs of any matching
    # shape work as documented; the previous 'c[:, 0]' form only handled
    # 1-D ra/dec (identical result for 1-D input).
    c[..., 0] = np.cos(ra) * cdec
    c[..., 1] = np.sin(ra) * cdec
    c[..., 2] = np.sin(dec)
    return c
def xyz2radec(xyz, radians=False):
    """
    Convert Cartesian (x, y, z) coordinates to RA, Dec
    Usage: ra, dec = xyz2radec(xyz)
    :param xyz: [\\*,3] array with cartesian coordinates. May be multi-dimensional but last dimension must be 3, e.g., shape = (10,10,3).
    :param radians: If True, ra/dec values are returned in radians instead of degrees. Default value = False
    :type xyz: numpy.ndarray
    :type radians: Boolean
    :return: ra and dec arrays (RA forced into [0, 360) degrees / [0, 2*pi))
    """
    xyz = np.asarray(xyz)
    s = xyz.shape
    if s[-1] != 3:
        raise ValueError('xyz last dimension must be 3')
    # BUG FIX: use integer division -- `xyz.size / 3` is a float under
    # Python 3 true division and np.reshape rejects float dimensions.
    n = xyz.size // 3
    # reshape to a 2-D array [n, 3]
    c = np.reshape(xyz, (n, 3))
    # normalize to unit length (for safety)
    norm = np.sqrt((c ** 2).sum(axis=-1))
    c = c / norm[:, None]
    dec = np.arcsin(c[:, 2])
    ra = np.arctan2(c[:, 1], c[:, 0])
    # force into range 0 to 2*pi
    ra[ra < 0] += 2 * np.pi
    if not radians:
        # convert to degrees
        radeg = 180 / np.pi
        ra = ra * radeg
        dec = dec * radeg
    # reshape using original dimensions of xyz
    ra = ra.reshape(s[:-1])
    dec = dec.reshape(s[:-1])
    return (ra, dec)
def vdyadicp(a, b):
    """
    Return dyadic (outer) product of arrays a, b along their last axis.
    :param a: [\\*,3] input array. May be multidimensional (e.g. 5,4,3) as long as last dimension is 3.
    :param b: [\\*,3] input array with the same shape as a.
    :return: [\\*, 3, 3] array of outer products
    """
    a = np.asarray(a)
    b = np.asarray(b)
    shape = a.shape
    if shape[-1] != 3:
        raise ValueError('a,b last dimension must be 3')
    if shape != b.shape:
        raise ValueError('a,b must have same shape [...,3]')
    # Broadcasting form of the outer product over the last axis;
    # equivalent to np.einsum('...i,...j->...ij', a, b).
    return a[..., :, None] * b[..., None, :]
if __name__ == "__main__":
print('='*40)
ra1 = np.array([1.0, 1.1, 1.2, 1.3])
dec1 = 45.0 + np.zeros(ra1.size,dtype=float)
ra2 = ra1
dec2 = dec1 + (ra1-ra1[0])*1.e-3
omega = getinfrot(ra1, dec1, ra2, dec2)
print('ra1 ',ra1)
print('dec1 ',dec1)
print('omega',omega)
print('ra2 ',ra2)
print('dec2 ',dec2)
print('rotated values')
ra1r, dec1r = applyomega(ra1,dec1,omega)
print('ra1r ',ra1r)
print('dec1r',dec1r)
print('check')
print('ra diff ', ra1r-ra2)
print('dec diff', dec1r-dec2)
print('='*40)
ra1 = np.array([1.0, 1.1, 1.2, 1.3])
dec1 = 45.0 + np.zeros(ra1.size,dtype=float)
ra2 = ra1
dec2 = dec1 + 1.e-3
omega = getinfrot(ra1, dec1, ra2, dec2)
print('ra1 ',ra1)
print('dec1 ',dec1)
print('omega',omega)
print('ra2 ',ra2)
print('dec2 ',dec2)
print('rotated values')
ra1r, dec1r = applyomega(ra1,dec1,omega)
print('ra1r ',ra1r)
print('dec1r',dec1r)
print('ra diff ', ra1r-ra2)
print('dec diff', dec1r-dec2)
| StarcoderdataPython |
6667805 | # -*- coding: utf-8 -*-
import re
import time
__all__ = [
'EventPredictor'
]
class EventPredictor:
    """
    Classify a pre-extracted audio feature vector with a trained model
    (e.g. to determine whether a recording is a baby cry).

    NOTE: this module is Python 2 code (it uses `print` statements).
    """

    def __init__(self, model):
        # Trained estimator exposing a scikit-learn-style `predict` method
        # (presumably; only `predict` is used here -- confirm with callers).
        self.model = model

    def classify(self, new_signal):
        """
        Make prediction with trained model.

        :param new_signal: 1d array, 34 features
        :return: predicted label; per the note below this is an array of
            the kind array(['004 - Baby cry'], dtype=object)
        """
        t1 = time.time()
        category = self.model.predict(new_signal)
        # Timing instrumentation (Python 2 print statement); the second
        # `t1` reset is left over from a removed second measurement.
        #------------<<<
        print 'predict time cost:',time.time()-t1
        t1 = time.time()
        #------------>>>
        # print 'baby_cry_predictor category:',category
        # category is an array of the kind array(['004 - Baby cry'], dtype=object)
        return category
| StarcoderdataPython |
# Read a length in metres and report it converted to six metric units.
metros = float(input('Digite um valor em metros: '))

# All conversions relative to one metre, keyed by unit abbreviation.
conversoes = {
    'cm': metros * 100,
    'mm': metros * 1000,
    'dm': metros * 10,
    'dam': metros / 10,
    'hm': metros / 100,
    'km': metros / 1000,
}
print('A conversão de metro para CM foi {} \n e MM foi {} \n em DM foi {} \n em DAM foi {} \n em HM {} \n e por fim em KM {}'.format(conversoes['cm'], conversoes['mm'], conversoes['dm'], conversoes['dam'], conversoes['hm'], conversoes['km']))
| StarcoderdataPython |
5054401 |
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.fftpack import dct
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import librosa
import pickle
from PixelShift.explore_data import PixelShiftSound
# SVM libraies
from sklearn import svm
from sklearn import preprocessing
import sklearn.metrics
import pickle
'''可视化: 混淆矩阵'''
def plot_confusion_matrix(cm, labels_name, title, figname):
    """Render a row-normalized confusion matrix, save it as PNG and show it.

    :param cm: square confusion-matrix array (counts)
    :param labels_name: class labels for both axes
    :param title: figure title
    :param figname: output PNG path
    """
    # Normalize each row so it sums to 1 (per-true-class rates).
    row_totals = cm.sum(axis=1)[:, np.newaxis]
    normalized = cm.astype('float') / row_totals
    plt.imshow(normalized, interpolation='nearest')
    plt.title(title)
    plt.colorbar()
    tick_positions = np.array(range(len(labels_name)))
    plt.xticks(tick_positions, labels_name, rotation=90)
    plt.yticks(tick_positions, labels_name)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig(figname, format='png')
    plt.show()
'''预加重'''
# 首先对数据进行预加重
def pre_emphasis_func(signal):
    """Apply a first-order pre-emphasis filter y[t] = x[t] - 0.97*x[t-1].

    The first sample is passed through unchanged.
    """
    alpha = 0.97
    return np.concatenate(([signal[0]], signal[1:] - alpha * signal[:-1]))
'''窗口化'''
# 让每一帧的2边平滑衰减,这样可以降低后续傅里叶变换后旁瓣的强度,取得更高质量的频谱。
def Windowing(frames, frame_length):
    """Multiply every frame by a Hamming window of ``frame_length`` samples,
    tapering frame edges to reduce spectral leakage in the subsequent FFT."""
    window = np.hamming(frame_length)
    return frames * window
''' 傅里叶变换'''
# 对每一帧的信号,进行快速傅里叶变换,对于每一帧的加窗信号,进行N点FFT变换,也称短时傅里叶变换(STFT),N通常取256或512,然后用如下的公式计算能量谱
def FFT(frames, NFFT):
    """Compute the power spectrum (periodogram) of each frame.

    :param frames: 2-D array of windowed frames, one frame per row
    :param NFFT: FFT length (typically 256 or 512); np.fft.rfft
        zero-pads/truncates each frame to this length
    :return: array of shape (num_frames, NFFT // 2 + 1) with the
        power spectrum of every frame
    """
    # BUG FIX: the parameter used to be overwritten with a hard-coded 512,
    # silently ignoring the caller's NFFT; honor the argument instead.
    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))
    pow_frames = (1.0 / NFFT) * (mag_frames ** 2)
    print(pow_frames.shape)  # kept: original debug trace of the output shape
    return pow_frames
'''fank特征 40 dim'''
def get_fBank(powd_frames,sameple_rate,NFFT,nfilt):
    '''
    Compute log Mel filter-bank (FBank) features from a power spectrum.

    :param powd_frames: power spectrum frames, shape (num_frames, NFFT/2 + 1)
    :param sameple_rate: sampling rate in Hz [sic: parameter name kept]
    :param NFFT: FFT length used to produce powd_frames
    :param nfilt: number of triangular Mel filters
    :return: (fbank, filter_banks) -- the (nfilt, NFFT/2+1) filter matrix
        and the (num_frames, nfilt) log-energies in dB
    '''
    # Lower and upper bounds of the Mel scale.
    ''' 规定mel值的上限和下限'''
    low_freq_mel = 0
    # Upper bound from the Nyquist frequency (sample_rate / 2), mapped to Mel.
    high_freq_mel = 2595 * np.log10(1 + (sameple_rate / 2) / 700)
    # Mel-spaced center points; two extra points pad the left/right edges so
    # every filter has both neighbors available.
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)
    hz_points = 700 * (10 ** (mel_points / 2595) - 1)
    # Triangular filter values sampled at each power-spectrum bin.
    fbank = np.zeros((nfilt, int(NFFT / 2 + 1)))
    # FFT bin index of each filter center frequency.
    bin = (hz_points / (sameple_rate / 2)) * (NFFT / 2)
    for i in range(1, nfilt + 1):
        left = int(bin[i - 1])
        center = int(bin[i])
        right = int(bin[i + 1])
        # Rising slope from the left neighbor up to the center...
        for j in range(left, center):
            fbank[i - 1, j + 1] = (j + 1 - bin[i - 1]) / (bin[i] - bin[i - 1])
        # ...and falling slope from the center down to the right neighbor.
        for j in range(center, right):
            fbank[i - 1, j + 1] = (bin[i + 1] - (j + 1)) / (bin[i + 1] - bin[i])
    # Project the power spectrum onto the filter bank.
    filter_banks = np.dot(powd_frames, fbank.T)
    # Replace exact zeros with machine epsilon so the log below is finite.
    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)
    filter_banks = 20 * np.log10(filter_banks)  # dB
    print(filter_banks.shape)
    return fbank,filter_banks
''' 获取MFCC特征'''
def get_mfcc_features(num_ceps, filter_banks, lifted=False, cep_lifter=23):
    """Convert log filter-bank energies to MFCCs via a type-II DCT.

    :param num_ceps: number of cepstral coefficients to keep
        (coefficients 2..num_ceps+1; c0 is dropped)
    :param filter_banks: (num_frames, nfilt) log filter-bank energies
    :param lifted: apply sinusoidal liftering to smooth the coefficients
    :param cep_lifter: liftering parameter used when ``lifted`` is True
    :return: (num_frames, num_ceps) MFCC matrix
    """
    # BUG FIX: num_ceps and cep_lifter used to be overwritten with the
    # hard-coded values 12 and 23, silently ignoring the caller's arguments.
    mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1:(num_ceps + 1)]
    if lifted:
        # Sinusoidal liftering de-emphasizes higher cepstral coefficients.
        (nframes, ncoeff) = mfcc.shape
        n = np.arange(ncoeff)
        lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)
        mfcc *= lift
    return mfcc
if __name__ == "__main__":
'''建立标签index与Viseme的映射关系'''
category_mapping ={0:"sil",1:"PP",2:"FF",3:"TH",4:"DD",5:"kk",6:"CH",7:"SS",8:"nn",9:"RR",
10:"aa",11:"E",12:"ih",13:"oh",14:"ou"}
'''得到数据集中的音频域与标签数据,并将其转换为mfcc特征'''
ps = PixelShiftSound(sample_rate=16000,frame_duration=0.016,frame_shift_duration=0.008,datatype=2)
wav_data,wav_label = ps.get_all_wav_data()
nums_of_data = wav_data.shape[0]
print("Wav Frame data:",wav_data.shape)
print("Wav Frame label:",wav_label.shape)
for i in range(len(wav_data)):
# 预加重和窗口化处理
wav_data[i] = pre_emphasis_func(wav_data[i])
wav_data = Windowing(wav_data,len(wav_data[0]))
# 对每一帧进行傅里叶变换
fft_data = FFT(wav_data,512)
print("Wav Frame data After FFT:",wav_data.shape)
fbank,filter_banks=get_fBank(fft_data,16000,512,40)
print("Wav Frame data After FBanks:",wav_data.shape)
# 到这里都是OK的
mfcc_data = get_mfcc_features(num_ceps=12,filter_banks=filter_banks,
lifted=True)
print("Wav Frams's MFCC features",mfcc_data.shape)
label_data_max = np.zeros(nums_of_data)
# 方法一: 当sil 不为1的时候,去掉sil取第二大作为分类结果
'''得到label的特征,取最大值的下标作为index'''
cnt = 0
for index,vector in enumerate(wav_label):
lst = vector.tolist()
if (lst[0]==1):
max_index =0
label_data_max[index] = max_index
else:
list_a_max_list = max(lst[1:]) #返回最大值
max_index = lst.index(list_a_max_list) # 返回最大值的索引
label_data_max[index] = max_index
if max_index!=0:
cnt =cnt+1
print("取最大值后的wav label data:", label_data_max.shape)
''' 准备利用SVM进行分类 '''
# 首先对数据进行Normalization
trainig_data = preprocessing.scale(mfcc_data)
# 对数据进行shuffer操作,混乱化
state = np.random.get_state()
np.random.shuffle(trainig_data)
np.random.set_state(state)
np.random.shuffle(label_data_max)
# Save into a pickle file
dct={"MFCC":trainig_data,"labels":label_data_max}
with open("mfcc15_UNDO",'wb') as f1:
pickle.dump(dct,f1)
print("DONE")
# # 建立一个SVM classifier
# # Using RBF as Kernel Functions
# print("Start Training")
# classifier =svm.SVC(C=2,kernel='rbf',gamma='auto',decision_function_shape='ovr')
# classifier.fit(trainig_data,label_data_max.ravel())
# print("Training is Done!")
# # Metrics
# print("Training accuracy is ",classifier.score(trainig_data,label_data_max))
| StarcoderdataPython |
4986206 | <gh_stars>0
import logging
from typing import Tuple
import sqlalchemy as sa
from aiohttp import web
from psycopg2 import Error as DbApiError
from tenacity import retry
from servicelib.aiopg_utils import PostgresRetryPolicyUponOperation
from .models import tokens
from .settings import APP_CONFIG_KEY, APP_DB_ENGINE_KEY
log = logging.getLogger(__name__)
@retry(**PostgresRetryPolicyUponOperation(log).kwargs)
async def _get_tokens_from_db(engine, userid):
    """Return the ``tokens`` row for ``userid`` as a dict ({} when absent).

    Retried per the Postgres operation retry policy on transient failures.
    """
    query = sa.select([tokens, ]).where(tokens.c.user_id == userid)
    async with engine.acquire() as connection:
        cursor = await connection.execute(query)
        first_row = await cursor.first()
    return dict(first_row) if first_row else {}
async def get_api_token_and_secret(request: web.Request, userid) -> Tuple[str, str]:
    # NOTE: temporary solution -- this information should eventually be
    # sent from the client side together with the userid.
    """Resolve the datcore API token/secret for ``userid``.

    Falls back to the app's "test_datcore" config values (or None) when
    there is no DB engine or the lookup fails.
    """
    defaults = request.app[APP_CONFIG_KEY].get("test_datcore", {})
    api_token = defaults.get("api_token")
    api_secret = defaults.get("api_secret")

    engine = request.app.get(APP_DB_ENGINE_KEY, None)
    if not engine:
        return api_token, api_secret

    try:
        row = await _get_tokens_from_db(engine, userid)
    except DbApiError:
        # NOTE this shall not log as error since is a possible outcome
        # with an alternative
        log.warning(
            "Cannot retrieve tokens for user %s in pgdb %s",
            userid,
            engine,
            exc_info=True,
        )
        return api_token, api_secret

    token_data = row.get("token_data", {})
    return (
        token_data.get("token_key", api_token),
        token_data.get("token_secret", api_secret),
    )
| StarcoderdataPython |
1692589 | <filename>example/diff_imports/import_from_module.py
import time
def do_some_stuff():
    """Simulate one second of work by sleeping (example/placeholder task)."""
    time.sleep(1)
5095814 | import time
import collections
import Qt
from Qt import QtWidgets, QtCore, QtGui
import qtawesome
from openpype.style import (
get_objected_colors,
get_default_tools_icon_color,
)
from openpype.tools.flickcharm import FlickCharm
from .views import (
TreeViewSpinner,
DeselectableTreeView
)
from .widgets import PlaceholderLineEdit
from .models import RecursiveSortFilterProxyModel
from .lib import (
DynamicQThread,
get_asset_icon
)
if Qt.__binding__ == "PySide":
from PySide.QtGui import QStyleOptionViewItemV4
elif Qt.__binding__ == "PyQt4":
from PyQt4.QtGui import QStyleOptionViewItemV4
ASSET_ID_ROLE = QtCore.Qt.UserRole + 1
ASSET_NAME_ROLE = QtCore.Qt.UserRole + 2
ASSET_LABEL_ROLE = QtCore.Qt.UserRole + 3
ASSET_UNDERLINE_COLORS_ROLE = QtCore.Qt.UserRole + 4
class AssetsView(TreeViewSpinner, DeselectableTreeView):
    """Asset items view.

    Adds abilities to deselect, show loading spinner and add flick charm
    (scroll by mouse/touchpad click and move).
    """

    def __init__(self, parent=None):
        super(AssetsView, self).__init__(parent)
        self.setIndentation(15)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.setHeaderHidden(True)

        # Flick charm (drag-to-scroll) is created eagerly but only attached
        # to the view when activate_flick_charm() is called.
        self._flick_charm_activated = False
        self._flick_charm = FlickCharm(parent=self)
        # Scroll mode in effect before flick charm forced per-pixel
        # scrolling, restored on deactivation.
        self._before_flick_scroll_mode = None

    def activate_flick_charm(self):
        """Enable drag-to-scroll and switch to per-pixel scrolling."""
        if self._flick_charm_activated:
            return
        self._flick_charm_activated = True
        self._before_flick_scroll_mode = self.verticalScrollMode()
        self._flick_charm.activateOn(self)
        self.setVerticalScrollMode(self.ScrollPerPixel)

    def deactivate_flick_charm(self):
        """Disable drag-to-scroll and restore the previous scroll mode."""
        if not self._flick_charm_activated:
            return
        self._flick_charm_activated = False
        self._flick_charm.deactivateFrom(self)
        if self._before_flick_scroll_mode is not None:
            self.setVerticalScrollMode(self._before_flick_scroll_mode)

    def mousePressEvent(self, event):
        # When clicking empty space while holding Shift or Ctrl, swallow
        # the event so the current selection is kept (extending-selection
        # gestures should not clear it).
        index = self.indexAt(event.pos())
        if not index.isValid():
            modifiers = QtWidgets.QApplication.keyboardModifiers()
            if modifiers == QtCore.Qt.ShiftModifier:
                return
            elif modifiers == QtCore.Qt.ControlModifier:
                return

        super(AssetsView, self).mousePressEvent(event)

    def set_loading_state(self, loading, empty):
        """Change loading state.

        TODO: Separate into 2 individual methods.

        Args:
            loading(bool): Is loading.
            empty(bool): Is model empty.
        """
        if self.is_loading != loading:
            if loading:
                # Repaint the viewport whenever the spinner animates.
                self.spinner.repaintNeeded.connect(
                    self.viewport().update
                )
            else:
                self.spinner.repaintNeeded.disconnect()
                self.viewport().update()

        self.is_loading = loading
        self.is_empty = empty
class UnderlinesAssetDelegate(QtWidgets.QItemDelegate):
    """Item delegate drawing bars under asset name.

    This is used in loader and library loader tools. Multiselection of assets
    may group subsets by name under colored groups. Selected color groups are
    then propagated back to selected assets as underlines.
    """
    # Height (px) of the colored underline strip at the bottom of each row.
    bar_height = 3

    def __init__(self, *args, **kwargs):
        super(UnderlinesAssetDelegate, self).__init__(*args, **kwargs)
        # Background colors for the selection/hover states, resolved from
        # the style configuration once at construction time.
        asset_view_colors = get_objected_colors()["loader"]["asset-view"]
        self._selected_color = (
            asset_view_colors["selected"].get_qcolor()
        )
        self._hover_color = (
            asset_view_colors["hover"].get_qcolor()
        )
        self._selected_hover_color = (
            asset_view_colors["selected-hover"].get_qcolor()
        )

    def sizeHint(self, option, index):
        """Add bar height to size hint."""
        result = super(UnderlinesAssetDelegate, self).sizeHint(option, index)
        height = result.height()
        result.setHeight(height + self.bar_height)
        return result

    def paint(self, painter, option, index):
        """Replicate painting of an item and draw color bars if needed."""
        # Qt4 compat
        if Qt.__binding__ in ("PySide", "PyQt4"):
            option = QStyleOptionViewItemV4(option)

        painter.save()

        # Rect available for icon+text; the bottom `bar_height` pixels are
        # reserved for the underline strip.
        item_rect = QtCore.QRect(option.rect)
        item_rect.setHeight(option.rect.height() - self.bar_height)

        # One equally wide segment per subset color; None entries keep the
        # segment unpainted.
        subset_colors = index.data(ASSET_UNDERLINE_COLORS_ROLE) or []
        subset_colors_width = 0
        if subset_colors:
            subset_colors_width = option.rect.width() / len(subset_colors)

        subset_rects = []
        counter = 0
        for subset_c in subset_colors:
            new_color = None
            new_rect = None
            if subset_c:
                new_color = QtGui.QColor(*subset_c)

                new_rect = QtCore.QRect(
                    option.rect.left() + (counter * subset_colors_width),
                    option.rect.top() + (
                        option.rect.height() - self.bar_height
                    ),
                    subset_colors_width,
                    self.bar_height
                )
            subset_rects.append((new_color, new_rect))
            counter += 1

        # Background
        if option.state & QtWidgets.QStyle.State_Selected:
            if len(subset_colors) == 0:
                item_rect.setTop(item_rect.top() + (self.bar_height / 2))

            if option.state & QtWidgets.QStyle.State_MouseOver:
                bg_color = self._selected_hover_color
            else:
                bg_color = self._selected_color
        else:
            item_rect.setTop(item_rect.top() + (self.bar_height / 2))
            if option.state & QtWidgets.QStyle.State_MouseOver:
                bg_color = self._hover_color
            else:
                # Fully transparent background for plain unselected rows.
                bg_color = QtGui.QColor()
                bg_color.setAlpha(0)

        # When not needed to do a rounded corners (easier and without
        #   painter restore):
        painter.fillRect(
            option.rect,
            QtGui.QBrush(bg_color)
        )

        # Underline bars are drawn only for selected rows.
        if option.state & QtWidgets.QStyle.State_Selected:
            for color, subset_rect in subset_rects:
                if not color or not subset_rect:
                    continue
                painter.fillRect(subset_rect, QtGui.QBrush(color))

        # Icon
        icon_index = index.model().index(
            index.row(), index.column(), index.parent()
        )
        # - Default icon_rect if not icon
        icon_rect = QtCore.QRect(
            item_rect.left(),
            item_rect.top(),
            # To make sure it's same size all the time
            option.rect.height() - self.bar_height,
            option.rect.height() - self.bar_height
        )
        icon = index.model().data(icon_index, QtCore.Qt.DecorationRole)

        if icon:
            mode = QtGui.QIcon.Normal
            if not (option.state & QtWidgets.QStyle.State_Enabled):
                mode = QtGui.QIcon.Disabled
            elif option.state & QtWidgets.QStyle.State_Selected:
                mode = QtGui.QIcon.Selected

            # The DecorationRole may hold several image-like types; each is
            # normalized to a QIcon before painting.
            if isinstance(icon, QtGui.QPixmap):
                icon = QtGui.QIcon(icon)
                option.decorationSize = icon.size() / icon.devicePixelRatio()

            elif isinstance(icon, QtGui.QColor):
                pixmap = QtGui.QPixmap(option.decorationSize)
                pixmap.fill(icon)
                icon = QtGui.QIcon(pixmap)

            elif isinstance(icon, QtGui.QImage):
                icon = QtGui.QIcon(QtGui.QPixmap.fromImage(icon))
                option.decorationSize = icon.size() / icon.devicePixelRatio()

            elif isinstance(icon, QtGui.QIcon):
                state = QtGui.QIcon.Off
                if option.state & QtWidgets.QStyle.State_Open:
                    state = QtGui.QIcon.On
                actual_size = option.icon.actualSize(
                    option.decorationSize, mode, state
                )
                option.decorationSize = QtCore.QSize(
                    min(option.decorationSize.width(), actual_size.width()),
                    min(option.decorationSize.height(), actual_size.height())
                )

            state = QtGui.QIcon.Off
            if option.state & QtWidgets.QStyle.State_Open:
                state = QtGui.QIcon.On

            icon.paint(
                painter, icon_rect,
                QtCore.Qt.AlignLeft, mode, state
            )

        # Text
        text_rect = QtCore.QRect(
            icon_rect.left() + icon_rect.width() + 2,
            item_rect.top(),
            item_rect.width(),
            item_rect.height()
        )

        painter.drawText(
            text_rect, QtCore.Qt.AlignVCenter,
            index.data(QtCore.Qt.DisplayRole)
        )

        painter.restore()
class AssetModel(QtGui.QStandardItemModel):
"""A model listing assets in the active project.
The assets are displayed in a treeview, they are visually parented by
a `visualParent` field in the database containing an `_id` to a parent
asset.
Asset document may have defined label, icon or icon color.
Loading of data for model happens in thread which means that refresh
is not sequential. When refresh is triggered it is required to listen for
'refreshed' signal.
Args:
dbcon (AvalonMongoDB): Ready to use connection to mongo with.
parent (QObject): Parent Qt object.
"""
_doc_fetched = QtCore.Signal()
refreshed = QtCore.Signal(bool)
# Asset document projection
_asset_projection = {
"name": 1,
"parent": 1,
"data.visualParent": 1,
"data.label": 1,
"data.icon": 1,
"data.color": 1
}
    def __init__(self, dbcon, parent=None):
        super(AssetModel, self).__init__(parent=parent)
        self.dbcon = dbcon
        # True while a background fetch of asset documents is in progress.
        self._refreshing = False
        # Worker thread started by refresh(); see _threaded_fetch.
        self._doc_fetching_thread = None
        # Abort flag -- presumably polled by the worker thread; confirm in
        # _threaded_fetch (not visible here).
        self._doc_fetching_stop = False
        # Documents collected by the worker, consumed in _on_docs_fetched.
        self._doc_payload = []
        # _doc_fetched is emitted by the worker; connecting it here routes
        # the result handling back through Qt's signal delivery.
        self._doc_fetched.connect(self._on_docs_fetched)
        # Asset ids that currently have underline colors applied.
        self._item_ids_with_color = set()
        # QStandardItem cache keyed by asset id for O(1) index lookups.
        self._items_by_asset_id = {}
        # Last project the model was filled for; a change clears the model.
        self._last_project_name = None
    @property
    def refreshing(self):
        """bool: True while a background document fetch is running."""
        return self._refreshing
def get_index_by_asset_id(self, asset_id):
item = self._items_by_asset_id.get(asset_id)
if item is not None:
return item.index()
return QtCore.QModelIndex()
def get_indexes_by_asset_ids(self, asset_ids):
return [
self.get_index_by_asset_id(asset_id)
for asset_id in asset_ids
]
def get_index_by_asset_name(self, asset_name):
indexes = self.get_indexes_by_asset_names([asset_name])
for index in indexes:
if index.isValid():
return index
return indexes[0]
def get_indexes_by_asset_names(self, asset_names):
asset_ids_by_name = {
asset_name: None
for asset_name in asset_names
}
for asset_id, item in self._items_by_asset_id.items():
asset_name = item.data(ASSET_NAME_ROLE)
if asset_name in asset_ids_by_name:
asset_ids_by_name[asset_name] = asset_id
asset_ids = [
asset_ids_by_name[asset_name]
for asset_name in asset_names
]
return self.get_indexes_by_asset_ids(asset_ids)
    def refresh(self, force=False):
        """Refresh the data for the model.

        Starts a background thread that fetches asset documents; listen to
        the 'refreshed' signal for completion (refresh is not sequential).

        Args:
            force (bool): Stop a currently running refresh and start a
                new one.
        """
        # Skip fetch if there is already other thread fetching documents
        if self._refreshing:
            if not force:
                return
            self.stop_refresh()

        # Switching projects invalidates all cached items.
        project_name = self.dbcon.Session.get("AVALON_PROJECT")
        clear_model = False
        if project_name != self._last_project_name:
            clear_model = True
            self._last_project_name = project_name

        if clear_model:
            self._clear_items()

        # Fetch documents from mongo
        # Restart payload
        self._refreshing = True
        self._doc_payload = []
        self._doc_fetching_thread = DynamicQThread(self._threaded_fetch)
        self._doc_fetching_thread.start()
    def stop_refresh(self):
        """Abort a running refresh by stopping the fetch thread (if any)."""
        self._stop_fetch_thread()
def clear_underlines(self):
for asset_id in set(self._item_ids_with_color):
self._item_ids_with_color.remove(asset_id)
item = self._items_by_asset_id.get(asset_id)
if item is not None:
item.setData(None, ASSET_UNDERLINE_COLORS_ROLE)
def set_underline_colors(self, colors_by_asset_id):
self.clear_underlines()
for asset_id, colors in colors_by_asset_id.items():
item = self._items_by_asset_id.get(asset_id)
if item is None:
continue
item.setData(colors, ASSET_UNDERLINE_COLORS_ROLE)
self._item_ids_with_color.add(asset_id)
    def _clear_items(self):
        """Drop all rows and reset the per-asset caches."""
        root_item = self.invisibleRootItem()
        root_item.removeRows(0, root_item.rowCount())
        self._items_by_asset_id = {}
        self._item_ids_with_color = set()
    def _on_docs_fetched(self):
        """Consume the fetched documents on the main thread.

        Fills the model, emits 'refreshed' with whether any items exist,
        then stops the fetch thread.
        """
        # Make sure refreshing did not change
        # - since this line is refreshing sequential and
        #   triggering of new refresh will happen when this method is done
        if not self._refreshing:
            # Refresh was cancelled in the meantime; discard everything.
            self._clear_items()
            return

        self._fill_assets(self._doc_payload)

        self.refreshed.emit(bool(self._items_by_asset_id))

        self._stop_fetch_thread()
def _fill_assets(self, asset_docs):
    """Diff the model's item tree against freshly fetched asset documents.

    Existing items are reused (and re-parented when their 'visualParent'
    changed), items for removed assets are destroyed and missing items
    are created, so selection and view state survive a refresh.
    """
    # Collect asset documents as needed
    asset_ids = set()
    asset_docs_by_id = {}
    asset_ids_by_parents = collections.defaultdict(set)
    for asset_doc in asset_docs:
        asset_id = asset_doc["_id"]
        asset_data = asset_doc.get("data") or {}
        parent_id = asset_data.get("visualParent")
        asset_ids.add(asset_id)
        asset_docs_by_id[asset_id] = asset_doc
        asset_ids_by_parents[parent_id].add(asset_id)

    # Ids cached from the previous fill that no longer exist in the db.
    removed_asset_ids = (
        set(self._items_by_asset_id.keys()) - set(asset_docs_by_id.keys())
    )

    # Breadth-first walk over (parent id, parent item) pairs.
    asset_items_queue = collections.deque()
    # Queue starts with root item and 'visualParent' None
    root_item = self.invisibleRootItem()
    asset_items_queue.append((None, root_item))

    while asset_items_queue:
        # Get item from queue
        parent_id, parent_item = asset_items_queue.popleft()
        # Children the db says this parent should have.
        children_ids = asset_ids_by_parents[parent_id]

        # Go through current children of parent item
        # - find out items that were deleted and skip creation of already
        #   existing items; iterate in reverse so row removal is safe
        for row in reversed(range(parent_item.rowCount())):
            child_item = parent_item.child(row, 0)
            asset_id = child_item.data(ASSET_ID_ROLE)
            # Remove item that is not available under this parent anymore
            if asset_id not in children_ids:
                if asset_id in removed_asset_ids:
                    # Remove and destroy row
                    parent_item.removeRow(row)
                else:
                    # Just take the row from parent without destroying
                    # (the item will be re-added under its new parent)
                    parent_item.takeRow(row)
                continue

            # Remove asset id from `children_ids` set
            # - what remains in the set afterwards are the "new items"
            children_ids.remove(asset_id)
            # Add existing children to queue
            asset_items_queue.append((asset_id, child_item))

        new_items = []
        for asset_id in children_ids:
            # Look for item in cache (maybe parent changed)
            item = self._items_by_asset_id.get(asset_id)
            # Create new item if it was not found
            if item is None:
                item = QtGui.QStandardItem()
                item.setEditable(False)
                item.setData(asset_id, ASSET_ID_ROLE)
                self._items_by_asset_id[asset_id] = item
            new_items.append(item)
            # Add item to queue so its subtree is processed too
            asset_items_queue.append((asset_id, item))

        if new_items:
            parent_item.appendRows(new_items)

    # Remove cache of removed items
    for asset_id in removed_asset_ids:
        self._items_by_asset_id.pop(asset_id)

    # Refresh data
    # - all items refresh all data except id
    for asset_id, item in self._items_by_asset_id.items():
        asset_doc = asset_docs_by_id[asset_id]

        asset_name = asset_doc["name"]
        if item.data(ASSET_NAME_ROLE) != asset_name:
            item.setData(asset_name, ASSET_NAME_ROLE)

        asset_data = asset_doc.get("data") or {}
        # Fall back to the asset name when no explicit label is set.
        asset_label = asset_data.get("label") or asset_name
        if item.data(ASSET_LABEL_ROLE) != asset_label:
            item.setData(asset_label, QtCore.Qt.DisplayRole)
            item.setData(asset_label, ASSET_LABEL_ROLE)

        has_children = item.rowCount() > 0
        icon = get_asset_icon(asset_doc, has_children)
        item.setData(icon, QtCore.Qt.DecorationRole)
def _threaded_fetch(self):
    """Worker-thread entry point: query documents and hand them over."""
    asset_docs = self._fetch_asset_docs()
    # Refresh may have been cancelled while the query was running.
    if not self._refreshing:
        return

    self._doc_payload = asset_docs
    # Emit doc fetched only if it was not stopped
    self._doc_fetched.emit()
def _fetch_asset_docs(self):
    """Return all asset documents of the active project (runs in worker)."""
    if not self.dbcon.Session.get("AVALON_PROJECT"):
        return []

    # Cheap probe that the project document exists before listing assets.
    project_doc = self.dbcon.find_one(
        {"type": "project"},
        {"_id": True}
    )
    if not project_doc:
        return []

    # NOTE(review): no sort is applied here despite the original
    # "sorted by name" comment -- presumably ordering is left to the
    # proxy model; confirm before relying on document order.
    return list(self.dbcon.find(
        {"type": "asset"},
        self._asset_projection
    ))
def _stop_fetch_thread(self):
    # Clearing the flag makes the worker bail out before emitting results.
    self._refreshing = False
    if self._doc_fetching_thread is not None:
        # Short busy-wait for the thread to finish its current step.
        while self._doc_fetching_thread.isRunning():
            time.sleep(0.01)
        self._doc_fetching_thread = None
class AssetsWidget(QtWidgets.QWidget):
    """Base widget to display a tree of assets with filter.

    Assets have only one column and are sorted by name.

    Refreshing of assets happens in a thread so calling 'refresh' method
    is not sequential. To capture the moment when refreshing is finished
    listen to the 'refreshed' signal.

    To capture selection changes listen to 'selection_changed' signal. It
    won't send any information about the new selection as it may differ
    based on inheritance changes.

    Args:
        dbcon (AvalonMongoDB): Connection to avalon mongo db.
        parent (QWidget): Parent Qt widget.
    """

    # on model refresh
    refresh_triggered = QtCore.Signal()
    refreshed = QtCore.Signal()
    # on view selection change
    selection_changed = QtCore.Signal()
    # It was double clicked on view
    double_clicked = QtCore.Signal()

    def __init__(self, dbcon, parent=None):
        super(AssetsWidget, self).__init__(parent=parent)

        self.dbcon = dbcon

        # Tree View: source model wrapped in a filtering/sorting proxy.
        model = self._create_source_model()
        proxy = self._create_proxy_model(model)
        view = AssetsView(self)
        view.setModel(proxy)

        header_widget = QtWidgets.QWidget(self)

        # Button jumping to the asset of the current avalon session.
        current_asset_icon = qtawesome.icon(
            "fa.arrow-down", color=get_default_tools_icon_color()
        )
        current_asset_btn = QtWidgets.QPushButton(header_widget)
        current_asset_btn.setIcon(current_asset_icon)
        current_asset_btn.setToolTip("Go to Asset from current Session")
        # Hide by default
        current_asset_btn.setVisible(False)

        refresh_icon = qtawesome.icon(
            "fa.refresh", color=get_default_tools_icon_color()
        )
        refresh_btn = QtWidgets.QPushButton(header_widget)
        refresh_btn.setIcon(refresh_icon)
        refresh_btn.setToolTip("Refresh items")

        filter_input = PlaceholderLineEdit(header_widget)
        filter_input.setPlaceholderText("Filter assets..")

        # Header
        header_layout = QtWidgets.QHBoxLayout(header_widget)
        header_layout.setContentsMargins(0, 0, 0, 0)
        header_layout.addWidget(filter_input)
        header_layout.addWidget(current_asset_btn)
        header_layout.addWidget(refresh_btn)

        # Make header widgets expand vertically if there is space
        for widget in (
            current_asset_btn,
            refresh_btn,
            filter_input,
        ):
            size_policy = widget.sizePolicy()
            size_policy.setVerticalPolicy(size_policy.MinimumExpanding)
            widget.setSizePolicy(size_policy)

        # Layout
        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(header_widget, 0)
        layout.addWidget(view, 1)

        # Signals/Slots
        filter_input.textChanged.connect(self._on_filter_text_change)
        selection_model = view.selectionModel()
        selection_model.selectionChanged.connect(self._on_selection_change)
        refresh_btn.clicked.connect(self.refresh)
        current_asset_btn.clicked.connect(self._on_current_asset_click)
        view.doubleClicked.connect(self.double_clicked)

        self._header_widget = header_widget
        self._filter_input = filter_input
        self._refresh_btn = refresh_btn
        self._current_asset_btn = current_asset_btn

        self._model = model
        self._proxy = proxy
        self._view = view

        self._last_project_name = None
        self._last_btns_height = None

        self.model_selection = {}

    @property
    def header_widget(self):
        # Exposed so tools can embed extra controls next to the filter.
        return self._header_widget

    def _create_source_model(self):
        # Override point: subclasses may return a different source model,
        # but must call '_on_model_refresh' when refreshing is done.
        model = AssetModel(dbcon=self.dbcon, parent=self)
        model.refreshed.connect(self._on_model_refresh)
        return model

    def _create_proxy_model(self, source_model):
        # Case-insensitive recursive filtering + sorting proxy.
        proxy = RecursiveSortFilterProxyModel()
        proxy.setSourceModel(source_model)
        proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
        proxy.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
        return proxy

    @property
    def refreshing(self):
        # True while the source model's background fetch is running.
        return self._model.refreshing

    def refresh(self):
        self._refresh_model()

    def stop_refresh(self):
        self._model.stop_refresh()

    def _get_current_session_asset(self):
        return self.dbcon.Session.get("AVALON_ASSET")

    def _on_current_asset_click(self):
        """Trigger change of asset to current context asset.

        This separation gives the ability to override this method and use
        it in a different way.
        """
        self.set_current_session_asset()

    def set_current_session_asset(self):
        asset_name = self._get_current_session_asset()
        if asset_name:
            self.select_asset_by_name(asset_name)

    def set_refresh_btn_visibility(self, visible=None):
        """Hide/show the refresh button.

        Some tools may have their own global refresh button or do not
        support refresh at all. Passing None toggles the current state.
        """
        if visible is None:
            visible = not self._refresh_btn.isVisible()
        self._refresh_btn.setVisible(visible)

    def set_current_asset_btn_visibility(self, visible=None):
        """Hide/show the "go to current asset" button.

        Not all tools support using of current context asset.
        Passing None toggles the current state.
        """
        if visible is None:
            visible = not self._current_asset_btn.isVisible()
        self._current_asset_btn.setVisible(visible)

    def select_asset(self, asset_id):
        index = self._model.get_index_by_asset_id(asset_id)
        new_index = self._proxy.mapFromSource(index)
        self._select_indexes([new_index])

    def select_asset_by_name(self, asset_name):
        index = self._model.get_index_by_asset_name(asset_name)
        new_index = self._proxy.mapFromSource(index)
        self._select_indexes([new_index])

    def activate_flick_charm(self):
        # Touch-friendly kinetic scrolling on the view.
        self._view.activate_flick_charm()

    def deactivate_flick_charm(self):
        self._view.deactivate_flick_charm()

    def _on_selection_change(self):
        self.selection_changed.emit()

    def _on_filter_text_change(self, new_text):
        self._proxy.setFilterFixedString(new_text)

    def _on_model_refresh(self, has_item):
        """Called when the source model finished refreshing.

        Default implementation registers this callback in
        '_create_source_model' so if you're modifying the model keep in
        mind that this method should be called when refresh is done.
        """
        self._proxy.sort(0)
        self._set_loading_state(loading=False, empty=not has_item)
        self.refreshed.emit()

    def _refresh_model(self):
        # Store selection
        self._set_loading_state(loading=True, empty=True)

        # Trigger signal before refresh is called
        self.refresh_triggered.emit()
        # Refresh model
        self._model.refresh()

    def _set_loading_state(self, loading, empty):
        self._view.set_loading_state(loading, empty)

    def _clear_selection(self):
        selection_model = self._view.selectionModel()
        selection_model.clearSelection()

    def _select_indexes(self, indexes):
        # Keep only indexes that survived the proxy mapping.
        valid_indexes = [
            index
            for index in indexes
            if index.isValid()
        ]
        if not valid_indexes:
            return

        selection_model = self._view.selectionModel()
        selection_model.clearSelection()

        mode = selection_model.Select | selection_model.Rows
        for index in valid_indexes:
            # Expand the parent so the selected row is actually visible.
            self._view.expand(self._proxy.parent(index))
            selection_model.select(index, mode)
        self._view.setCurrentIndex(valid_indexes[0])
class SingleSelectAssetsWidget(AssetsWidget):
    """Assets widget restricted to a single selected row.

    Adds convenience getters for the one selected asset.
    """

    def get_selected_asset_id(self):
        """Currently selected asset id."""
        rows = self._view.selectionModel().selectedRows()
        if not rows:
            return None
        return rows[0].data(ASSET_ID_ROLE)

    def get_selected_asset_name(self):
        """Currently selected asset name."""
        rows = self._view.selectionModel().selectedRows()
        if not rows:
            return None
        return rows[0].data(ASSET_NAME_ROLE)
class MultiSelectAssetsWidget(AssetsWidget):
    """Assets widget with extended (multi) selection.

    Main purpose is for loader and library loader. If another tool would
    use multiselection assets this widget should be split and loader's
    logic separated.
    """

    def __init__(self, *args, **kwargs):
        super(MultiSelectAssetsWidget, self).__init__(*args, **kwargs)
        self._view.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection)

        # Delegate painting the per-asset underline colors.
        delegate = UnderlinesAssetDelegate()
        self._view.setItemDelegate(delegate)
        self._delegate = delegate

    def get_selected_asset_ids(self):
        """Currently selected asset ids."""
        return self._collect_selected_data(ASSET_ID_ROLE)

    def get_selected_asset_names(self):
        """Currently selected asset names."""
        return self._collect_selected_data(ASSET_NAME_ROLE)

    def _collect_selected_data(self, role):
        # Shared implementation of the selection getters above.
        selection_model = self._view.selectionModel()
        output = []
        for index in selection_model.selectedRows():
            output.append(index.data(role))
        return output

    def select_assets(self, asset_ids):
        """Select assets by their ids.

        Args:
            asset_ids (list): List of asset ids.
        """
        source_indexes = self._model.get_indexes_by_asset_ids(asset_ids)
        self._select_source_indexes(source_indexes)

    def select_assets_by_name(self, asset_names):
        """Select assets by their names.

        Args:
            asset_names (list): List of asset names.
        """
        source_indexes = self._model.get_indexes_by_asset_names(asset_names)
        self._select_source_indexes(source_indexes)

    def _select_source_indexes(self, source_indexes):
        # Map source-model indexes through the proxy before selecting.
        self._select_indexes([
            self._proxy.mapFromSource(index)
            for index in source_indexes
        ])

    def clear_underlines(self):
        """Clear underline colors on all asset items."""
        self._model.clear_underlines()

        # Trigger repaint
        self._view.updateGeometries()

    def set_underline_colors(self, colors_by_asset_id):
        """Change underline colors for passed assets.

        Args:
            colors_by_asset_id (dict): Key is asset id and value is list
                of underline colors.
        """
        self._model.set_underline_colors(colors_by_asset_id)
        # Trigger repaint
        self._view.updateGeometries()
| StarcoderdataPython |
import numpy as np
import pandas as pd
import warnings

# Silence pandas/sklearn FutureWarnings so the script output stays readable.
warnings.simplefilter(action='ignore', category=FutureWarning)

from sklearn.model_selection import train_test_split, cross_validate
from sklearn.preprocessing import OneHotEncoder, StandardScaler, OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.svm import SVC

# Loading in the data; 'legendary' is the classification target.
pk_df = pd.read_csv('data/pokemon.csv')

# 80/20 train/test split with a fixed seed for reproducibility.
train_df, test_df = train_test_split(pk_df, test_size=0.2, random_state=1)

X_train = train_df.drop(columns=['legendary'])
y_train = train_df['legendary']
X_test = test_df.drop(columns=['legendary'])
y_test = test_df['legendary']

# Numeric columns: median-imputed then standardized.
numeric_features = ["deck_no",
                    "attack",
                    "defense",
                    "sp_attack",
                    "sp_defense",
                    "speed",
                    "capture_rt",
                    "total_bs"]

# Categorical columns: mode-imputed then one-hot encoded.
categorical_features = ["type"]

numeric_transformer = make_pipeline(SimpleImputer(strategy="median"), StandardScaler())

categorical_transformer = make_pipeline(
    SimpleImputer(strategy="most_frequent"),
    OneHotEncoder(handle_unknown="ignore"))

preprocessor = make_column_transformer(
    (numeric_transformer, numeric_features),
    (categorical_transformer, categorical_features))

# Build a pipeline containing the column transformer and an SVC model
# Use the parameter class_weight="balanced" (the target classes are
# heavily imbalanced: legendary pokemon are rare).
# Name this pipeline main_pipe
main_pipe = make_pipeline(preprocessor, SVC(class_weight="balanced"))

# Perform cross validation on the training split using the scoring
# measures accuracy, precision and recall.
# Save the results in a dataframe named multi_scores
multi_scores = pd.DataFrame(cross_validate(main_pipe,
                                           X_train,
                                           y_train,
                                           return_train_score=True,
                                           scoring=['accuracy', 'precision', 'recall']))
multi_scores
| StarcoderdataPython |
from scipy.io import loadmat
import numpy as np
from matplotlib import pyplot as plt

# This script prints selected frames of the stored escalator video sequence.
# Each column of X is one grayscale frame, flattened in Fortran order.
data = loadmat('escalator_130p.mat')
X = data["X"]
dimensions = data["dimensions"][0]

framenumbers = [1806, 1813, 1820]

for framenumber in framenumbers:
    imagename = 'frames/escalator_' + '{:03d}'.format(framenumber) + '.png'
    # NOTE(review): the column index is framenumber+1 -- presumably
    # compensating for 1-based frame numbering in the source data;
    # confirm the off-by-one is intentional.
    frame = np.reshape(X[:, framenumber + 1], dimensions, order='F')
    plt.figure()
    fig = plt.imshow(frame, cmap="gray", clim=(0, 255))
    # Hide both axes so only the raw frame pixels are saved.
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.tight_layout()
    plt.savefig(imagename, dpi=300, bbox_inches='tight')
    plt.close()
5048158 | <gh_stars>0
import numpy as np
class Optimizer_Adam:
def __init__(
self, learning_rate=0.001, decay=0.0, epsilon=1e-7, beta_1=0.9, beta_2=0.999
):
self.learning_rate = learning_rate
self.current_learning_rate = learning_rate
self.decay = decay
self.iterations = 0
self.epsilon = epsilon
self.beta_1 = beta_1
self.beta_2 = beta_2
def pre_optimize(self):
if self.decay:
self.current_learning_rate = self.current_learning_rate * (
1.0 / (1.0 + self.decay * self.iterations)
)
def optimize(self, layer):
if not hasattr(layer, "weight_cache"):
layer.weight_momentums = np.zeros_like(layer.weights)
layer.weight_cache = np.zeros_like(layer.weights)
layer.bias_momentums = np.zeros_like(layer.biases)
layer.bias_cache = np.zeros_like(layer.biases)
layer.weight_momentums = (
self.beta_1 * layer.weight_momentums + (1 - self.beta_1) * layer.dweights
)
layer.bias_momentums = (
self.beta_1 * layer.bias_momentums + (1 - self.beta_1) * layer.dbiases
)
weight_momentums_corrected = layer.weight_momentums / (
1 - self.beta_1 ** (self.iterations + 1)
)
bias_momentums_corrected = layer.bias_momentums / (
1 - self.beta_1 ** (self.iterations + 1)
)
layer.weight_cache = (
self.beta_2 * layer.weight_cache + (1 - self.beta_2) * layer.dweights ** 2
)
layer.bias_cache = (
self.beta_2 * layer.bias_cache + (1 - self.beta_2) * layer.dbiases ** 2
)
weight_cache_corrected = layer.weight_cache / (
1 - self.beta_2 ** (self.iterations + 1)
)
bias_cache_corrected = layer.bias_cache / (
1 - self.beta_2 ** (self.iterations + 1)
)
layer.weights += (
-self.current_learning_rate
* weight_momentums_corrected
/ (np.sqrt(weight_cache_corrected) + self.epsilon)
)
layer.biases += (
-self.current_learning_rate
* bias_momentums_corrected
/ (np.sqrt(bias_cache_corrected) + self.epsilon)
)
def post_optimize(self):
self.iterations += 1
| StarcoderdataPython |
6414782 | <filename>eff_word_net/__init__.py<gh_stars>10-100
"""
.. include:: ../README.md
"""
import os
RATE=16000
samples_loc = os.path.join(os.path.dirname(os.path.realpath(__file__)),"sample_refs")
from eff_word_net.package_installation_scripts import check_install_tflite
check_install_tflite()
| StarcoderdataPython |
1746453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : model.py
# @Author: zixiao
# @Date : 2019-04-07
# @Desc :
from torch import nn
import torch.nn.functional as F
import torch
class CNN(nn.Module):
    """Three conv blocks followed by a two-layer fully connected head.

    The fully connected input size (2 * 1 * 128) is fixed, so the
    forward pass expects inputs of spatial size 178 x 144.
    """

    def __init__(self, in_channels, num_action):
        super(CNN, self).__init__()
        # NOTE: module construction order matters for seed-reproducible init.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=1, stride=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.fc4 = nn.Linear(in_features=2 * 1 * 128, out_features=256)
        self.fc5 = nn.Linear(in_features=256, out_features=num_action)

    def forward(self, x):
        # (B, C, 178, 144) -> conv1 (B, 32, 21, 17) -> conv2 (B, 64, 4, 3)
        # -> conv3 (B, 128, 2, 1)
        features = self.conv3(self.conv2(self.conv1(x)))
        flat = features.view(features.size(0), -1)  # (B, 256)
        return self.fc5(self.fc4(flat))
| StarcoderdataPython |
3436084 | import daskutils.math
import daskutils.base
import daskutils.io.msgpack
import dask.bag
import dask.distributed
import os.path
import uuid
import msgpack
import itertools
import contextlib
import socket
import subprocess
import base64
@contextlib.contextmanager
def worker_client(*arg, **kw):
    """Yield a dask worker client, or None when not running on a worker.

    dask.distributed.worker_client raises ValueError when invoked outside
    a worker thread; in that case yield None so the caller's body can
    still run (synchronously).
    """
    started = False
    try:
        with dask.distributed.worker_client(*arg, **kw) as c:
            started = True
            yield c
    except ValueError:
        # Only swallow the ValueError if it came from entering the client;
        # a ValueError raised by the caller's own body must propagate.
        if started:
            raise
        else:
            yield None
@contextlib.contextmanager
def debugopen(filename, *arg, **kw):
    """open() wrapper raising a more informative FileNotFoundError.

    On failure, the message includes the hostname, the deepest existing
    ancestor directory and its listing -- useful when distributed workers
    see different filesystems.
    """
    try:
        with open(filename, *arg, **kw) as f:
            yield f
    except FileNotFoundError:
        base = existing_base(filename)
        # Inside containers the real host name may be mounted at a
        # well-known path; fall back to the local hostname otherwise.
        if os.path.exists("/etc/host-hostname"):
            with open("/etc/host-hostname") as f:
                hostname = f.read().strip()
        else:
            hostname = socket.gethostname()
        raise FileNotFoundError("%s:%s: only %s exists, containing %s" % (hostname, filename, base, os.listdir(base)))
def existing_base(path):
    """Return the deepest existing ancestor of *path* (possibly the root).

    Fixes two defects of the previous version: it looped forever on
    relative paths (os.path.split("x")[0] == "" never equals "/"), and it
    hit ``assert False`` instead of returning "/" when only the
    filesystem root exists.

    Raises:
        FileNotFoundError: if no ancestor of *path* exists at all.
    """
    while True:
        if os.path.exists(path):
            return path
        parent = os.path.split(path)[0]
        if parent == path:
            # Reached a fixpoint ("/" or "") without finding anything.
            raise FileNotFoundError("no existing ancestor for %r" % path)
        path = parent
class SortUnit(object):
    """Node of a sorted-run tree used by MergeSort.

    A leaf unit (``data`` set) points at an on-disk file of
    ``key||base64(msgpack)`` lines sorted by key; an inner unit holds two
    children ``a`` and ``b`` where every key in ``a`` precedes every key
    in ``b``. ``minval``/``maxval`` are key tuples bounding the unit and
    ``count`` is the number of items. Heavy operations are dask.delayed
    and may recurse through worker clients.
    """

    def __init__(self, mergesort, minval=None, maxval=None, count=None, data=None, a=None, b=None):
        assert count is not None
        self.mergesort = mergesort
        self.minval = minval
        self.maxval = maxval
        self.count = count
        self.data = data
        self.a = a
        self.b = b
        # Keys must be tuples (see MergeSort.key); raw floats would break
        # the lexicographic text sort used by the external `sort` calls.
        assert not isinstance(minval, float)
        assert not isinstance(maxval, float)

    def construct(self, *arg, **kwarg):
        """Create a sibling SortUnit bound to the same MergeSort."""
        return SortUnit(self.mergesort, *arg, **kwarg)

    @classmethod
    @dask.delayed
    def merge2(cls, a, b):
        """Delayed entry point merging two already-computed units."""
        with worker_client() as client:
            return a.merge(b).compute()

    def append(self, other):
        """Concatenate two non-overlapping units into a balanced tree.

        Assumes all keys of one unit precede all keys of the other
        (callers check the min/max bounds first).
        """
        # Append and create a balanced tree
        if other is None:
            return self
        if self.minval < other.minval:
            a, b = self, other
        else:
            a, b = other, self
        def build_tree(sort_units):
            # Recursively split the flat leaf list in half to balance.
            count = len(sort_units)
            if count <= 1:
                return sort_units[0]
            else:
                acount = count // 2
                a = build_tree(sort_units[:acount])
                b = build_tree(sort_units[acount:])
                return self.construct(
                    minval=a.minval,
                    maxval=b.maxval,
                    count=a.count + b.count,
                    a=a,
                    b=b)
        return build_tree(list(a.flatten()) + list(b.flatten()))

    @dask.delayed
    def merge(self, other):
        """Merge two sorted units, dispatching on overlap and node kind."""
        # print("Merging %s[%s,%s] and %s[%s,%s]" % (self.count, self.minval, self.maxval, other.count, other.minval, other.maxval))
        with worker_client() as client:
            if other is None:
                return self
            elif self.maxval < other.minval:
                # Disjoint ranges: plain concatenation, no re-sort needed.
                return self.append(other)
            elif other.maxval < self.minval:
                return other.append(self)
            elif self.data and other.data:
                # Two leaves: external `sort -m` merge on disk.
                return self.merge_simple(other).compute()
            elif self.data:
                # Normalize so the inner node drives the recursion.
                return other.merge(self).compute()
            else:
                # Split the other unit at our children's boundary and
                # merge each half into the matching child.
                return self.merge_splitted(other.split(self.b.minval)).compute()

    @dask.delayed
    def merge_splitted(self, items):
        """Merge the (low, high) halves into children a and b, then join."""
        a, b = items
        @dask.delayed
        def construct(a, b):
            return a.append(b)
        with worker_client() as client:
            return construct(self.a.merge(a), self.b.merge(b)).compute()

    @dask.delayed
    def split(self, value):
        """Split this unit at key *value* into a (low, high) pair."""
        with worker_client() as client:
            if self.maxval < value:
                return self, None
            elif self.minval > value:
                return None, self
            elif self.data:
                return self.split_simple(value).compute()
            else:
                # Recurse into whichever child straddles the split key and
                # re-attach the untouched child on the appropriate side.
                if value < self.b.minval:
                    a, b = self.a.split(value).compute()
                    return a, self.b.append(b)
                else:
                    a, b = self.b.split(value).compute()
                    return self.a.append(a), b

    @dask.delayed
    def split_simple(self, value):
        """Split a leaf file at key *value* using a bash/sort/sed pipeline.

        A synthetic "||SPLIT" marker line is merged into the sorted stream
        and sed tees everything before it to <name>a and after it to
        <name>b (the marker itself is trimmed from both halves).
        """
        assert self.data
        tempdir = self.mergesort.tempdir
        splitline = self.mergesort.formatkey(value).decode("utf-8") + "||SPLIT"
        splitname = os.path.join(tempdir, str(uuid.uuid4()))
        subprocess.check_call(["bash", "-c", """
sort -k %(keydef)s -m "%(data)s" <(echo "%(split)s") |
sed '1,/%(split)s/w '>(head -n -1 > "%(splitname)sa")'\n/%(split)s/,$w '>(tail -n +2 > "%(splitname)sb")'' > /dev/null
""" % {
            "keydef": self.mergesort.keydef,
            "data": self.data,
            "split": splitline,
            "splitname": splitname
        }])
        # Bounds/counts of the halves: max key of the low half comes from
        # its last line; line counts from wc -l.
        amaxval = self.mergesort.itemkey(
            self.mergesort.loads(
                subprocess.check_output(["tail", "-n", "1", splitname + "a"]).split(b"||", 1)[1]))
        alen = int(subprocess.check_output(["wc", "-l", splitname + "a"]).split(b" ")[0])
        blen = int(subprocess.check_output(["wc", "-l", splitname + "b"]).split(b" ")[0])
        aout = self.construct(minval=self.minval, maxval=amaxval, count=alen, data=splitname + "a")
        bout = self.construct(minval=value, maxval=self.maxval, count=blen, data=splitname + "b")
        return aout, bout

    @dask.delayed
    def merge_simple(self, other):
        """Merge two leaf files with external `sort -m`, then re-balance.

        The merged file is split into two equal-line halves so leaf sizes
        stay bounded as merging proceeds.
        """
        assert self.data
        assert not self.a
        assert not self.b
        assert other.data
        assert not other.a
        assert not other.b
        tempdir = self.mergesort.tempdir
        key = self.mergesort.key
        mergedname = os.path.join(tempdir, str(uuid.uuid4()))
        subprocess.check_call(["sort", "-k", self.mergesort.keydef, "-o", mergedname, "-m", self.data, other.data])
        # split -n l/2 produces <mergedname>aa and <mergedname>ab.
        subprocess.check_call(["split", "-n", "l/2", mergedname, mergedname])
        aout = self.construct(count=0, data=mergedname + "aa")
        bout = self.construct(count=0, data=mergedname + "ab")
        # Scan each half to recover min/max keys and line counts.
        for out in (aout, bout):
            with debugopen(out.data, 'rb') as f:
                for line in f:
                    if out.minval is None:
                        out.minval = self.mergesort.itemkey(self.mergesort.loads(line.split(b"||", 1)[1]))
                    out.count += 1
                    out.maxval = self.mergesort.itemkey(self.mergesort.loads(line.split(b"||", 1)[1]))
        if bout.count == 0:
            # Everything landed in the first half; no inner node needed.
            return aout
        assert isinstance(aout.minval, tuple)
        assert isinstance(bout.minval, tuple)
        assert isinstance(aout.maxval, tuple)
        assert isinstance(bout.maxval, tuple)
        return self.construct(
            minval=min(aout.minval, bout.minval),
            maxval=max(aout.maxval, bout.maxval),
            count=self.count + other.count,
            a=aout,
            b=bout)

    def flatten(self):
        """Yield the leaf units of this subtree in key order."""
        if self.data:
            yield self
        else:
            for child in (self.a, self.b):
                for item in child.flatten():
                    yield item

    def read(self):
        """Yield deserialized items from a leaf unit's file, in order."""
        assert self.data
        with debugopen(self.data, 'rb') as f:
            for line in f:
                try:
                    yield self.mergesort.loads(line.split(b"||", 1)[1])
                except:
                    # Surface the offending file and line before re-raising.
                    print(self.data)
                    print(line)
                    raise
class MergeSort(object):
    """Disk-backed distributed merge sort for dask bags.

    Items are written to temp files as ``key||base64(msgpack)`` lines,
    each run is sorted with the external `sort` utility, and runs are
    merged pairwise via SortUnit into one sorted sequence.

    Args:
        tempdir (str): Directory for intermediate run files.
        key (callable): Maps an item to its sort key (scalar or tuple).
        partition_size (int): Max number of items per on-disk run.
    """

    def __init__(self, tempdir, key=lambda a: a, partition_size=2000):
        self.tempdir = tempdir
        self._key = key
        self.partition_size = partition_size

    def key(self, item):
        """Return the item's sort key normalized to a tuple."""
        itemkey = self._key(item)
        if not isinstance(itemkey, (tuple, list)):
            itemkey = (itemkey,)
        return itemkey

    def tempfile(self, *parents, **kw):
        """Build a temp file path encoding an optional op name and lineage.

        Parent file ids are embedded in the name purely to make the temp
        directory debuggable by eye.
        """
        fileid = str(uuid.uuid4())[:4]
        if "op" in kw:
            fileid = fileid + "-" + kw["op"]
        if parents:
            parents = [os.path.split(parent)[1][:-len(".msgpack")].split("-")[0] for parent in parents]
            if len(parents) == 1:
                parents = parents[0]
            else:
                parents = "{%s}" % (",".join(parents))
            fileid = fileid + "-" + parents
        return os.path.join(self.tempdir, "%s.msgpack" % (fileid,))

    def sort(self, data):
        """Sort a dask bag by the configured key and return a sorted bag."""
        # Derive the `sort -k` field spec from the first item's key:
        # numeric fields get the "n" modifier, others sort as text.
        keytypes = ["n" if type(keypart) in (int, float) else ""
                    for keypart in self.itemkey(data.take(1)[0])]
        self.keydef = ",".join("%s%s" % (idx+1, keytype) for (idx, keytype) in enumerate(keytypes))
        filenames = data.map_partitions(self.repartition_and_save)
        sort_units = [dask.delayed(sort_unit) for sort_unit in filenames.map(self.sort_sort_unit).compute()]
        sort_unit = self.merge_sort([dask.delayed(sort_unit) for sort_unit in sort_units])
        sort_unit = sort_unit.compute()
        # One partition per leaf file, each streaming its items in order.
        data = dask.bag.from_sequence(sort_unit.flatten(), 1)
        @data.map_partitions
        def data(part):
            return part[0].read()
        return data

    def dumps(self, item):
        # Base64 keeps the msgpack payload newline-free so one item maps
        # to exactly one text line for the external sort.
        return base64.b64encode(msgpack.dumps(item))

    def loads(self, line):
        try:
            return msgpack.loads(base64.b64decode(line), raw=False)
        except Exception as e:
            # Include the offending payload to ease debugging of corrupt runs.
            raise Exception("%s: %s" % (e, repr(line)))

    def itemkey(self, item):
        """Return the item's sort key normalized to a tuple."""
        itemkey = self.key(item)
        if not isinstance(itemkey, (tuple, list)):
            itemkey = (itemkey,)
        return itemkey

    def formatkey(self, itemkey):
        """Encode a key tuple as the '|'-separated text prefix of a line."""
        return b"|".join(str(keypart).encode("utf-8") for keypart in itemkey)

    def repartition_and_save(self, data):
        """Write a partition's items to runs of at most partition_size lines.

        Returns the (unsorted) SortUnit leaves describing each run file,
        with min/max key bounds and counts tracked while writing.
        """
        partitionid = self.tempfile()
        sort_units = []
        f = None
        p = 0
        s = None
        for idx, item in enumerate(data):
            part = idx // self.partition_size
            # Roll over to a new run file every partition_size items.
            if f is None or part != p:
                if f: f.close()
                p = part
                filename = self.tempfile(partitionid)
                f = open(filename, 'wb')
                s = self.sort_unit(minval=None, maxval=None, count=0, data=filename)
                sort_units.append(s)
            itemkey = self.key(item)
            if s.minval is None or s.minval > itemkey:
                s.minval = itemkey
            if s.maxval is None or s.maxval < itemkey:
                s.maxval = itemkey
            s.count += 1
            f.write(self.formatkey(itemkey))
            f.write(b"||")
            f.write(self.dumps(item))
            f.write(b"\n")
        if f: f.close()
        return sort_units

    def sort_sort_unit(self, sort_unit):
        """Sort one run file with the external `sort`; return a new leaf."""
        outfilename = self.tempfile(sort_unit.data, op="sort")
        subprocess.check_call(["sort", "-k", self.keydef, "-o", outfilename, sort_unit.data])
        return self.sort_unit(minval=sort_unit.minval, maxval=sort_unit.maxval, count=sort_unit.count, data=outfilename)

    def sort_unit(self, *arg, **kwarg):
        """Construct a SortUnit bound to this MergeSort."""
        return SortUnit(self, *arg, **kwarg)

    def merge_sort(self, sort_units, indent='>'):
        """Recursively pair-merge sorted units into a single delayed unit."""
        count = len(sort_units)
        if count <= 1:
            return sort_units[0]
        else:
            acount = count // 2
            a = self.merge_sort(sort_units[:acount], indent+"a")
            b = self.merge_sort(sort_units[acount:], indent+"b")
            return SortUnit.merge2(a, b)
| StarcoderdataPython |
6405210 | #!/usr/bin/python
from flask import Flask, request, Response, jsonify, abort
from functools import wraps
import ssl
import logging, sys
import json
app = Flask(__name__)
app.config['DEBUG'] = True
import jwt
HMAC_SECRET='secret'
@app.route('/', methods = ['GET'])
def Default():
    """Health-check endpoint returning plain-text 'ok'.

    Fix: the previous version built a Response and set its Content-Type
    header, but then returned a bare string -- discarding the Response so
    the header was never applied. Return the configured Response instead.
    """
    resp = Response('ok')
    resp.headers['Content-Type'] = 'text/plain'
    return resp
@app.route('/authenticate', methods = ['POST'])
def Authenticate():
    """Kubernetes TokenReview authentication webhook.

    Decodes the bearer token from the TokenReview request as an HS256
    JWT. On success, answers with an authenticated user record built
    from the token's 'username' claim; any decode failure (or a missing
    claim) yields an unauthenticated review.
    """
    print('Requesting Authn' )
    parsed = json.loads(request.data)
    print(json.dumps(parsed, indent=4, sort_keys=True))
    token = parsed['spec']['token']
    # NOTE(security): HMAC_SECRET is a hardcoded demo value; in a real
    # deployment inject the signing secret from the environment/secret store.
    try:
        decoded = jwt.decode(token, HMAC_SECRET, algorithms=['HS256'])
        username = decoded['username']
        r = {
            "apiVersion": "authentication.k8s.io/v1beta1",
            "kind": "TokenReview",
            "status": {
                "authenticated": True,
                "user": {
                    "extra": {
                        "extrafield1": [
                            "extravalue1",
                            "extravalue2"
                        ]
                    },
                    "groups": [
                        "developers",
                        "qa"
                    ],
                    "uid": "42",
                    "username": username
                }
            }
        }
    except Exception:
        # Invalid/expired signature or missing 'username' claim.
        r = {
            "apiVersion": "authentication.k8s.io/v1beta1",
            "kind": "TokenReview",
            "status": {
                "authenticated": False
            }
        }
    return jsonify(r)
@app.route('/authorize', methods = ['POST'])
def Authorize():
    """Kubernetes SubjectAccessReview authorization webhook.

    Demo policy: everyone is allowed, except the configured user when the
    request targets the 'pods' resource. Non-resource requests (no
    'resourceAttributes') are always allowed.

    Fix: removed a dead ``resp = Response()`` block -- the response object
    was created and had headers set but was never returned (the handler
    returns jsonify(r)).
    """
    print('Requesting Authz' )
    parsed = json.loads(request.data)
    print(json.dumps(parsed, indent=4, sort_keys=True))
    allowed = True
    user = parsed['spec']['user']
    try:
        resource = parsed['spec']['resourceAttributes']['resource']
        print(resource)
        if (user == '<EMAIL>' and resource == 'pods'):
            allowed = False
    except KeyError:
        # Non-resource request: leave 'allowed' as True.
        pass
    r = {
        "apiVersion": "authorization.k8s.io/v1beta1",
        "kind": "SubjectAccessReview",
        "status": {
            "allowed": allowed
        }
    }
    return jsonify(r)
if __name__ == '__main__':
    # Mutual-TLS variant kept for reference: restore the context lines and
    # pass ssl_context=context to app.run to require client certificates.
    # context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    # context.verify_mode = ssl.CERT_REQUIRED
    # context.verify_flags
    # context.load_verify_locations('tls-ca-chain.pem')
    # context.load_cert_chain('server.crt', 'server.key')
    # app.run(host='0.0.0.0', port=8081, debug=True, threaded=True, ssl_context=context)
    app.run(host='0.0.0.0', port=8080, debug=True)
| StarcoderdataPython |
277951 | <reponame>anugrah86/datacatalog-connectors-bi
#!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from google.cloud import datacatalog
from google.datacatalog_connectors.sisense.prepare import \
datacatalog_tag_template_factory
class DataCatalogTagTemplateFactoryTest(unittest.TestCase):
__PREPARE_PACKAGE = 'google.datacatalog_connectors.sisense.prepare'
__FACTORY_MODULE = f'{__PREPARE_PACKAGE}.datacatalog_tag_template_factory'
__BOOL_TYPE = datacatalog.FieldType.PrimitiveType.BOOL
__DOUBLE_TYPE = datacatalog.FieldType.PrimitiveType.DOUBLE
__STRING_TYPE = datacatalog.FieldType.PrimitiveType.STRING
__TIMESTAMP_TYPE = datacatalog.FieldType.PrimitiveType.TIMESTAMP
def setUp(self):
    # Fresh factory per test, wired with dummy project/location ids.
    self.__factory = datacatalog_tag_template_factory.\
        DataCatalogTagTemplateFactory('test-project', 'test-location')
def test_constructor_should_set_instance_attributes(self):
    # The factory stores its ctor args as name-mangled private attrs;
    # reach into __dict__ with the mangled names to verify them.
    attrs = self.__factory.__dict__
    self.assertEqual('test-project',
                     attrs['_DataCatalogTagTemplateFactory__project_id'])
    self.assertEqual('test-location',
                     attrs['_DataCatalogTagTemplateFactory__location_id'])
def test_make_tag_template_for_dashboard(self):
tag_template = self.__factory.make_tag_template_for_dashboard()
self.assertEqual(
'projects/test-project/locations/test-location/'
'tagTemplates/sisense_dashboard_metadata', tag_template.name)
self.assertEqual('Sisense Dashboard Metadata',
tag_template.display_name)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['id'].type.primitive_type)
self.assertEqual('Id', tag_template.fields['id'].display_name)
self.assertTrue(tag_template.fields['id'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['owner_username'].type.primitive_type)
self.assertEqual('Owner username',
tag_template.fields['owner_username'].display_name)
self.assertFalse(tag_template.fields['owner_username'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['owner_name'].type.primitive_type)
self.assertEqual('Owner name',
tag_template.fields['owner_name'].display_name)
self.assertFalse(tag_template.fields['owner_name'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['folder_id'].type.primitive_type)
self.assertEqual('Folder Id',
tag_template.fields['folder_id'].display_name)
self.assertFalse(tag_template.fields['folder_id'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['folder_name'].type.primitive_type)
self.assertEqual('Folder Name',
tag_template.fields['folder_name'].display_name)
self.assertFalse(tag_template.fields['folder_name'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['folder_entry'].type.primitive_type)
self.assertEqual('Data Catalog Entry for the Folder',
tag_template.fields['folder_entry'].display_name)
self.assertFalse(tag_template.fields['folder_entry'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['datasource'].type.primitive_type)
self.assertEqual('Data Source',
tag_template.fields['datasource'].display_name)
self.assertFalse(tag_template.fields['datasource'].is_required)
self.assertEqual(
self.__TIMESTAMP_TYPE,
tag_template.fields['last_publish'].type.primitive_type)
self.assertEqual('Time it was last published',
tag_template.fields['last_publish'].display_name)
self.assertFalse(tag_template.fields['last_publish'].is_required)
self.assertEqual(
self.__TIMESTAMP_TYPE,
tag_template.fields['last_opened'].type.primitive_type)
self.assertEqual('Time it was last opened',
tag_template.fields['last_opened'].display_name)
self.assertFalse(tag_template.fields['last_opened'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['server_url'].type.primitive_type)
self.assertEqual('Sisense Server Url',
tag_template.fields['server_url'].display_name)
self.assertTrue(tag_template.fields['server_url'].is_required)
def test_make_tag_template_for_folder(self):
tag_template = self.__factory.make_tag_template_for_folder()
self.assertEqual(
'projects/test-project/locations/test-location/'
'tagTemplates/sisense_folder_metadata', tag_template.name)
self.assertEqual('Sisense Folder Metadata', tag_template.display_name)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['id'].type.primitive_type)
self.assertEqual('Id', tag_template.fields['id'].display_name)
self.assertTrue(tag_template.fields['id'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['owner_username'].type.primitive_type)
self.assertEqual('Owner username',
tag_template.fields['owner_username'].display_name)
self.assertFalse(tag_template.fields['owner_username'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['owner_name'].type.primitive_type)
self.assertEqual('Owner name',
tag_template.fields['owner_name'].display_name)
self.assertFalse(tag_template.fields['owner_name'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['parent_id'].type.primitive_type)
self.assertEqual('Id of Parent',
tag_template.fields['parent_id'].display_name)
self.assertFalse(tag_template.fields['parent_id'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['parent_name'].type.primitive_type)
self.assertEqual('Parent Folder',
tag_template.fields['parent_name'].display_name)
self.assertFalse(tag_template.fields['parent_name'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['parent_folder_entry'].type.primitive_type)
self.assertEqual(
'Data Catalog Entry for the parent Folder',
tag_template.fields['parent_folder_entry'].display_name)
self.assertFalse(
tag_template.fields['parent_folder_entry'].is_required)
self.assertEqual(
self.__BOOL_TYPE,
tag_template.fields['has_children'].type.primitive_type)
self.assertEqual('Has children',
tag_template.fields['has_children'].display_name)
self.assertTrue(tag_template.fields['has_children'].is_required)
self.assertEqual(
self.__DOUBLE_TYPE,
tag_template.fields['child_count'].type.primitive_type)
self.assertEqual('Child count',
tag_template.fields['child_count'].display_name)
self.assertFalse(tag_template.fields['child_count'].is_required)
self.assertEqual(
self.__BOOL_TYPE,
tag_template.fields['has_dashboards'].type.primitive_type)
self.assertEqual('Has dashboards',
tag_template.fields['has_dashboards'].display_name)
self.assertTrue(tag_template.fields['has_dashboards'].is_required)
self.assertEqual(
self.__DOUBLE_TYPE,
tag_template.fields['dashboard_count'].type.primitive_type)
self.assertEqual('Dashboard count',
tag_template.fields['dashboard_count'].display_name)
self.assertFalse(tag_template.fields['dashboard_count'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['server_url'].type.primitive_type)
self.assertEqual('Sisense Server Url',
tag_template.fields['server_url'].display_name)
self.assertTrue(tag_template.fields['server_url'].is_required)
def test_make_tag_template_for_jaql(self):
tag_template = self.__factory.make_tag_template_for_jaql()
self.assertEqual(
'projects/test-project/locations/test-location/'
'tagTemplates/sisense_jaql_metadata', tag_template.name)
self.assertEqual('Sisense JAQL Metadata', tag_template.display_name)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['table'].type.primitive_type)
self.assertEqual('Table', tag_template.fields['table'].display_name)
self.assertFalse(tag_template.fields['table'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['column'].type.primitive_type)
self.assertEqual('Column', tag_template.fields['column'].display_name)
self.assertFalse(tag_template.fields['column'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['dimension'].type.primitive_type)
self.assertEqual('Dimension',
tag_template.fields['dimension'].display_name)
self.assertFalse(tag_template.fields['dimension'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['formula'].type.primitive_type)
self.assertEqual('Formula',
tag_template.fields['formula'].display_name)
self.assertFalse(tag_template.fields['formula'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['aggregation'].type.primitive_type)
self.assertEqual('Aggregation',
tag_template.fields['aggregation'].display_name)
self.assertFalse(tag_template.fields['aggregation'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['server_url'].type.primitive_type)
self.assertEqual('Sisense Server Url',
tag_template.fields['server_url'].display_name)
self.assertTrue(tag_template.fields['server_url'].is_required)
def test_make_tag_template_for_widget(self):
tag_template = self.__factory.make_tag_template_for_widget()
self.assertEqual(
'projects/test-project/locations/test-location/'
'tagTemplates/sisense_widget_metadata', tag_template.name)
self.assertEqual('Sisense Widget Metadata', tag_template.display_name)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['id'].type.primitive_type)
self.assertEqual('Id', tag_template.fields['id'].display_name)
self.assertTrue(tag_template.fields['id'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['type'].type.primitive_type)
self.assertEqual('Type', tag_template.fields['type'].display_name)
self.assertTrue(tag_template.fields['type'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['subtype'].type.primitive_type)
self.assertEqual('Subtype',
tag_template.fields['subtype'].display_name)
self.assertFalse(tag_template.fields['subtype'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['owner_username'].type.primitive_type)
self.assertEqual('Owner username',
tag_template.fields['owner_username'].display_name)
self.assertFalse(tag_template.fields['owner_username'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['owner_name'].type.primitive_type)
self.assertEqual('Owner name',
tag_template.fields['owner_name'].display_name)
self.assertFalse(tag_template.fields['owner_name'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['dashboard_id'].type.primitive_type)
self.assertEqual('Dashboard Id',
tag_template.fields['dashboard_id'].display_name)
self.assertTrue(tag_template.fields['dashboard_id'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['dashboard_title'].type.primitive_type)
self.assertEqual('Dashboard Title',
tag_template.fields['dashboard_title'].display_name)
self.assertTrue(tag_template.fields['dashboard_title'].is_required)
self.assertEqual(
self.__STRING_TYPE,
tag_template.fields['dashboard_entry'].type.primitive_type)
self.assertEqual('Data Catalog Entry for the Dashboard',
tag_template.fields['dashboard_entry'].display_name)
self.assertTrue(tag_template.fields['dashboard_entry'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['datasource'].type.primitive_type)
self.assertEqual('Data Source',
tag_template.fields['datasource'].display_name)
self.assertFalse(tag_template.fields['datasource'].is_required)
self.assertEqual(self.__STRING_TYPE,
tag_template.fields['server_url'].type.primitive_type)
self.assertEqual('Sisense Server Url',
tag_template.fields['server_url'].display_name)
self.assertTrue(tag_template.fields['server_url'].is_required)
| StarcoderdataPython |
1865732 | <gh_stars>0
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import pytest
import moto
from pathlib import Path
import dask
import dask.delayed
from datacube.utils.io import slurp
from datacube.utils.dask import (
start_local_dask,
get_total_available_memory,
compute_memory_per_worker,
compute_tasks,
pmap,
partition_map,
save_blob_to_file,
save_blob_to_s3,
_save_blob_to_file,
_save_blob_to_s3,
)
from datacube.utils.aws import (
s3_url_parse,
s3_fetch,
s3_client,
)
def test_compute_tasks():
    """compute_tasks should evaluate delayed tasks in submission order."""
    local_client = start_local_dask(threads_per_worker=1,
                                    dashboard_address=None)

    delayed_tasks = (dask.delayed(value) for value in range(100))
    results = list(compute_tasks(delayed_tasks, local_client))
    assert results == list(range(100))

    local_client.close()
    del local_client
def test_start_local_dask_dashboard_link(monkeypatch):
    """Dashboard link should be rewritten under the JupyterHub proxy prefix."""
    monkeypatch.setenv('JUPYTERHUB_SERVICE_PREFIX', 'user/test/')
    started_client = start_local_dask()
    expected_prefix = 'user/test/proxy/'
    assert started_client.dashboard_link.startswith(expected_prefix)
def test_partition_map():
    """partition_map should split the input into delayed chunks of the requested size."""
    chunks = list(partition_map(10, str, range(101)))
    # 101 items in chunks of 10 -> 10 full chunks + 1 remainder chunk
    assert len(chunks) == 11

    first_chunk = chunks[0].compute()
    assert len(first_chunk) == 10
    assert first_chunk == [str(value) for value in range(10)]

    last_chunk = chunks[-1].compute()
    assert len(last_chunk) == 1
def test_pmap():
    """pmap should apply the function to every element, preserving order."""
    local_client = start_local_dask(threads_per_worker=1,
                                    dashboard_address=None)

    mapped = list(pmap(str, range(101), client=local_client))
    assert mapped == [str(value) for value in range(101)]

    local_client.close()
    del local_client
@pytest.mark.parametrize("blob", [
    "some utf8 string",
    b"raw bytes",
])
def test_save_blob_file_direct(tmpdir, blob):
    """_save_blob_to_file returns (path, True) on success and (path, False) on failure."""
    out_dir = Path(str(tmpdir))
    target = str(out_dir / "file.txt")
    read_mode = "rt" if isinstance(blob, str) else "rb"

    assert _save_blob_to_file(blob, target) == (target, True)
    assert slurp(target, mode=read_mode) == blob

    # a missing parent directory should be reported as failure, not raised
    bad_target = str(out_dir / "missing" / "file.txt")
    assert _save_blob_to_file(blob, bad_target) == (bad_target, False)
@pytest.mark.parametrize("blob", [
    "some utf8 string",
    b"raw bytes",
])
def test_save_blob_file(tmpdir, blob, dask_client):
    """save_blob_to_file wraps a delayed blob and reports (path, success)."""
    out_dir = Path(str(tmpdir))
    target = str(out_dir / "file.txt")
    delayed_blob = dask.delayed(blob)
    read_mode = "rt" if isinstance(blob, str) else "rb"

    result = save_blob_to_file(delayed_blob, target)
    assert dask_client.compute(result).result() == (target, True)
    assert slurp(target, mode=read_mode) == blob

    # a missing parent directory should be reported as failure, not raised
    bad_target = str(out_dir / "missing" / "file.txt")
    result = save_blob_to_file(delayed_blob, bad_target)
    assert dask_client.compute(result).result() == (bad_target, False)
@pytest.mark.parametrize("blob", [
    "some utf8 string",
    b"raw bytes",
])
def test_save_blob_s3_direct(blob, monkeypatch):
    """_save_blob_to_s3 should upload str/bytes and report (url, success)."""
    region_name = "us-west-2"
    blob2 = blob + blob
    url = "s3://bucket/file.txt"
    url2 = "s3://bucket/file-2.txt"
    bucket, _ = s3_url_parse(url)
    # moto needs *some* credentials in the environment to mock the client
    monkeypatch.setenv("AWS_ACCESS_KEY_ID", "fake-key-id")
    monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "fake-secret")
    with moto.mock_s3():
        s3 = s3_client(region_name=region_name)
        s3.create_bucket(Bucket=bucket)
        assert _save_blob_to_s3(blob, url, region_name=region_name) == (url, True)
        assert _save_blob_to_s3(blob2, url2, region_name=region_name) == (url2, True)
        # round-trip: what was uploaded must match what we fetch back
        bb1 = s3_fetch(url, s3=s3)
        bb2 = s3_fetch(url2, s3=s3)
        if isinstance(blob, str):
            bb1 = bb1.decode("utf8")
            bb2 = bb2.decode("utf8")
        assert bb1 == blob
        assert bb2 == blob2
        # a nonexistent bucket should be reported as failure, not raised
        assert _save_blob_to_s3("", "s3://not-a-bucket/f.txt") == ("s3://not-a-bucket/f.txt", False)
@pytest.mark.parametrize("blob", [
    "some utf8 string",
    b"raw bytes",
])
def test_save_blob_s3(blob, monkeypatch, dask_client):
    """save_blob_to_s3 should upload delayed str/bytes and report (url, success)."""
    region_name = "us-west-2"
    blob2 = blob + blob
    dask_blob = dask.delayed(blob)
    dask_blob2 = dask.delayed(blob2)
    url = "s3://bucket/file.txt"
    url2 = "s3://bucket/file-2.txt"
    bucket, _ = s3_url_parse(url)
    # moto needs *some* credentials in the environment to mock the client
    monkeypatch.setenv("AWS_ACCESS_KEY_ID", "fake-key-id")
    monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "fake-secret")
    with moto.mock_s3():
        s3 = s3_client(region_name=region_name)
        s3.create_bucket(Bucket=bucket)
        # first result computed locally, second through the dask client fixture
        rr = save_blob_to_s3(dask_blob, url, region_name=region_name)
        assert rr.compute() == (url, True)
        rr = save_blob_to_s3(dask_blob2, url2, region_name=region_name)
        assert dask_client.compute(rr).result() == (url2, True)
        # round-trip: what was uploaded must match what we fetch back
        bb1 = s3_fetch(url, s3=s3)
        bb2 = s3_fetch(url2, s3=s3)
        if isinstance(blob, str):
            bb1 = bb1.decode("utf8")
            bb2 = bb2.decode("utf8")
        assert bb1 == blob
        assert bb2 == blob2
def test_memory_functions(monkeypatch):
    """Check safety-margin arithmetic of compute_memory_per_worker."""
    one_gig = 10**9

    total = get_total_available_memory()
    # default margin: 500 MiB, capped at half the total memory
    expected_margin = min(500 * (1 << 20), total // 2)

    assert total - compute_memory_per_worker() == expected_margin
    assert total - compute_memory_per_worker(2) * 2 == expected_margin
    assert compute_memory_per_worker(mem_safety_margin=1) == total - 1
    assert compute_memory_per_worker(memory_limit='4G') == 4 * one_gig
    assert compute_memory_per_worker(2, memory_limit='4G') == 2 * one_gig
    assert compute_memory_per_worker(memory_limit='4G',
                                     mem_safety_margin='1G') == 3 * one_gig

    # MEM_LIMIT env var overrides the detected total
    monkeypatch.setenv('MEM_LIMIT', str(one_gig))
    assert get_total_available_memory() == one_gig
    assert compute_memory_per_worker(mem_safety_margin=1) == one_gig - 1
| StarcoderdataPython |
5154566 | <reponame>aarongreig/sycl-blas
#/***************************************************************************
# *
# * @license
# * Copyright (C) Codeplay Software Limited
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * For your convenience, a copy of the License has been included in this
# * repository.
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *
# * SYCL-BLAS: BLAS implementation using SYCL
# *
# * @filename py_gen_blas_reduction.py
# *
# **************************************************************************/
# py_gen import
import errno
import os
import sys
if __name__ == '__main__':
    # The py_gen package lives in a caller-supplied directory; make it importable.
    generator_path = sys.argv[1]
    sys.path.insert(0, generator_path)

from py_gen import generate_file
from py_gen import *
from string import Template

executor_list = ['PolicyHandler<codeplay_policy>']
index_list = ['int', 'long', 'long long']

# Positional CLI arguments (see CMake invocation of this script).
input_template = sys.argv[2]
blas_level_name = sys.argv[3]
blas_function_name = sys.argv[4]
blas_template_impl = sys.argv[5]
executor = sys.argv[6]
data = sys.argv[7]
index = sys.argv[8]
increment = sys.argv[9]
container0 = sys.argv[10]
container1 = sys.argv[11]
operator = sys.argv[12]
file_name = sys.argv[13]

source = 'generated_src/' + blas_level_name + '/' + blas_function_name + '/'

# Create the output directory, tolerating a pre-existing one.
try:
    os.makedirs(source)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise

# Load the C++ template the generated file is instantiated from.
with open(blas_template_impl, "r") as f:
    template = Template(f.read())

# One single-value Iterable per template placeholder; all use the same
# iteration mode, so build them data-driven instead of copy-pasting.
_placeholder_values = [
    ('EXECUTOR', executor),
    ('DATA_TYPE', data),
    ('INDEX_TYPE', index),
    ('INCREMENT_TYPE', increment),
    ('container_t0', container0),
    ('container_t1', container1),
    ('OPERATOR', operator),
]
iterables = [
    Iterable(
        key=key,
        vals=[value],
        itermode=Itermode.combinations,
        iter_modifier=1)
    for key, value in _placeholder_values
]
iter_groups = [IterGroup('@ip1@', template, iterables, combine_iters=True)]
generate_file(
    input_template,
    source + file_name,
    iter_groups,
    format_generated=False,
    format_script="")
| StarcoderdataPython |
6493277 | <reponame>PdxCodeGuild/FlaskApp-Tutorial<gh_stars>10-100
import re
from flask import current_app
from flask_wtf import FlaskForm
from wtforms import BooleanField, DecimalField, FloatField, IntegerField, \
DateTimeField, DateField, \
FileField, PasswordField, StringField, TextAreaField, \
RadioField, SelectField, SelectMultipleField, \
HiddenField, SubmitField
from wtforms.validators import Email, EqualTo, InputRequired, Length
from wtforms import ValidationError
from .models import UserModel
def filter_username(data):
    """Normalise *data* to a lowercase username containing only a-z, 0-9, '_' and '-'."""
    lowered = str(data).lower()
    return re.sub(r'[^a-z0-9_-]', '', lowered)
def filter_useremail(data):
    """Coerce *data* to a string and lowercase it for case-insensitive emails."""
    text = str(data)
    return text.lower()
def validate_username(self, field):
    """Reject usernames already taken by a different user."""
    candidate = field.data
    # Unchanged username is always acceptable.
    if candidate == self.user.keyname:
        return
    if UserModel.query.filter_by(keyname=candidate).first():
        raise ValidationError('Username already in use.')
def validate_usermail(self, field):
    """Reject email addresses already registered to a different user."""
    candidate = field.data
    # Unchanged address is always acceptable.
    if candidate == self.user.user_email:
        return
    if UserModel.query.filter_by(user_email=candidate).first():
        raise ValidationError('Email address already in use.')
class LoginForm(FlaskForm):
    """Login form: email + password with an optional remember-me flag."""
    # Redirect target after a successful login.
    next = HiddenField('next')
    user_email = StringField('Email', validators=[InputRequired(),Length(1,255),Email()])
    password = PasswordField('Password', validators=[InputRequired()])
    remember = BooleanField('Keep me logged in')
    submit = SubmitField('Login')
    def __init__(self, user, *args, **kwargs):
        """Bind the form to *user* (kept on the instance for the view to use)."""
        super(LoginForm, self).__init__(*args, **kwargs)
        self.user = user
class CreatUserForm(FlaskForm):
    """User-creation form with uniqueness checks on username and email."""
    # keyname is filtered to lowercase [a-z0-9_-] and checked for uniqueness.
    keyname = StringField('Username', validators=[InputRequired(),Length(2,63),validate_username], filters=[filter_username])
    user_email = StringField('Email', validators=[InputRequired(),Length(1,63),Email(),validate_usermail], filters=[filter_useremail])
    password = PasswordField('Password', validators=[InputRequired(),Length(1,31),EqualTo('password2',message="Passwords must match.")])
    # NOTE(review): the label below looks like a redacted placeholder
    # (probably 'Confirm Password') -- confirm against the original source.
    password2 = PasswordField('<PASSWORD>')
    submit = SubmitField('Create User')
    def __init__(self, user, *args, **kwargs):
        """Bind the form to *user* (kept on the instance for the view to use)."""
        super(CreatUserForm, self).__init__(*args, **kwargs)
        self.user = user
class EditUserForm(FlaskForm):
    """Admin form for editing an existing user, including role assignment."""
    id = HiddenField('id')
    keyname = StringField('Username', validators=[InputRequired(),Length(2,63),validate_username], filters=[filter_username])
    # Choices are filled in __init__ from the app config (role id -> label).
    user_role = SelectField('User Role', choices=[], coerce=int)
    user_email = StringField('Email', validators=[InputRequired(),Length(1,63),Email(),validate_usermail], filters=[filter_useremail])
    # NOTE(review): the EqualTo target below looks like a redacted placeholder
    # (probably 'password2') -- confirm against the original source.
    password = PasswordField('Password', validators=[EqualTo('<PASSWORD>',message="Passwords must match.")])
    password2 = PasswordField('Confirm Password')
    # Read-only bookkeeping fields rendered for information.
    cnt_login = IntegerField('Login Count')
    mod_login = DateTimeField('Last Login')
    mod_create = DateTimeField('User Created')
    mod_update = DateTimeField('User Updated')
    submit = SubmitField('Update User')
    def __init__(self, user, *args, **kwargs):
        """Bind the form to *user* and populate the role dropdown from config."""
        super(EditUserForm, self).__init__(*args, **kwargs)
        # (role id, human-readable label) pairs, admin first.
        self.user_role.choices = [
            (current_app.config['USER_ROLE_ADMIN'],current_app.config['USER_ROLE'][current_app.config['USER_ROLE_ADMIN']]),
            (current_app.config['USER_ROLE_EDIT'], current_app.config['USER_ROLE'][current_app.config['USER_ROLE_EDIT']]),
            (current_app.config['USER_ROLE_VIEW'], current_app.config['USER_ROLE'][current_app.config['USER_ROLE_VIEW']]),
            (current_app.config['USER_ROLE_NONE'], current_app.config['USER_ROLE'][current_app.config['USER_ROLE_NONE']]),
        ]
        self.user = user
| StarcoderdataPython |
5067730 | from collections import namedtuple
Token = namedtuple('Token', ['type', 'value'])
Elements = 'Number Variable Parenthesis Operator EOF'.split()
# Transition table indexed by [state][character class from read()].
# Cell values: next state; -1 = accept current token and restart on this char;
# 100+k = single-char token of Elements[k] accepted; None = invalid input.
transition_mat = [
    # Space Digit Letter Hashtag Parens Operator EOF
    [0, 1, None, 3, 102, 103, 104], # State 0: between tokens
    [-1, 1, None, -1, -1, -1, -1], # State 1: inside a Number
    [-1, 2, 2, -1, -1, -1, -1], # State 2: inside a Variable (letters/digits)
    [None, None, 2, None, None, None, None] # State 3: just read '#'; a letter must follow
]
def read(char):
    """Map *char* to its character-class column in ``transition_mat``.

    Raises ValueError for characters outside the scanner's alphabet.
    """
    fixed = {' ': 0, '#': 3, '.': 6}
    if char in fixed:
        return fixed[char]
    if char.isdigit():
        return 1
    if char.isalpha():
        return 2
    if char in '()':
        return 4
    if char in '+-/*':
        return 5
    raise ValueError('Char could not be read {!r}'.format(char))
def scan(input_str):
    """Tokenize *input_str* using the table-driven state machine above.

    Returns a list of Token(type, value). Prints a '(state, char) -> state'
    trace line per character for debugging.
    """
    tokens = []
    state = 0
    token_value = []  # characters accumulated for the token in progress
    def add_token(element_idx, value):
        # Build a Token whose type name comes from Elements and append it.
        new_token = Token(Elements[element_idx],
                          ''.join(value))
        tokens.append(new_token)
    for idx, char in enumerate(input_str):
        dbg = '({}, {}) -> '.format(state, char)
        if state < 100:
            prev_state = state
            state = transition_mat[prev_state][read(char)]
            if state is None:
                raise ValueError('Invalid at position {}'.format(idx))
            if state == -1:
                # Save and continue
                # Accept the finished token (state k maps to Elements[k-1])
                # and restart the machine on the current character.
                element_idx = prev_state - 1
                add_token(element_idx, token_value)
                token_value = [char]
                state = transition_mat[0][read(char)]
            elif state != 0:
                token_value.append(char)
        else: # String accepted
            # 100+k marks a single-char token of Elements[k] read previously.
            # NOTE(review): the *current* character is consumed by this branch
            # without being tokenized -- confirm whether it should be reprocessed.
            element_idx = state - 100
            add_token(element_idx, token_value)
            token_value = []
            state = 0
        dbg += '{}'.format(state)
        print(dbg)
    # NOTE(review): a token still being accumulated at end of input is not
    # flushed into the result -- confirm this is intended.
    return tokens
| StarcoderdataPython |
6590252 | import sys,os,lldb
def check_has_dir_in_path(dirname):
    """Return True when *dirname* is already present on ``sys.path``."""
    return dirname in sys.path
def ensure_has_dir_in_path(dirname):
    """Append the absolute form of *dirname* to ``sys.path`` unless present."""
    resolved = os.path.abspath(dirname)
    if resolved not in sys.path:
        sys.path.append(resolved)
def do_import(debugger, modname):
    """Strip a trailing '.pyc' then '.py' extension and run 'script import'."""
    for suffix in ('.pyc', '.py'):
        if len(modname) > len(suffix) and modname.endswith(suffix):
            modname = modname[:-len(suffix)]
    debugger.HandleCommand("script import " + modname)
def pyimport_cmd(debugger, args, result, dict):
    """Import a Python module given its full path.

    Splits *args* into directory + module name, makes the directory
    importable, and imports the module via lldb's 'script import'.
    Returns an error string when no path was given, else None.
    """
    # print() is valid in both Python 2 and 3; the old bare `print` statement
    # breaks under Python-3-based lldb builds.
    print('WARNING: obsolete feature - use native command "command script import"')
    if args == "":
        return "no module path given"
    if os.sep not in args:
        # Bare module name: import relative to the current directory.
        modname = args
        ensure_has_dir_in_path('.')
    else:
        # Split "dir/sep/modname" and make the directory importable.
        endofdir = args.rfind(os.sep)
        modname = args[endofdir + 1:]
        args = args[0:endofdir]
        ensure_has_dir_in_path(args)
    do_import(debugger, modname)
    return None
| StarcoderdataPython |
3415947 | <filename>pms7003.py
"""
PMS7003 datasheet
http://eleparts.co.kr/data/_gextends/good-pdf/201803/good-pdf-4208690-1.pdf
"""
from dataclasses import dataclass
import glob
import logging
import os
import serial
from serial.tools.list_ports import comports
import struct
import time
from typing import Any, Dict, NamedTuple, List, Optional
class PMSData(NamedTuple):
    """One decoded 32-byte PMS7003 datagram (fields in wire order)."""
    header_high: int  # 0x42
    header_low: int  # 0x4d
    frame_length: int  # 2x1(data+check bytes)
    pm1_0_cf1: int  # PM1.0 concentration unit μ g/m3(CF=1,standard particle)
    pm2_5_cf1: int  # PM2.5 concentration unit μ g/m3(CF=1,standard particle)
    pm10_0_cf1: int  # PM10 concentration unit μ g/m3(CF=1,standard particle)
    pm1_0_atm: int  # PM1.0 concentration unit μ g/m3(under atmospheric environment)
    pm2_5_atm: int  # PM2.5 concentration unit μ g/m3(under atmospheric environment)
    pm10_0_atm: int  # PM10 concentration unit μ g/m3 (under atmospheric environment)
    count_0_3: int  # number of particles with diameter beyond 0.3 um in 0.1 L of air.
    count_0_5: int  # number of particles with diameter beyond 0.5 um in 0.1 L of air.
    count_1_0: int  # number of particles with diameter beyond 1.0 um in 0.1 L of air.
    count_2_5: int  # number of particles with diameter beyond 2.5 um in 0.1 L of air.
    count_5_0: int  # number of particles with diameter beyond 5.0 um in 0.1 L of air.
    count_10_0: int  # indicates the number of particles with diameter beyond 10 um in 0.1 L of air.
    reserved: int  # reserved
    checksum: int  # checksum
@dataclass
class SearchResult:
    """Outcome of probing one serial port for a PMS7003 sensor."""
    port: str  # serial port path, e.g. /dev/ttyUSB0
    desc: str  # human-readable port description
    hwid: str  # hardware id string reported by the OS
    dev: Optional["PMS7003"] = None  # connected device when the probe succeeded
    error: Optional[str] = None  # reason the probe failed, if it did
# 32-byte datagram layout: 2 header bytes + 15 big-endian unsigned shorts
PMSStruct = struct.Struct("!2B15H")
# all the data as unsigned ints for checksum calculation
# (30 payload bytes summed byte-wise, final unsigned short is the checksum)
ChecksumStruct = struct.Struct("!30BH")
class PMS7003(object):
    """Reader for the Plantower PMS7003 particulate-matter sensor.

    Speaks the sensor's 32-byte binary serial protocol and returns
    decoded :class:`PMSData` frames.
    """

    # PMS7003 protocol data (HEADER 2byte + 30byte)
    PMS_7003_PROTOCOL_SIZE = 32

    # fixed start-of-frame marker bytes
    HEADER_HIGH = int("0x42", 16)
    HEADER_LOW = int("0x4d", 16)

    # Baud Rate
    SERIAL_SPEED = 9600

    # give up after trying to read for this long
    READ_TIMEOUT_SEC = 2

    @classmethod
    def get_logger(cls, dev: Optional[str] = None) -> logging.Logger:
        """Return a logger named 'mini-aqm.PMS7003[.<dev>]'."""
        path = ".".join(filter(None, ('mini-aqm', cls.__name__, dev)))
        return logging.getLogger(path)

    @classmethod
    def find_devices(cls, only: Optional[str] = None) -> List[SearchResult]:
        """Check serial ports for PMS7003 devices.

        When *only* is given, probe just that port; otherwise probe every
        com port reported by the OS. Returns one SearchResult per port,
        with either ``dev`` set (sensor found) or ``error`` explaining why not.
        """
        log = cls.get_logger()

        # figure out possible device paths to check
        if only:
            log.debug(f"listing results from specified port {only}")
            possible = [SearchResult(port=only, desc="user-specified", hwid="")]
        else:
            log.debug("listing results from all com ports...")
            possible = [
                SearchResult(port=p, desc=d, hwid=h) for (p, d, h) in comports()
            ]

        log.debug(f"checking {len(possible)} ports for PMS7003 device...")
        for p in possible:
            log.debug(f"\tchecking port {p.port}...")
            if not os.path.exists(p.port):
                p.error = "no such port"
            elif not os.access(p.port, mode=os.R_OK, follow_symlinks=True):
                p.error = "access denied"
            else:
                try:
                    dev = PMS7003(p.port)
                    # a successful frame read proves a sensor is attached
                    if dev.read():
                        p.dev = dev
                    else:
                        p.error = "no data"
                except Exception as e:
                    # boundary probe: record the failure instead of raising
                    p.error = str(e)
        return possible

    def __init__(self, port: str):
        self.port = port
        self.buffer: bytes = b""  # raw bytes read but not yet parsed
        self.log = self.get_logger(str(self))
        self.checksum_errors = 0  # frames dropped due to checksum mismatch

    def __str__(self) -> str:
        return f"<PMS7003 on {self.id}>"

    @property
    def id(self) -> str:
        """Identifier for this device (its port path)."""
        return self.port

    @property
    def serial(self) -> serial.Serial:
        """Serial port interface (opened lazily on first access)."""
        if not hasattr(self, "_serial"):
            self._serial = serial.Serial(
                self.port, self.SERIAL_SPEED, timeout=self.READ_TIMEOUT_SEC
            )
        return self._serial

    def read(self) -> Optional[PMSData]:
        """Return the next valid PMSData frame, or None on timeout/no data."""
        self.serial.flushInput()

        # try to read a datagram
        began = time.time()
        data = None
        while data is None:
            # have we been trying for too long?
            if time.time() - began > self.READ_TIMEOUT_SEC:
                self.log.warning("read timeout exceeded")
                break

            # read until we have at least the right number of bytes
            while len(self.buffer) < self.PMS_7003_PROTOCOL_SIZE:
                self.buffer += self.serial.read(1024)
                # break if we're not getting any data
                if len(self.buffer) == 0:
                    break

            # consume until buffer is nearly-empty
            while len(self.buffer) >= self.PMS_7003_PROTOCOL_SIZE:
                buffer = self.buffer[: self.PMS_7003_PROTOCOL_SIZE]
                maybe_data = PMSData._make(PMSStruct.unpack(buffer))

                # looks like the start of a packet, lets advance the buffer
                if self.header_valid(maybe_data):
                    self.log.debug("found valid header")
                    self.buffer = self.buffer[self.PMS_7003_PROTOCOL_SIZE :]
                    if self.checksum_valid(buffer):
                        data = maybe_data
                    else:
                        self.log.warning("checksum does not match")
                        self.checksum_errors += 1
                        data = None
                # invalid header, we might be mid-packet, advance by 1
                else:
                    self.buffer = self.buffer[1:]
                    data = None
        return data

    @classmethod
    def header_valid(cls, data: PMSData) -> bool:
        """Return True when the frame starts with the 0x42 0x4d marker."""
        return data.header_high == cls.HEADER_HIGH and data.header_low == cls.HEADER_LOW

    @classmethod
    def checksum_valid(cls, buffer: bytes) -> bool:
        """Return True when the byte-wise sum of the payload matches the checksum.

        Note: first parameter renamed from 'self' to 'cls' to match the
        @classmethod decorator (the argument was unused in the body).
        """
        chksum_data = ChecksumStruct.unpack(buffer)
        # sum every unsigned int (omit the final short)
        calculated = sum(chksum_data[:-1])
        # grab the sent value
        sent = chksum_data[-1]
        return calculated == sent
| StarcoderdataPython |
3489078 | <reponame>vanitas-vanitatum/swarm-intelligence
from abc import ABC, abstractmethod
class Drawable(ABC):
    """Base class for objects that can render themselves onto a set of axes."""

    @abstractmethod
    def get_patch(self, **kwargs):
        """Return a single patch, or a list/tuple of patches, to draw."""

    def draw(self, ax, **kwargs):
        """Add this object's patch(es) to the axes *ax*."""
        produced = self.get_patch(**kwargs)
        # Normalise to a sequence so single patches and collections share one path.
        if not isinstance(produced, (list, tuple)):
            produced = [produced]
        for single_patch in produced:
            ax.add_patch(single_patch)
| StarcoderdataPython |
9657045 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
.. _sdc_pepolar :
Phase Encoding POLARity (*PEPOLAR*) techniques
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import pkg_resources as pkgr
from nipype.pipeline import engine as pe
from nipype.interfaces import afni, ants, fsl, utility as niu
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces import CopyHeader
from niworkflows.interfaces.freesurfer import StructuralReference
from niworkflows.interfaces.registration import ANTSApplyTransformsRPT
from niworkflows.func.util import init_enhance_and_skullstrip_bold_wf
def init_pepolar_unwarp_wf(bold_meta, epi_fmaps, omp_nthreads=1,
                           name="pepolar_unwarp_wf"):
    """
    This workflow takes in a set of EPI files with opposite phase encoding
    direction than the target file and calculates a displacements field
    (in other words, an ANTs-compatible warp file).

    This procedure works if only one '_epi' file is present
    (as long as it has the opposite phase encoding direction to the target
    file). The target file will be used to estimate the field distortion.
    However, if there is another '_epi' file present with a matching
    phase encoding direction to the target it will be used instead.

    Currently, different phase encoding dimension in the target file and the
    '_epi' file(s) (for example 'i' and 'j') is not supported.

    The warp field correcting for the distortions is estimated using AFNI's
    3dQwarp, with displacement estimation limited to the target file phase
    encoding direction.

    It also calculates a new mask for the input dataset that takes into
    account the distortions.

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from sdcflows.workflows.pepolar import init_pepolar_unwarp_wf
        wf = init_pepolar_unwarp_wf(
            bold_meta={'PhaseEncodingDirection': 'j'},
            epi_fmaps=[('/dataset/sub-01/fmap/sub-01_epi.nii.gz', 'j-')],
            omp_nthreads=8)

    Inputs

        in_reference
            the reference image
        in_reference_brain
            the reference image skullstripped
        in_mask
            a brain mask corresponding to ``in_reference``

    Outputs

        out_reference
            the ``in_reference`` after unwarping
        out_reference_brain
            the ``in_reference`` after unwarping and skullstripping
        out_warp
            the corresponding :abbr:`DFM (displacements field map)` compatible
            with ANTs
        out_mask
            mask of the unwarped input file

    """
    bold_file_pe = bold_meta["PhaseEncodingDirection"]

    # Restrict 3dQwarp's displacement estimation to the target's
    # phase-encoding axis: disable all three axes, then re-enable the one
    # matching the PE direction by removing its -no?dis flag.
    args = '-noXdis -noYdis -noZdis'
    rm_arg = {'i': '-noXdis',
              'j': '-noYdis',
              'k': '-noZdis'}[bold_file_pe[0]]
    args = args.replace(rm_arg, '')

    # Partition the discovered EPI fieldmaps: exact same PE direction vs
    # same axis but opposite polarity (e.g. 'j' vs 'j-').
    usable_fieldmaps_matching_pe = []
    usable_fieldmaps_opposite_pe = []
    for fmap, fmap_pe in epi_fmaps:
        if fmap_pe == bold_file_pe:
            usable_fieldmaps_matching_pe.append(fmap)
        elif fmap_pe[0] == bold_file_pe[0]:
            usable_fieldmaps_opposite_pe.append(fmap)

    # At least one opposite-polarity EPI is required for PEPOLAR estimation.
    if not usable_fieldmaps_opposite_pe:
        raise Exception("None of the discovered fieldmaps has the right "
                        "phase encoding direction. Possibly a problem with "
                        "metadata. If not, rerun with '--ignore fieldmaps' to "
                        "skip distortion correction step.")

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
A deformation field to correct for susceptibility distortions was estimated
based on two echo-planar imaging (EPI) references with opposing phase-encoding
directions, using `3dQwarp` @afni (AFNI {afni_ver}).
""".format(afni_ver=''.join(['%02d' % v for v in afni.Info().version() or []]))

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_reference', 'in_reference_brain', 'in_mask']), name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_reference', 'out_reference_brain', 'out_warp', 'out_mask']),
        name='outputnode')

    # Build the template from the opposite-polarity EPIs; this feeds
    # 3dQwarp's base_file.
    prepare_epi_opposite_wf = init_prepare_epi_wf(omp_nthreads=omp_nthreads,
                                                  name="prepare_epi_opposite_wf")
    prepare_epi_opposite_wf.inputs.inputnode.fmaps = usable_fieldmaps_opposite_pe

    qwarp = pe.Node(afni.QwarpPlusMinus(pblur=[0.05, 0.05],
                                        blur=[-1, -1],
                                        noweight=True,
                                        minpatch=9,
                                        nopadWARP=True,
                                        environ={'OMP_NUM_THREADS': '%d' % omp_nthreads},
                                        args=args),
                    name='qwarp', n_procs=omp_nthreads)

    workflow.connect([
        (inputnode, prepare_epi_opposite_wf, [('in_reference_brain', 'inputnode.ref_brain')]),
        (prepare_epi_opposite_wf, qwarp, [('outputnode.out_file', 'base_file')]),
    ])

    # Prefer a matching-PE EPI as the moving image; otherwise fall back to
    # the target reference itself.
    if usable_fieldmaps_matching_pe:
        prepare_epi_matching_wf = init_prepare_epi_wf(omp_nthreads=omp_nthreads,
                                                      name="prepare_epi_matching_wf")
        prepare_epi_matching_wf.inputs.inputnode.fmaps = usable_fieldmaps_matching_pe

        workflow.connect([
            (inputnode, prepare_epi_matching_wf, [('in_reference_brain', 'inputnode.ref_brain')]),
            (prepare_epi_matching_wf, qwarp, [('outputnode.out_file', 'in_file')]),
        ])
    else:
        workflow.connect([(inputnode, qwarp, [('in_reference_brain', 'in_file')])])

    # Convert the AFNI warp into an ANTs-compatible displacements field
    # (header fix + dtype/intent adjustment in _fix_hdr).
    to_ants = pe.Node(niu.Function(function=_fix_hdr), name='to_ants',
                      mem_gb=0.01)

    cphdr_warp = pe.Node(CopyHeader(), name='cphdr_warp', mem_gb=0.01)

    unwarp_reference = pe.Node(ANTSApplyTransformsRPT(dimension=3,
                                                      generate_report=False,
                                                      float=True,
                                                      interpolation='LanczosWindowedSinc'),
                               name='unwarp_reference')

    enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=omp_nthreads)

    workflow.connect([
        (inputnode, cphdr_warp, [('in_reference', 'hdr_file')]),
        (qwarp, cphdr_warp, [('source_warp', 'in_file')]),
        (cphdr_warp, to_ants, [('out_file', 'in_file')]),
        (to_ants, unwarp_reference, [('out', 'transforms')]),
        (inputnode, unwarp_reference, [('in_reference', 'reference_image'),
                                       ('in_reference', 'input_image')]),
        (unwarp_reference, enhance_and_skullstrip_bold_wf, [
            ('output_image', 'inputnode.in_file')]),
        (unwarp_reference, outputnode, [('output_image', 'out_reference')]),
        (enhance_and_skullstrip_bold_wf, outputnode, [
            ('outputnode.mask_file', 'out_mask'),
            ('outputnode.skull_stripped_file', 'out_reference_brain')]),
        (to_ants, outputnode, [('out', 'out_warp')]),
    ])

    return workflow
def init_prepare_epi_wf(omp_nthreads, name="prepare_epi_wf"):
    """
    This workflow takes in a set of EPI files with the same phase
    encoding direction and returns a single 3D volume ready to be used in
    field distortion estimation.

    The procedure involves: estimating a robust template using FreeSurfer's
    'mri_robust_template', bias field correction using ANTs
    N4BiasFieldCorrection and AFNI 3dUnifize, skullstripping using FSL BET
    and AFNI 3dAutomask, and rigid coregistration to the reference using
    ANTs.

    .. workflow ::
        :graph2use: orig
        :simple_form: yes

        from sdcflows.workflows.pepolar import init_prepare_epi_wf
        wf = init_prepare_epi_wf(omp_nthreads=8)

    Inputs

        fmaps
            list of 3D or 4D NIfTI images
        ref_brain
            coregistration reference (skullstripped and bias field corrected)

    Outputs

        out_file
            single 3D NIfTI file

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=['fmaps', 'ref_brain']),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']),
                         name='outputnode')

    # 4D inputs are split into 3D volumes before template estimation.
    split = pe.MapNode(fsl.Split(dimension='t'), iterfield='in_file',
                       name='split')

    merge = pe.Node(
        StructuralReference(auto_detect_sensitivity=True,
                            initial_timepoint=1,
                            fixed_timepoint=True,  # Align to first image
                            intensity_scaling=True,
                            # 7-DOF (rigid + intensity)
                            no_iteration=True,
                            subsample_threshold=200,
                            out_file='template.nii.gz'),
        name='merge')

    enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf(
        omp_nthreads=omp_nthreads)

    # Rigid+translation registration settings shipped with sdcflows.
    ants_settings = pkgr.resource_filename('sdcflows',
                                           'data/translation_rigid.json')
    fmap2ref_reg = pe.Node(ants.Registration(from_file=ants_settings,
                                             output_warped_image=True),
                           name='fmap2ref_reg', n_procs=omp_nthreads)

    workflow = Workflow(name=name)

    def _flatten(l):
        # fsl.Split (as a MapNode) yields a list of lists; flatten it for
        # mri_robust_template.
        from nipype.utils.filemanip import filename_to_list
        return [item for sublist in l for item in filename_to_list(sublist)]

    workflow.connect([
        (inputnode, split, [('fmaps', 'in_file')]),
        (split, merge, [(('out_files', _flatten), 'in_files')]),
        (merge, enhance_and_skullstrip_bold_wf, [('out_file', 'inputnode.in_file')]),
        (enhance_and_skullstrip_bold_wf, fmap2ref_reg, [
            ('outputnode.skull_stripped_file', 'moving_image')]),
        (inputnode, fmap2ref_reg, [('ref_brain', 'fixed_image')]),
        (fmap2ref_reg, outputnode, [('warped_image', 'out_file')]),
    ])

    return workflow
def _fix_hdr(in_file, newpath=None):
    """Rewrite *in_file* as a little-endian float32 'vector' NIfTI image.

    The warp produced upstream needs its header adjusted (data dtype and
    intent code) before ANTs will accept it as a displacements field.

    Parameters
    ----------
    in_file : str
        Path to the input NIfTI file.
    newpath : str, optional
        Directory in which to write the output (defaults to the input's).

    Returns
    -------
    str
        Path to the fixed file, suffixed with ``_warpfield``.
    """
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    nii = nb.load(in_file)
    hdr = nii.header.copy()
    hdr.set_data_dtype('<f4')
    hdr.set_intent('vector', (), '')
    out_file = fname_presuffix(in_file, "_warpfield", newpath=newpath)
    # ``img.get_data()`` is deprecated and removed in recent nibabel;
    # read through the ``dataobj`` proxy instead.
    nb.Nifti1Image(np.asanyarray(nii.dataobj).astype('<f4'), nii.affine,
                   hdr).to_filename(out_file)
    return out_file
| StarcoderdataPython |
4983953 | <gh_stars>0
# Given an array nums of integers, return how many of them contain an even number of digits.
# Example 1:
# Input: nums = [12,345,2,6,7896]
# Output: 2
# Explanation:
# 12 contains 2 digits (even number of digits).
# 345 contains 3 digits (odd number of digits).
# 2 contains 1 digit (odd number of digits).
# 6 contains 1 digit (odd number of digits).
# 7896 contains 4 digits (even number of digits).
# Therefore only 12 and 7896 contain an even number of digits.
# Example 2:
# Input: nums = [555,901,482,1771]
# Output: 1
# Explanation:
# Only 1771 contains an even number of digits.
class Solution:
    def findNumbers(self, nums):
        """Return how many integers in *nums* have an even digit count."""
        return sum(1 for value in nums if len(str(value)) % 2 == 0)
# Quick manual check using the first example from the problem statement.
sample = [12, 345, 2, 6, 7896]
print(Solution().findNumbers(sample))
| StarcoderdataPython |
5108059 | <reponame>C0mpy/Soft-Computing<gh_stars>0
import HogDescriptor as h
from sklearn.svm import SVC
from skimage.io import imread
import time
import numpy as np
import os
from sklearn.metrics import accuracy_score
if __name__ == "__main__":
start = time.time()
descriptor = h.HOGDescriptor((5, 5))
pos_imgs = []
neg_imgs = []
test = []
labels = []
test_labels = []
pos_dir = os.listdir("../p/")
neg_dir = os.listdir("../n/")
for i in range(50):
if i < 30:
pos_imgs.append(imread("../p/" + pos_dir[i]))
neg_imgs.append(imread("../n/" + neg_dir[i]))
else:
if i % 2:
test.append(imread("../p/" + pos_dir[i]))
test_labels.append(1)
else:
test.append(imread("../n/" + neg_dir[i]))
test_labels.append(0)
labels = [1] * 30 + [0] * 30
pos_hog = []
for img in range(len(pos_imgs)):
pos_hog.append(descriptor.describe(pos_imgs[img]))
neg_hog = []
for img in range(len(neg_imgs)):
neg_hog.append(descriptor.describe(neg_imgs[img]))
test_hog = []
for img in range(len(test)):
test_hog.append(descriptor.describe(test[img]))
pos_hog = np.array(pos_hog)
neg_hog = np.array(neg_hog)
test_hog = np.array(test_hog)
x = np.vstack((pos_hog, neg_hog))
y = np.array(labels)
clf = SVC(kernel="linear")
clf = clf.fit(x, y)
y_pred = clf.predict(test_hog)
print time.time() - start
print str(y_pred)
print str(test_labels)
print "Acc:", accuracy_score(test_labels, y_pred) | StarcoderdataPython |
6600089 | # %% [markdown]
# # Training a binary classifier to identify accounts that are likely
# commercial business vs those that are likely real human users
#
# The goal of this file is to be able to identify which of the followers that I
# selected are commercial followers or otherwise small businesses. This
# corrupts my input user base with non-people, so this is an attempt to remove
# these followers.
# %% [markdown]
# ## Imports
# %%
# General imports
import json
import glob
import pickle
import collections
import random
from tqdm import tqdm as tqdm
import time
import os
dirpath = os.path.dirname(os.path.realpath('__file__'))
from pprint import pprint
# import logging
# logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# logging.root.level = logging.INFO
# NLP imports
import nltk
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['https', 'http'])
import re
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
import spacy
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# To check later if a words is in english or not. Note that to include some
# additional words as stop words, I just removed them from this dictionary
with open('./words_dictionary.json') as filehandle:
words_dictionary = json.load(filehandle)
english_words = words_dictionary.keys()
# Visualization imports
import pyLDAvis
import pyLDAvis.gensim
# pyLDAvis.enable_notebook()
import matplotlib.pyplot as plt
# Other imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import svm
# %% [markdown]
## All functions
# %%
def compute_lda(corpus, id2word, k=10, alpha=0.01):
    """
    Performs the LDA and returns the computed model.

    Input: corpus (bag-of-words, list of lists of (token_id, count)),
        gensim dictionary, number of topics ``k`` and document-topic
        density ``alpha``.
    Output: the fitted/computed LDA model.

    Note: ``alpha`` used to be accepted but silently ignored (the call
    hard-coded 0.01); it is now passed through.  The default keeps the
    previous effective behavior.
    """
    lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                                id2word=id2word,
                                                num_topics=k,
                                                random_state=100,
                                                chunksize=5,
                                                passes=100,
                                                alpha=alpha,
                                                iterations=100,
                                                per_word_topics=True)
    return lda_model
def visualize_LDA(model, corpus):
    """
    Prepare a pyLDAvis visualization for the given LDA model and corpus,
    save it as HTML, and open it in the browser.

    input: an lda model and a corpus of words
    returns: None
    """
    prepared = pyLDAvis.gensim.prepare(
        model, corpus, dictionary=model.id2word, mds='tsne')
    html_path = './LDAvis_prepared/random_users/LDAvis.html'
    pyLDAvis.save_html(prepared, html_path)
    pyLDAvis.show(prepared)
    return None
def get_augmented_feature_vectors(feature_vectors, num_topics=10):
    """
    Densify gensim per-document topic vectors.

    gensim does not emit topics with zero probability, so each document's
    list of (topic, probability) tuples may be shorter than the topic
    count.  This pads every document with explicit zeros.

    input: the feature vectors output by gensim — a list with one entry
        per document, each a list of (topic, probability) tuples — and
        optionally the total number of topics (default 10, matching the
        LDA model used in this script).  Unlike the previous version, the
        input list is not mutated.
    returns: list of lists; element i of each inner list is the
        probability that the document was generated by topic i.
    """
    augmented = []
    for vector in feature_vectors:
        probs = dict(vector)
        # Fill in the zero-probability topics gensim left out.
        augmented.append([probs.get(topic, 0) for topic in range(num_topics)])
    return augmented
# %% [markdown]
## Loading the data
# %%
# Load the pre-scraped user dictionary: maps user -> {'label', 'fulltext', ...}.
with open('./data/lda_dict_random_users.data', 'rb') as filehandle:
    lda_dict_random_users = pickle.load(filehandle)

# %% [markdown]
## Generating the docs, corpuses and labels
# Note that there are about 320 documents with label 1 (yes to farmers market)
# and about 480 documents with label 0 (not farmers market).

# %%
# it seems that some of my entries don't have label, probably because it was
# skipped in a previous try, except clause. So I will try and catch this
# up front:
labels = []
docs = []
for user in lda_dict_random_users:
    try:
        label = lda_dict_random_users[user]['label']
        doc = lda_dict_random_users[user]['fulltext']
        labels += [label]
        docs += [doc]
    except:
        # Entries missing 'label' or 'fulltext' are silently skipped.
        pass

id2word = corpora.Dictionary(docs)
# Idea: Keep only those tokens that appear in at least 10% of the documents
id2word.filter_extremes(no_below=int(0.1*len(docs)))
corpus = [id2word.doc2bow(doc) for doc in docs]

# %%
# Fit the topic model and persist it for later reuse.
t1 = time.time()
lda_model_random_users = compute_lda(corpus, id2word)
t2 = time.time()
print('Time elapsed:', t2-t1)
lda_model_random_users.save('./ldamodels/random_users/model.model')
pprint(lda_model_random_users.print_topics())

# %% [markdown]
## Generating feature vectors given a document and given the lda model

# %%
topics = lda_model_random_users.get_document_topics(corpus,
                                                    per_word_topics=True)
feature_vectors = [doc_topics for doc_topics, word_topics, word_phis in topics]
# Note that get_document_topics only returns cases where the probability is
# non zero, so I will have to manually go in and add zeros.
augmented_feature_vectors = get_augmented_feature_vectors(feature_vectors)

# %% [markdown]
# At this point, I have a list of feature vectors and a list of labels that
# correspond to that feature vector, which can now be fed into a binary
# classifier. I am going to start with a random forest and then see if I can or
# should do better than that.

# %% [markdown]
## Binary Classification Implementation
# I have my feature vectors (list of list) in `augmented_feature_vectors` and
# the corresponding labels in `labels`. This is ready to fed into a binary
# classifier. First, I will split my data into a test_train_split.

# %%
X_train, X_test, y_train, y_test = train_test_split(augmented_feature_vectors,
                                                    labels,
                                                    test_size=0.33,
                                                    random_state=100)
clf = svm.SVC(kernel='rbf', gamma='scale')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Accuracy score in distinguishing between human and commercial:',
      accuracy_score(y_test, y_pred))

# %%
# save the binary classifier
with open('./models/commercial-filter-classifier.model', 'wb') as filehandle:
    pickle.dump(clf, filehandle, protocol=pickle.HIGHEST_PROTOCOL)

# %%
# At this point I have a trained binary classifier that is presumably able to
# detect other farmers markets, farms, and other commercial users. We next use
# this trained model to further tune our input into the LDA when looking at
# predictions by city.
1746978 | <filename>models/Global-Flow-Local-Attention/data/hmubi_dataset.py
import os.path
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
import pandas as pd
from util import pose_utils
import numpy as np
import torch
from tqdm import tqdm
class HMUBIDataset(BaseDataset):
    """Paired-image dataset for pose transfer.

    Each sample pairs a source and a target image of the same subject,
    together with their keypoint and mask files, as listed in a
    ``pairs-{train,test}.txt`` file under ``opt.dataroot``.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Set dataset-specific option defaults on top of the base parser."""
        parser = BaseDataset.modify_commandline_options(parser, is_train)
        # Same image size for both training and testing.
        parser.set_defaults(load_size=256)
        parser.set_defaults(old_size=(256, 256))
        parser.set_defaults(structure_nc=18)
        parser.set_defaults(image_nc=3)
        parser.set_defaults(display_winsize=256)
        return parser

    def get_paths(self, opt):
        """Return (image_dir, keypoints_dir, mask_dir, name_pairs) for the phase."""
        if opt.phase == 'test':
            root = os.path.join(opt.dataroot, 'test')
            pairLst = os.path.join(opt.dataroot, 'pairs-test.txt')
        elif opt.phase == 'train':
            root = os.path.join(opt.dataroot, opt.phase)
            pairLst = os.path.join(opt.dataroot, 'pairs-train.txt')

        name_pairs = self.init_categories(pairLst)
        image_dir = os.path.join(root, 'image')
        bonesLst = os.path.join(root, 'keypoints')
        maskLst = os.path.join(root, 'mask')
        return image_dir, bonesLst, maskLst, name_pairs

    def init_categories(self, pairLst):
        """Read the pair listing and build [source, target] name pairs.

        During training, pairs with any missing image/keypoint/mask file
        on disk are skipped.
        """
        pairs_file_train = pd.read_csv(pairLst, sep=' ', header=None)
        size = len(pairs_file_train)
        pairs = []
        print('Building data pairs ...')
        for i in tqdm(range(size)):
            root = os.path.join(self.opt.dataroot, self.opt.phase)
            P1_name = pairs_file_train.iloc[i][0].split('/')[1].split('.')[0]
            P2_name = pairs_file_train.iloc[i][1].split('/')[1].split('.')[0]
            # Need target keypoints, source keypoints, source image, and source mask
            target_keypoints = os.path.join(root, 'keypoints/' + P2_name + '.txt')
            target_image = os.path.join(root, 'image/' + P2_name + '.jpg')
            target_mask = os.path.join(root, 'mask/' + P2_name + '.png')
            source_keypoints = os.path.join(root, 'keypoints/' + P1_name + '.txt')
            source_image = os.path.join(root, 'image/' + P1_name + '.jpg')
            source_mask = os.path.join(root, 'mask/' + P1_name + '.png')
            if self.opt.phase == 'train':
                if not os.path.exists(target_keypoints) or not os.path.exists(target_image) or not os.path.exists(target_mask) \
                        or not os.path.exists(source_keypoints) or not os.path.exists(source_image) or not os.path.exists(source_mask):
                    continue
            pair = [pairs_file_train.iloc[i][0], pairs_file_train.iloc[i][1]]
            pairs.append(pair)
        print('Loading data pairs finished ...')
        return pairs

    def name(self):
        """Dataset identifier used by the framework."""
        return "HMUBIDataset"
3281004 | <gh_stars>10-100
import numpy as np
from evo_gym.spaces import Tuple, Dict
from evo_gym.vector.utils.spaces import _BaseGymSpaces
from collections import OrderedDict
__all__ = ['concatenate', 'create_empty_array']
def concatenate(items, out, space):
    """Concatenate multiple samples from *space* into a single object.

    Parameters
    ----------
    items : iterable of samples of `space`
        Samples to be concatenated.
    out : tuple, dict, or `np.ndarray`
        Pre-allocated (possibly nested) output buffer.
    space : `gym.spaces.Space` instance
        Observation space of a single environment in the vectorized
        environment.

    Returns
    -------
    tuple, dict, or `np.ndarray`
        The filled output object, mirroring the structure of *space*.
    """
    assert isinstance(items, (list, tuple))
    if isinstance(space, _BaseGymSpaces):
        return concatenate_base(items, out, space)
    if isinstance(space, Tuple):
        return concatenate_tuple(items, out, space)
    if isinstance(space, Dict):
        return concatenate_dict(items, out, space)
    raise NotImplementedError()
def concatenate_base(items, out, space):
    """Stack the raw samples along a new leading axis, writing into *out*."""
    stacked = np.stack(items, axis=0, out=out)
    return stacked
def concatenate_tuple(items, out, space):
    """Concatenate each tuple position independently into *out*."""
    parts = []
    for index, subspace in enumerate(space.spaces):
        samples = [item[index] for item in items]
        parts.append(concatenate(samples, out[index], subspace))
    return tuple(parts)
def concatenate_dict(items, out, space):
    """Concatenate each dict entry independently into *out*, keeping key order."""
    result = OrderedDict()
    for key, subspace in space.spaces.items():
        samples = [item[key] for item in items]
        result[key] = concatenate(samples, out[key], subspace)
    return result
def create_empty_array(space, n=1, fn=np.zeros):
    """Create an empty (possibly nested) numpy array shaped like *space*.

    Parameters
    ----------
    space : `gym.spaces.Space` instance
        Observation space of a single environment in the vectorized
        environment.
    n : int
        Number of environments in the vectorized environment. If `None`,
        creates an empty sample from `space`.
    fn : callable
        Allocation function, e.g. `np.empty` or `np.zeros`.

    Returns
    -------
    tuple, dict, or `np.ndarray`
        The (possibly nested) empty array, mirroring the structure of
        *space*.
    """
    if isinstance(space, _BaseGymSpaces):
        return create_empty_array_base(space, n=n, fn=fn)
    if isinstance(space, Tuple):
        return create_empty_array_tuple(space, n=n, fn=fn)
    if isinstance(space, Dict):
        return create_empty_array_dict(space, n=n, fn=fn)
    raise NotImplementedError()
def create_empty_array_base(space, n=1, fn=np.zeros):
    """Allocate an array for ``n`` batched samples of *space* (or a single
    unbatched sample when ``n`` is None)."""
    if n is None:
        shape = space.shape
    else:
        shape = (n,) + space.shape
    return fn(shape, dtype=space.dtype)
def create_empty_array_tuple(space, n=1, fn=np.zeros):
    """Allocate one empty array per tuple position."""
    arrays = []
    for subspace in space.spaces:
        arrays.append(create_empty_array(subspace, n=n, fn=fn))
    return tuple(arrays)
def create_empty_array_dict(space, n=1, fn=np.zeros):
    """Allocate one empty array per dict entry, preserving key order."""
    result = OrderedDict()
    for key, subspace in space.spaces.items():
        result[key] = create_empty_array(subspace, n=n, fn=fn)
    return result
| StarcoderdataPython |
6633938 | #!/usr/bin/env python
# Copyright 2018-2019 <NAME> & Contributors. All rights reserved.
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import datetime
import re
import sys
# A matching copyright line must carry the current year.
re_copyright = re.compile(rf"{datetime.datetime.now().year} <NAME>")
re_copyright_bad_year = re.compile(r"\d\d\d\d <NAME>")


def check_file(file_name):
    """Classify a file's copyright status.

    Returns one of:
      * "ok"       -- current-year copyright found, or the file is empty
      * "bad-year" -- a copyright line exists but with the wrong year
      * "missing"  -- non-empty file with no copyright line at all
    """
    empty = True
    with open(file_name, "r") as f:
        for line in f:
            empty = False
            if re_copyright.search(line):
                return "ok"
            if re_copyright_bad_year.search(line):
                return "bad-year"
    return "ok" if empty else "missing"


def main(file_names):
    """Check every file, report problems, and return a process exit code."""
    errors = False
    for file_name in file_names:
        status = check_file(file_name)
        if status == "bad-year":
            print(f"{file_name}: copyright with bad year")
            errors = True
        elif status == "missing":
            print(f"{file_name}: no copyright")
            errors = True
    return 1 if errors else 0


if __name__ == "__main__":
    # Skip argv[0]: the previous version iterated over all of sys.argv and
    # therefore checked this script itself along with the intended targets.
    sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
3575812 | # Generated by Django 2.2.3 on 2021-10-20 15:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('proyectos', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='galeria',
new_name='Proyecto',
),
migrations.AlterModelOptions(
name='proyecto',
options={'verbose_name': 'proyecto', 'verbose_name_plural': 'proyectos'},
),
]
| StarcoderdataPython |
194939 | <gh_stars>1-10
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
from recurrence.fields import RecurrenceField
# from schedule.models import Event
from datetime import datetime
# Choice lists for the model fields below: (stored value, human-readable label).
GATHERING_TYPES = [
    # ('BS', 'Bible Study'),
    # ('SR', 'Sex and Relationships'),
    # ('ME', 'Movies and Entertainment'),
    # ('PO', 'Political'),
    ('EV', 'Event'),
    ('CL', 'Class'),
    ('GR', 'Group'),
]

# Progressive levels of relational connection; the numeric prefix keeps the
# stored values ordered when sorted as strings.
CONNECTION_LEVELS = [
    ('1-Acknowledgement', 'Acknowledgement'),
    ('2-Dialogue/Emotional Connection', 'Dialogue/Emotional Connection'),
    ('3-Intersection', 'Intersection'),
    ('4-Invitation', 'Invitation'),
]

# Progressive levels of participation, from first connection to trainer.
PARTICIPATION_LEVELS = [
    ('1-Connection', 'Connection'),
    ('2-Contact', 'Contact'),
    ('3-Visitor', 'Visitor'),
    ('4-Participant', 'Participant'),
    ('5-Potential Leader', 'Potential Leader'),
    ('6-Leader', 'Leader'),
    ('7-Leader-Trainer', 'Leader-Trainer'),
]
class Person(models.Model):
    # A tracked contact: identity, contact details, and engagement levels.

    # External Airtable record id (presumably used for syncing with
    # Airtable — confirm against the importer).
    airtableID = models.CharField(max_length=50, unique=True, blank=True, null=True)
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=30)
    pronouns = models.CharField(max_length=20, null=True, blank=True)
    phone = PhoneNumberField(null=True, blank=True)
    email = models.EmailField(null=True, blank=True)
    birthday = models.DateField(null=True, blank=True)
    # Leadership-pipeline flags; NULL means "unknown".
    leadership_interested = models.BooleanField(null=True)
    leadership_trained = models.BooleanField(null=True)
    # Engagement levels; values come from the choice lists defined above.
    connection = models.CharField(max_length=40, choices=CONNECTION_LEVELS, null=True, blank=True)
    participation = models.CharField(max_length=40, choices=PARTICIPATION_LEVELS, null=True, blank=True)

    def __str__(self):
        # Displayed as "First Last" in the admin and shell.
        return f'{self.first_name} {self.last_name}'
class Gathering(models.Model):
    # A recurring event/class/group definition; individual occurrences are
    # recorded as Session rows (Session has a FK to Gathering).
    # airtableID = models.CharField(max_length=20, blank=True, null=True)
    # event = models.OneToOneField(Event, on_delete=models.CASCADE, primary_key=True, related_name='gathering')
    name = models.CharField(max_length=50)
    description = models.TextField()
    recurrences = RecurrenceField(null=True)
    start_time = models.TimeField()
    end_time = models.TimeField()
    location = models.CharField(max_length=50, null=True)
    category = models.CharField(max_length=2, choices=GATHERING_TYPES)
    authors = models.ManyToManyField(Person)

    @property
    def Date(self):
        # Next upcoming occurrence; falls back to the first occurrence when
        # the recurrence rule has no future dates.
        # NOTE(review): uses naive datetime.now() — confirm timezone
        # handling matches the project's USE_TZ setting.
        next_date = self.recurrences.after(datetime.now())
        if next_date:
            return next_date
        else:
            return self.recurrences.occurrences()[0]

    @property
    def recurrence_text(self):
        # Human-readable summary of all recurrence rules, joined by " + ".
        return " + ".join([rule.to_text() for rule in self.recurrences.rrules])

    def __str__(self):
        return self.name
# class EventGrouper(models.Model):
# event = models.OneToOneField(Event)
# gathering = models.ForeignKey(Gathering,on_delete=models.CASCADE)
class Session(models.Model):
    # One concrete meeting of a Gathering, with leaders and attendance.

    # External Airtable record id (presumably for syncing — confirm).
    airtableID = models.CharField(max_length=20, blank=True, null=True)
    date = models.DateField()
    start_time = models.TimeField(null=True)
    end_time = models.TimeField(null=True)
    leader = models.ManyToManyField(Person, related_name='sessions_lead')
    attendees = models.ManyToManyField(Person, related_name='sessions_attended', blank=True)
    gathering = models.ForeignKey(Gathering, on_delete=models.CASCADE)
    location = models.CharField(max_length=30, default='')

    def __str__(self):
        # e.g. "Bible Study 01/31/24".
        return self.gathering.name + ' ' + self.date.strftime('%m/%d/%y')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.