content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore.ops import operations as P
from mindspore.nn import Cell
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore import log as logger
from mindspore import context
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@pytest.mark.ssd_tbe | [
2,
15069,
12131,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 3.918605 | 258 |
import pyVmomi
from pyVmomi import vim, vmodl
from DatacenterPrac import Login,GetCluster,GetDatacenter,get_obj,GetClusters
from clusterPrac import GetHostsInClusters
import status
from VMPrac import find_obj,get_container_view,collect_properties
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
import time
def vm_ops_handler_wrapper(args):
"""
Wrapping arround vm_ops_handler
"""
return vm_ops_handler(*args)
############################### Cloning Operation #####################
synchObj=multiprocessing.Manager()
vm_result_list=synchObj.list()
def collect_vm_properties(service_instance, view_ref, obj_type, path_set=None,
include_mors=False,desired_vm=None):
"""
Collect properties for managed objects from a view ref
Returns:
A list of properties for the managed objects
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
# Identify the properties to the retrieved
property_spec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
properties = {}
try:
for obj in props:
for prop in obj.propSet:
if prop.val == desired_vm:
properties['name'] = prop.val
properties['obj'] = obj.obj
return properties
else:
pass
except Exception, e:
print "The exception inside collector_properties " + str(e)
return properties
| [
11748,
12972,
53,
76,
12753,
198,
6738,
12972,
53,
76,
12753,
1330,
43907,
11,
410,
4666,
75,
198,
6738,
16092,
330,
9255,
6836,
330,
1330,
23093,
11,
3855,
2601,
5819,
11,
3855,
27354,
330,
9255,
11,
1136,
62,
26801,
11,
3855,
2601,
... | 2.694681 | 940 |
import os, json
from typing import List, Dict
from hoshino.log import new_logger
log = new_logger('maimaiDX')
static = os.path.join(os.path.dirname(__file__), 'static')
arcades_json = os.path.join(os.path.dirname(__file__), 'arcades.json')
if not os.path.exists(arcades_json):
raise '请安装arcades.json文件'
arcades: List[Dict] = json.load(open(arcades_json, 'r', encoding='utf-8'))
config_json = os.path.join(os.path.dirname(__file__), 'config.json')
if not os.path.exists('config.json'):
with open('config.json', 'w', encoding='utf-8') as f:
json.dump({'enable': [], 'disable': []}, f)
config: Dict[str, List[int]] = json.load(open(config_json, 'r', encoding='utf-8'))
aliases_csv = os.path.join(static, 'aliases.csv')
| [
11748,
28686,
11,
33918,
201,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
201,
198,
6738,
289,
3768,
2879,
13,
6404,
1330,
649,
62,
6404,
1362,
201,
198,
201,
198,
6404,
796,
649,
62,
6404,
1362,
10786,
76,
1385,
1872,
36227,
11537,
... | 2.335385 | 325 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException,
ElementNotVisibleException
from browsermobproxy import Server
import urlparse
server = Server(r"c:\browsermob\bin\browsermob-proxy.bat")
server.start()
proxy = server.create_proxy()
proxy.new_har()
chrome_options = webdriver.ChromeOptions()
proxy = urlparse.urlparse(proxy.proxy).netloc
chrome_options.add_argument('--proxy-server=%s' % proxy)
driver = webdriver.Chrome(
executable_path=r"c:\chromedriver.exe",
chrome_options=chrome_options)
driver.get("http://google.com.ua/")
driver.find_element_by_id("gbqfsb").click()
print proxy.har
driver.quit()
server.stop()
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
9683,
201,
198,
6738,
384,
... | 2.914089 | 291 |
from datetime import datetime
from urllib.parse import urljoin
from city_scrapers_core.constants import CLASSIFICATIONS, NOT_CLASSIFIED
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.parser import parse as datetime_parse
from city_scrapers.items import Meeting
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
198,
6738,
1748,
62,
1416,
2416,
364,
62,
7295,
13,
9979,
1187,
1330,
42715,
30643,
18421,
11,
5626,
62,
45449,
198,
6738,
1748,
62,
14... | 3.567901 | 81 |
import unittest
from pynes.game import PPU
| [
11748,
555,
715,
395,
198,
198,
6738,
279,
25337,
13,
6057,
1330,
350,
5105,
628
] | 3 | 15 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import logging
logger = logging.getLogger("PushConsumer")
#导入上级目录模块
import sys
sys.path.append("..")
import settings_MQ as settings
#启动JVM
from jpype import *
jvmPath = getDefaultJVMPath()
startJVM(jvmPath, settings.JVM_OPTIONS, "-Djava.ext.dirs="+settings.JAVA_EXT_DIRS)
#startJVM(jvmPath, "-Djava.class.path=" + settings.RMQClientJAR + ":")
logger.info(java.lang.System.getProperty("java.class.path"))
logger.info(java.lang.System.getProperty("java.ext.dirs"))
#启动JVM之后才能调用JPackage,否则找不到相关的jar包
from MQPushConsumer import MQPushConsumer
from MQMessageListener import msgListenerConcurrentlyProxy, msgListenerOrderlyProxy
from MQMessage import ConsumeFromWhere, MessageModel
# 为了支持文本中文输入,要显式设置编码;该编码不影响Message的Body的编码
import sys
if sys.getdefaultencoding() != 'utf-8':
reload(sys)
sys.setdefaultencoding('utf-8');
import time
if __name__ == '__main__':
consumer = MQPushConsumer('MQClient4Python-Consumer', 'jfxr-7:9876;jfxr-6:9876')
consumer.init()
consumer.setMessageModel(MessageModel['CLUSTERING']) # 默认是CLUSTERING
#consumer.setMessageModel(MessageModel.CLUSTERING) # 默认是CLUSTERING
consumer.subscribe("RMQTopicTest", "TagB")
consumer.setConsumeFromWhere(ConsumeFromWhere['CONSUME_FROM_LAST_OFFSET'])
#consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET)
#consumer.registerMessageListener(msgListenerConcurrentlyProxy)
consumer.registerMessageListener(msgListenerOrderlyProxy)
consumer.start()
while True:
time.sleep(1)
#监听状态时不需要shutdown,除非真实想退出!
#consumer.shutdown()
#监听状态时JVM也不能退出,除非真实想退出!
#shutdownJVM()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
18931,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7203,
49222,
49106,
4943,
198,
198,
2,
43380,
120,
17739,
... | 2.102144 | 793 |
import numpy
from numpy.random import RandomState
from numpy.linalg import cholesky as chol
from limmbo.core.vdsimple import vd_reml
from limmbo.io.input import InputData
random = RandomState(15)
N = 100
S = 1000
P = 3
snps = (random.rand(N, S) < 0.2).astype(float)
kinship = numpy.dot(snps, snps.T) / float(10)
y = random.randn(N, P)
pheno = numpy.dot(chol(kinship), y)
pheno_ID = [ 'PID{}'.format(x+1) for x in range(P)]
samples = [ 'SID{}'.format(x+1) for x in range(N)]
datainput = InputData()
datainput.addPhenotypes(phenotypes = pheno,
phenotype_ID = pheno_ID, pheno_samples = samples)
datainput.addRelatedness(relatedness = kinship,
relatedness_samples = samples)
Cg, Cn, ptime = vd_reml(datainput, verbose=False)
Cg
Cn
ptime
| [
11748,
299,
32152,
198,
6738,
299,
32152,
13,
25120,
1330,
14534,
9012,
198,
6738,
299,
32152,
13,
75,
1292,
70,
1330,
442,
4316,
2584,
355,
442,
349,
198,
6738,
1761,
2022,
78,
13,
7295,
13,
85,
9310,
320,
1154,
1330,
410,
67,
62,
... | 2.414474 | 304 |
"""
Pigeon hole problem in cpmpy.
ftp://ftp.inria.fr/INRIA/Projects/contraintes/publications/CLP-FD/plilp94.html
'''
pigeon: the pigeon-hole problem consists in putting n pigeons in m pigeon-holes (at most 1
pigeon per hole). The boolean formulation uses n × m variables to indicate, for each pigeon,
its hole number. Obviously, there is a solution iff n <= m.
'''
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
# n: num pigeons
# m: n pigeon holes
n = 3
m = 10
pigeon_hole(n,m)
| [
37811,
198,
47,
10045,
261,
7604,
1917,
287,
31396,
3149,
88,
13,
198,
198,
701,
79,
1378,
701,
79,
13,
259,
7496,
13,
8310,
14,
1268,
49,
3539,
14,
16775,
82,
14,
3642,
16947,
274,
14,
11377,
602,
14,
5097,
47,
12,
26009,
14,
4... | 2.752101 | 238 |
from .base import DataSink
from .queued import QueuedSink
from .notifier import MeasurementNotifierSink
from .recorder import FileRecorderSink
from .uploader import UploaderSink
| [
6738,
764,
8692,
1330,
6060,
50,
676,
198,
6738,
764,
4188,
1739,
1330,
4670,
1739,
50,
676,
198,
6738,
764,
1662,
7483,
1330,
24291,
434,
3673,
7483,
50,
676,
198,
6738,
764,
8344,
2875,
1330,
9220,
6690,
2875,
50,
676,
198,
6738,
... | 3.490196 | 51 |
# http://github.com/bobk/jiracharts
#
# This example code set uses various charting libraries, Python with jira-python and
# PowerShell with JiraPS to demonstrate generating useful charts and visualizations from Jira data
from jira import JIRA
import os
import datetime
# in this program we use both the gantt and plotly libraries as examples
# all variables for gantt are prefixed with gantt, variables for plotly are prefixed with plotly
import gantt
import plotly.figure_factory as plotlyff
if __name__== "__main__" :
main()
| [
198,
2,
220,
220,
2638,
1378,
12567,
13,
785,
14,
65,
672,
74,
14,
73,
343,
620,
5889,
198,
2,
198,
2,
220,
220,
770,
1672,
2438,
900,
3544,
2972,
8262,
278,
12782,
11,
11361,
351,
474,
8704,
12,
29412,
290,
220,
198,
2,
220,
... | 3.215116 | 172 |
# imports here
import argparse
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
from collections import OrderedDict
# torchvision.datasets import ImageFolder
from torch.autograd import Variable
import numpy as np
from PIL import Image
print("Stop 1 - after imports")
if __name__ == "__main__":
main()
| [
2,
17944,
994,
198,
11748,
1822,
29572,
198,
198,
11748,
640,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
1330,
6436,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
10178,
198,
6738,
2803... | 2.589623 | 212 |
__author__ = 'dwatkins'
| [
834,
9800,
834,
796,
705,
67,
47261,
5331,
6,
198
] | 2.4 | 10 |
from typing import Literal, Any | [
6738,
19720,
1330,
25659,
1691,
11,
4377
] | 4.428571 | 7 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628,
628,
198
] | 2.875 | 8 |
from pytheas.data.projects import Project
import urllib.parse
| [
6738,
12972,
1169,
292,
13,
7890,
13,
42068,
1330,
4935,
198,
198,
11748,
2956,
297,
571,
13,
29572,
628,
628,
628
] | 3.238095 | 21 |
import logging
from pathlib import Path
from tempfile import mkstemp
from typing import Iterator, List, Optional
import glob
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Audio, Image, display
import cv2
import librosa
from tqdm import tqdm
from .mixins import ImageTrainerMixin
from .widgets import GPUIndex, Solver, Engine
def make_slice(total: int, size: int, step: int) -> Iterator[slice]:
"""
Sliding window over the melody. step should be less than or equal to size.
"""
if step > size:
logging.warn("step > size, you probably miss some part of the melody")
if total < size:
yield slice(0, total)
return
for t in range(0, total - size, step):
yield slice(t, t + size)
if t + size < total:
yield slice(total - size, total)
| [
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
20218,
7753,
1330,
33480,
927,
79,
198,
6738,
19720,
1330,
40806,
1352,
11,
7343,
11,
32233,
198,
11748,
15095,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
... | 2.876289 | 291 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
def in_ipynb() -> bool:
"""
Tests whether the code is running inside a Jupyter Notebook.
Returns:
True iff the code is running inside a Jupyter Notebook.
"""
try:
return str(type(get_ipython())) == "<class 'ipykernel.zmqshell.ZMQInteractiveShell'>"
except NameError:
return False
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
262,
949,
361,
727,
1628,
13,
198,
2,
3740,
1378,
12567,
13,
785,
14,
77... | 2.347518 | 282 |
import nipype.pipeline.engine as pe
from CPAC.pipeline.cpac_group_runner import load_config_yml
| [
198,
11748,
299,
541,
2981,
13,
79,
541,
4470,
13,
18392,
355,
613,
198,
6738,
16932,
2246,
13,
79,
541,
4470,
13,
13155,
330,
62,
8094,
62,
16737,
1330,
3440,
62,
11250,
62,
88,
4029,
628,
628,
198
] | 2.657895 | 38 |
load("//rules/jvm:private/label.bzl", _labeled_jars_implementation = "labeled_jars_implementation")
# For bedtime reading:
# https://github.com/bazelbuild/bazel/issues/4584
# https://groups.google.com/forum/#!topic/bazel-discuss/mt2llfwzmac
labeled_jars = aspect(
implementation = _labeled_jars_implementation,
attr_aspects = ["deps"], # assumption
)
| [
2220,
7203,
1003,
38785,
14,
73,
14761,
25,
19734,
14,
18242,
13,
65,
48274,
1600,
4808,
18242,
276,
62,
73,
945,
62,
320,
32851,
796,
366,
18242,
276,
62,
73,
945,
62,
320,
32851,
4943,
198,
198,
2,
1114,
3996,
2435,
3555,
25,
19... | 2.681481 | 135 |
from __future__ import annotations
from typing import Optional
from jsonclasses import jsonclass, types
@jsonclass
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
6738,
19720,
1330,
32233,
198,
6738,
33918,
37724,
1330,
33918,
4871,
11,
3858,
628,
198,
31,
17752,
4871,
198
] | 4.5 | 26 |
# Embedded file name: c:\Jenkins\live\output\win_32_static\Release\midi-remote-scripts\Push\ScrollableList.py
from __future__ import with_statement
from functools import partial
from _Framework.Control import ButtonControl, EncoderControl, control_list
from _Framework.CompoundComponent import CompoundComponent
from _Framework.Util import forward_property, in_range, clamp, BooleanContext, index_if
from _Framework.SubjectSlot import subject_slot, Subject
from _Framework import Task, Defaults
from _Framework.ScrollComponent import ScrollComponent, Scrollable
import consts
class ScrollableListItem(object):
"""
Wrapper of an item of a scrollable list.
"""
@property
@property
@property
@property
class ScrollableList(Subject, Scrollable):
"""
Class for managing a visual subset of a list of items.
The items will be wrapped in an item_type instance.
"""
__subject_events__ = ('selected_item', 'item_activated', 'scroll')
item_type = ScrollableListItem
fixed_offset = None
@property
num_visible_items = property(_get_num_visible_items, _set_num_visible_items)
@property
def select_item_index_with_offset(self, index, offset):
"""
Selects an item index but moves the view such that there are,
if possible, 'offset' number of elements visible before the
selected one. Does nothing if the item was already selected.
"""
if not (index != self.selected_item_index and index >= 0 and index < len(self._items) and self.selected_item_index != -1):
raise AssertionError
self._offset = clamp(index - offset, 0, len(self._items))
self._normalize_offset(index)
self._do_set_selected_item_index(index)
def select_item_index_with_border(self, index, border_size):
"""
Selects an item with an index. Moves the view if the selection would exceed the
border of the current view.
"""
if self.fixed_offset is not None:
self.select_item_index_with_offset(index, self.fixed_offset)
elif index >= 0 and index < len(self._items):
if not in_range(index, self._offset + border_size, self._offset + self._num_visible_items - border_size):
offset = index - (self._num_visible_items - 2 * border_size) if self.selected_item_index < index else index - border_size
self._offset = clamp(offset, 0, len(self._items))
self._normalize_offset(index)
self._do_set_selected_item_index(index)
return
selected_item_index = property(_get_selected_item_index, _set_selected_item_index)
@property
@property
class ActionListItem(ScrollableListItem):
"""
Interface for an list element that can be actuated on.
"""
supports_action = False
class ActionList(ScrollableList):
"""
A scrollable list of items that can be actuated on.
"""
item_type = ActionListItem
class DefaultItemFormatter(object):
"""
Item formatter that will indicate selection and show action_message if the item
is currently performing an action
"""
action_message = 'Loading...'
class ListComponent(CompoundComponent):
"""
Component that handles a ScrollableList. If an action button is
passed, it can handle an ActionList.
"""
__subject_events__ = ('item_action',)
SELECTION_DELAY = 0.5
ENCODER_FACTOR = 10.0
empty_list_message = ''
_current_action_item = None
_last_action_item = None
action_button = ButtonControl(color='Browser.Load')
encoders = control_list(EncoderControl)
@property
scrollable_list = property(_get_scrollable_list, _set_scrollable_list)
select_next_button = forward_property('_scroller')('scroll_down_button')
select_prev_button = forward_property('_scroller')('scroll_up_button')
next_page_button = forward_property('_pager')('scroll_down_button')
prev_page_button = forward_property('_pager')('scroll_up_button')
@subject_slot('scroll')
@subject_slot('selected_item')
@encoders.value
@action_button.pressed
def _execute_action(self):
""" Is called by the execute action task and should not be called directly
use _trigger_action instead """
if self._current_action_item != None:
self.do_trigger_action(self._current_action_item)
self._last_action_item = self._current_action_item
self._current_action_item = None
self.update()
return
@property
@property | [
2,
13302,
47238,
2393,
1438,
25,
269,
7479,
44875,
5331,
59,
12583,
59,
22915,
59,
5404,
62,
2624,
62,
12708,
59,
26362,
59,
13602,
72,
12,
47960,
12,
46521,
59,
49222,
59,
29261,
540,
8053,
13,
9078,
198,
6738,
11593,
37443,
834,
1... | 2.795122 | 1,640 |
from locker import User, Credentials
function()
def create_user(username, password):
'''
Function to create a new user with a username and password
'''
return User(username, password)
def save_user(user):
'''
Function to save a new user
'''
user.save_user()
def display_user():
"""
Function to display existing user
"""
return User.display_user()
def create_new_credential(account, userName, password):
"""
Function that creates new credentials for a given user account
"""
return Credentials(account, userName, password)
def save_credentials(credentials):
"""
Function to save Credentials
"""
credentials.save_credential()
def display_accounts_details():
"""
Function that returns all the saved credential.
"""
return Credentials.display_credentials()
def del_credential(credentials):
"""
Function to delete a Credentials from credentials list
"""
credentials.delete_credentials()
def find_credential(account):
"""
Function that finds a Credentials by an account name and returns the Credentials that belong to that account
"""
return Credentials.find_credential(account)
def check_credentials(account):
"""
Function that check if a Credentials exists with that account name and return true or false
"""
return Credentials.if_credential_exist(account)
def generate_Password():
'''
generates a random password for the user.
'''
return Credentials.generate_random_password()
if __name__ == '__main__':
locker()
| [
6738,
17355,
1330,
11787,
11,
327,
445,
14817,
628,
198,
198,
8818,
3419,
628,
198,
4299,
2251,
62,
7220,
7,
29460,
11,
9206,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
15553,
284,
2251,
257,
649,
2836,
351,
257,
20579... | 3.003752 | 533 |
import datetime
dt = '21/03/2012'
day, month, year = (int(x) for x in dt.split('/'))
ans = datetime.date(year, month, day)
print ans.strftime("%A") | [
11748,
4818,
8079,
198,
28664,
796,
705,
2481,
14,
3070,
14,
6999,
6,
198,
820,
11,
1227,
11,
614,
796,
357,
600,
7,
87,
8,
329,
2124,
287,
288,
83,
13,
35312,
10786,
14,
6,
4008,
220,
220,
220,
220,
198,
504,
796,
4818,
8079,
... | 2.359375 | 64 |
nums = [2,1,0,1,2,2,3,0,4,2]
val = 2
s = Solution()
print(s.removeElement(nums,val)) | [
198,
77,
5700,
796,
685,
17,
11,
16,
11,
15,
11,
16,
11,
17,
11,
17,
11,
18,
11,
15,
11,
19,
11,
17,
60,
198,
2100,
796,
362,
198,
198,
82,
796,
28186,
3419,
198,
198,
4798,
7,
82,
13,
28956,
20180,
7,
77,
5700,
11,
2100,
... | 1.77551 | 49 |
#!/usr/bin/env python3
import argparse
import random
onsets = [
"b",
"c",
"d",
"f",
"g",
"h",
"j",
"k",
"l",
"m",
"n",
"p",
"r",
"s",
"t",
"v",
"w",
"pl",
"bl",
"kl",
"ɡl",
"pr",
"br",
"tr",
"dr",
"kr",
"ɡr",
"tw",
"dw",
"ɡw",
"kw",
"pw",
"fl",
"sl",
"dʒ",
"θl",
"fr",
"θr",
"ʃr",
"hw",
"sw",
"θw",
"vw",
"pj",
"bj",
"tj",
"dj",
"kj",
"ɡj",
"mj",
"nj",
"fj",
"vj",
"θj",
"sj",
"zj",
"hj",
"lj",
"sp",
"st",
"sk",
"sm",
"sn",
"sf",
"sθ",
"spl",
"skl",
"spr",
"str",
"skr",
"skw",
"smj",
"spj",
"stj",
"skj",
"sfr",
]
nuclei = [
"a",
"e",
"i",
"o",
"u",
"oo",
"ui",
"oi",
"ai",
"ae",
"ee",
"ei",
"ie",
]
codas = [
"b",
"c",
"d",
"f",
"g",
"k",
"l",
"m",
"n",
"p",
"r",
"s",
"t",
"v",
"ŋ",
"lp",
"lb",
"lt",
"ld",
"ltʃ",
"ldʒ",
"lk",
"rp",
"rb",
"rt",
"rd",
"rtʃ",
"rdʒ",
"rk",
"rɡ",
"lf",
"lv",
"lθ",
"ls",
"lʃ",
"rf",
"rv",
"rθ",
"rs",
"rz",
"rʃ",
"lm",
"ln",
"rm",
"rn",
"rl",
"mp",
"nt",
"nd",
"ntʃ",
"ndʒ",
"ŋk",
"mf",
"mθ",
"nθ",
"ns",
"nz",
"ŋθ",
"ft",
"sp",
"st",
"sk",
"fθ",
"pt",
"kt",
"pθ",
"ps",
"tθ",
"ts",
"dθ",
"ks",
"lpt",
"lps",
"lfθ",
"lts",
"lst",
"lkt",
"lks",
"rmθ",
"rpt",
"rps",
"rts",
"rst",
"rkt",
"mpt",
"mps",
"ndθ",
"ŋkt",
"ŋks",
"ŋkθ",
"ksθ",
"kst",
]
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
4738,
198,
198,
684,
1039,
796,
685,
198,
220,
366,
65,
1600,
198,
220,
366,
66,
1600,
198,
220,
366,
67,
1600,
198,
220,
366,
69,
1600,
19... | 1.456881 | 1,090 |
# def f():
# print (x, id(x))
# x = 99
# print (x, id(x))
# f()
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# f()
# # print (x)
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# x = 99
# print (x, id(x))
# f()
# print (x, id(x))
# # ----------------
# def f():
# x = 100
# print (x, id(x))
# def y():
# print (x, id(x))
# y()
# f()
# # ----------------
x = 99
f()
print (x, id(x)) | [
2,
825,
277,
33529,
198,
2,
220,
220,
220,
220,
3601,
357,
87,
11,
4686,
7,
87,
4008,
198,
198,
2,
2124,
796,
7388,
198,
2,
3601,
357,
87,
11,
4686,
7,
87,
4008,
198,
2,
277,
3419,
198,
198,
2,
1303,
34400,
198,
198,
2,
825,... | 1.898785 | 247 |
n1=int(input('digite um numero'))
n2=int(input('digite um numero'))
n3=int(input('digite um numero'))
maior = n1
if n2 > n1 and n2 > n3 :
maior = n2
if n3 > n1 and n3 >n2 :
maior = n3
menor = n1
if n2 < n1 and n2 < n3 :
menor = n2
if n3 < n1 and n3 < n2 :
menor = n3
print ('{} é o MAIOR'.format(maior))
print ('{} é o MENOR'.format(menor)) | [
77,
16,
28,
600,
7,
15414,
10786,
12894,
578,
23781,
997,
3529,
6,
4008,
198,
77,
17,
28,
600,
7,
15414,
10786,
12894,
578,
23781,
997,
3529,
6,
4008,
198,
77,
18,
28,
600,
7,
15414,
10786,
12894,
578,
23781,
997,
3529,
6,
4008,
... | 1.983425 | 181 |
# To Do: improve docstrings
from Parser import Parser
from CodeWriter import CodeWriter
import sys
import os
class VMTranslator:
"""
Main class. Handles input, reads the VM file, writes to the assembly file, and drives the VM translation process.
"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
if __name__ == "__main__":
if len(sys.argv) < 2:
raise Exception() # To Do - elaborate
else:
input_files = sys.argv[1]
output_file = sys.argv[2]
vmt = VMTranslator(input_files, output_file) | [
2,
1675,
2141,
25,
2987,
2205,
37336,
198,
198,
6738,
23042,
263,
1330,
23042,
263,
198,
6738,
6127,
34379,
1330,
6127,
34379,
198,
11748,
25064,
198,
11748,
28686,
628,
198,
4871,
16990,
8291,
41880,
25,
198,
220,
220,
220,
37227,
198,... | 2.539749 | 239 |
# Python example
# http://jasminsms.com
import urllib2
import urllib
baseParams = {'username':'foo', 'password':'bar', 'to':'+336222172', 'content':'Hello'}
# Sending long content (more than 160 chars):
baseParams['content'] = 'Very long message ....................................................................................................................................................................................'
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
# Sending UCS2 (UTF-16) arabic content
baseParams['content'] = '\x06\x23\x06\x31\x06\x46\x06\x28'
baseParams['coding'] = 8
urllib2.urlopen("http://127.0.0.1:1401/send?%s" % urllib.urlencode(baseParams)).read()
| [
2,
11361,
1672,
198,
2,
2638,
1378,
73,
8597,
1040,
907,
13,
785,
198,
11748,
2956,
297,
571,
17,
198,
11748,
2956,
297,
571,
198,
198,
8692,
10044,
4105,
796,
1391,
6,
29460,
10354,
6,
21943,
3256,
705,
28712,
10354,
6,
5657,
3256,... | 3.111111 | 234 |
"""Initialize proj-template module."""
| [
37811,
24243,
1096,
386,
73,
12,
28243,
8265,
526,
15931,
198
] | 3.545455 | 11 |
from importlib import import_module as _import
from .api import keywords, scan
from .base import PkgcheckException
from .results import Result
__all__ = ('keywords', 'scan', 'PkgcheckException', 'Result')
__title__ = 'pkgcheck'
__version__ = '0.10.10'
def __getattr__(name):
"""Provide import access to keyword classes."""
if name in keywords:
return keywords[name]
try:
return _import('.' + name, __name__)
except ImportError:
raise AttributeError(f'module {__name__} has no attribute {name}')
| [
6738,
1330,
8019,
1330,
1330,
62,
21412,
355,
4808,
11748,
198,
198,
6738,
764,
15042,
1330,
26286,
11,
9367,
198,
6738,
764,
8692,
1330,
350,
10025,
9122,
16922,
198,
6738,
764,
43420,
1330,
25414,
198,
198,
834,
439,
834,
796,
19203,
... | 2.893048 | 187 |
# _*_ coding: utf-8 _*_
"""
Time: 2022/3/7 15:37
Author: ZHANG Yuwei
Version: V 0.2
File: setup.py
Describe:
"""
import setuptools
# Reads the content of your README.md into a variable to be used in the setup below
with open("./README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='supertld',
version='0.0.2',
license='MIT',
description='SuperTLD: Detecting TAD-like domains from RNA-associated interactions',
long_description=long_description, # loads your README.md
long_description_content_type="text/markdown", # README.md is of type 'markdown'
author='Yu Wei Zhang',
author_email='ywzhang224@gmail.com',
url='https://github.com/deepomicslab/SuperTLD',
packages=setuptools.find_packages(),
classifiers=[ # https://pypi.org/classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
) | [
2,
4808,
9,
62,
19617,
25,
3384,
69,
12,
23,
4808,
9,
62,
201,
198,
37811,
201,
198,
7575,
25,
220,
220,
220,
220,
33160,
14,
18,
14,
22,
1315,
25,
2718,
201,
198,
13838,
25,
220,
220,
1168,
39,
15567,
10605,
42990,
201,
198,
... | 2.473008 | 389 |
from flask import jsonify, Response, request
from model import StampModel, CouponModel
from view import BaseResource
| [
6738,
42903,
1330,
33918,
1958,
11,
18261,
11,
2581,
198,
198,
6738,
2746,
1330,
40694,
17633,
11,
43156,
261,
17633,
198,
6738,
1570,
1330,
7308,
26198,
628,
198
] | 4.285714 | 28 |
# Run with Python 3
import requests
import pandas as pd
import math
import copy
'''This example demonstrates how to get lessons data via Stepik-API and why it can be useful.'''
'''We download lessons' data one by one,
then we make plots to see how much the loss of the people depends on the lesson time '''
plots_message = '<br /><hr>Plots describe how quantity of people who viewed, ' \
'passed and left depends on lesson duration.'
enable_russian = '<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> \n</head>'
welcome_message = 'Hi! <br><br> Click on public lessons to check them out. ' \
'<br><hr> List of existing lessons with id from {} to {}: <br> '
setting_css_style = '<style> \nli { float:left; width: 49%; } \nbr { clear: left; } \n</style>'
start_lesson_id = 1
finish_lesson_id = 100
# 1. Get your keys at https://stepik.org/oauth2/applications/ (client type = confidential,
# authorization grant type = client credentials)
client_id = "..."
client_secret = "..."
# 2. Get a token
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
resp = requests.post('https://stepik.org/oauth2/token/',
data={'grant_type': 'client_credentials'},
auth=auth
)
token = resp.json()['access_token']
# Class for drawing plots in text
def introduce_lessons_in_html(start, finish, json_of_lessons, html_file='lessons.html'):
"""
:param start: first id of lesson downloaded via API
:param finish: last id of lesson downloaded via API
:param json_of_lessons: json file we made by concatenating API answers that gave one-lesson-answer
:param html_file: file we write to
"""
with open(html_file, 'w', encoding='utf-8') as f:
# enabling russian language and setting html style for two-columns lists
f.write(enable_russian + setting_css_style)
f.write('<big>{}</big><ol>\n'.format(welcome_message.format(start, finish)))
for lesson in json_of_lessons:
if lesson['is_public']:
url = '<a href="https://stepik.org/lesson/{}">{}</a>'.format(lesson['slug'], lesson["title"])
f.write('<li>{}</li>\n'.format(url))
else:
f.write('<li>{}</li> \n'.format(lesson['title']))
f.write('</ol>\n')
f.close()
# 3. Call API (https://stepik.org/api/docs/) using this token.
# Example:
def get_lessons_from_n_to_m(from_n, to_m, current_token):
"""
:param from_n: starting lesson id
:param to_m: finish lesson id
:param current_token: token given by API
:return: json object with all existing lessons with id from from_n to to_m
"""
api_url = 'https://stepik.org/api/lessons/'
json_of_n_lessons = []
for n in range(from_n, to_m + 1):
try:
current_answer = (requests.get(api_url + str(n),
headers={'Authorization': 'Bearer ' + current_token}).json())
# check if lesson exists
if not ("detail" in current_answer):
json_of_n_lessons.append(current_answer['lessons'][0])
except:
print("Failure on id {}".format(n))
return json_of_n_lessons
def nan_to_zero(*args):
"""
:param args: lists with possible float-nan values
:return: same list with all nans replaced by 0
"""
for current_list in args:
for i in range(len(current_list)):
if not math.isnan(current_list[i]):
current_list[i] = round(current_list[i])
else:
current_list[i] = 0
if __name__ == '__main__':
# downloading lessons using API
json_of_lessons_being_analyzed = get_lessons_from_n_to_m(start_lesson_id, finish_lesson_id, token)
# storing the result in pandas DataFrame
lessons_data_frame = pd.DataFrame(json_of_lessons_being_analyzed)
# extracting the data needed
passed = lessons_data_frame['passed_by'].values
time_to_complete = lessons_data_frame['time_to_complete'].values
viewed = lessons_data_frame['viewed_by'].values
left = viewed - passed
# replacing data-slices by lists of their values
time_to_complete = time_to_complete.tolist()
viewed = viewed.tolist()
passed = passed.tolist()
left = left.tolist()
# replacing nan-values with 0 and rounding values
nan_to_zero(time_to_complete, viewed, passed, left)
# creating new Figure to make plots
figure1 = Figure(save_file='lessons.html')
# adding bar diagrams to Figure f1
figure1.add_barplot(time_to_complete, viewed, "X -- time to complete | Y - quantity of people who viewed")
figure1.add_barplot(time_to_complete, passed, "X -- time to complete | Y - quantity of people who passed")
figure1.add_barplot(time_to_complete, left, "X -- time to complete | Y - quantity of people who left")
# creating html-file describing lessons
introduce_lessons_in_html(start_lesson_id, finish_lesson_id, json_of_lessons_being_analyzed, 'lessons.html')
# saving plots (file is linked with Figure object f1)
figure1.save_plots_to_html()
| [
2,
5660,
351,
11361,
513,
198,
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
10688,
198,
11748,
4866,
198,
198,
7061,
6,
1212,
1672,
15687,
703,
284,
651,
11658,
1366,
2884,
5012,
1134,
12,
17614,
290,
1521,
340,
460,... | 2.5107 | 2,056 |
from aiautomation.testcase.test_plan import TestPlanRunner, PlanInfo
plan = PlanInfo('4', '自动化测试', None
, None, '119', '1000', '0', '0')
t = TestPlanRunner(plan=plan)
t.add_case("百度搜索", "一般百度搜索")
t.start()
| [
6738,
257,
544,
315,
296,
341,
13,
9288,
7442,
13,
9288,
62,
11578,
1330,
6208,
20854,
49493,
11,
5224,
12360,
198,
198,
11578,
796,
5224,
12360,
10786,
19,
3256,
705,
164,
229,
103,
27950,
101,
44293,
244,
38184,
233,
46237,
243,
325... | 1.76378 | 127 |
from pkg_example.calculator_module import Calculator
| [
6738,
279,
10025,
62,
20688,
13,
9948,
3129,
1352,
62,
21412,
1330,
43597,
198
] | 3.785714 | 14 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, session, redirect, url_for, escape, request
import os
os.putenv('LANG', 'en_US.UTF-8')
os.putenv('LC_ALL', 'en_US.UTF-8')
app = Flask(__name__)
@app.route('/t')
# @app.route('/')
# def index():
# if 'username' in session:
# return 'Logged in as %s' % escape(session['username'])
# return 'You are not logged in'
@app.route('/plugin', methods=['GET', 'POST'])
# @app.route('/login', methods=['GET', 'POST'])
# def login():
# session['username'] = request.form['username']
# return redirect(url_for('index'))
# return '''
# <form method="post">
# <p><input type=text name=username>
# <p><input type=submit value=Login>
# </form>
# '''
# @app.route('/logout')
# def logout():
# # remove the username from the session if it's there
# session.pop('username', None)
# return redirect(url_for('index'))
if __name__ == "__main__":
app.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42903,
1330,
46947,
11,
6246,
11,
18941,
11,
19016,
62,
1640,
11,
6654,
11,
2581,
198,
11748,
28686,
1... | 2.310502 | 438 |
#! encoding=utf8
# To decide the storage of RDD, there are different storage levels, which are given below -
# DISK_ONLY = StorageLevel(True, False, False, False, 1)
# DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
# MEMORY_AND_DISK = StorageLevel(True, True, False, False, 1)
# MEMORY_AND_DISK_2 = StorageLevel(True, True, False, False, 2)
# MEMORY_AND_DISK_SER = StorageLevel(True, True, False, False, 1)
# MEMORY_AND_DISK_SER_2 = StorageLevel(True, True, False, False, 2)
# MEMORY_ONLY = StorageLevel(False, True, False, False, 1)
# MEMORY_ONLY_2 = StorageLevel(False, True, False, False, 2)
# MEMORY_ONLY_SER = StorageLevel(False, True, False, False, 1)
# MEMORY_ONLY_SER_2 = StorageLevel(False, True, False, False, 2)
# OFF_HEAP = StorageLevel(True, True, True, False, 1)
from pyspark import SparkContext
import pyspark
sc = SparkContext (
"local",
"storagelevel app"
)
rdd1 = sc.parallelize([1,2])
rdd1.persist( pyspark.StorageLevel.MEMORY_AND_DISK_2 )
rdd1.getStorageLevel()
print(rdd1.getStorageLevel()) | [
2,
0,
21004,
28,
40477,
23,
198,
2,
1675,
5409,
262,
6143,
286,
371,
16458,
11,
612,
389,
1180,
6143,
2974,
11,
543,
389,
1813,
2174,
532,
198,
198,
2,
13954,
42,
62,
1340,
11319,
796,
20514,
4971,
7,
17821,
11,
10352,
11,
10352,
... | 2.657289 | 391 |
# Soultion for Project Euler Problem #8 - https://projecteuler.net/problem=8
# (c) 2017 dpetker
TEST_VAL = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
curr_max = 0
for ctr in range(0, len(TEST_VAL) - 13):
temp_prod = multiply_range(TEST_VAL[ctr : ctr + 13])
if temp_prod > curr_max:
curr_max = temp_prod
print('The thirteen adjacent digits in the 1000-digit number that have the greatest product is {}'.format(curr_max))
| [
2,
22862,
2528,
295,
329,
4935,
412,
18173,
20647,
1303,
23,
532,
3740,
1378,
16302,
68,
18173,
13,
3262,
14,
45573,
28,
23,
198,
2,
357,
66,
8,
2177,
288,
6449,
6122,
198,
198,
51,
6465,
62,
23428,
796,
705,
4790,
21940,
1558,
29... | 2.404145 | 579 |
""" utils.py
Nicholas Boucher 2020
Utility functions for assisting in election verification
calculations.
"""
from typing import TypeVar, Iterable
from logging import info, warning
from electionguard.group import ElementModP, int_to_p
from electionguard.election import ElectionDescription, ContestDescription
from electionguard.ballot import CiphertextAcceptedBallot, CiphertextBallotContest, CiphertextBallotSelection
from electionguard.key_ceremony import CoefficientValidationSet
T: TypeVar = TypeVar('T')
class Invariants():
"""Represents a series of conditions that must all hold for the
collection of invariants to remain valid."""
title: str
conditions: dict[str, bool]
def __init__(self, title: str):
"""Instantiate a new set of invariants collectively labelled `title`."""
self.title = title
self.conditions = {}
def ensure(self, invariant: str, condition: bool) -> bool:
"""Track the truthiness of `condition` for the invariant labelled `invariant`."""
if invariant in self.conditions:
self.conditions[invariant] = self.conditions[invariant] and condition
else:
self.conditions[invariant] = condition
return condition
def validate(self) -> bool:
"""Return whether all conditions are valid, logging the results."""
validity: bool = True
error_msg: str = ''
for invariant, state in self.conditions.items():
validity = validity and state
if not state:
error_msg += f'\t\tFailed to validate invariant {invariant}.\n'
if validity:
info(f'[VALID]: {self.title}')
else:
info(f'[INVALID]: {self.title}')
info(error_msg)
return validity
class Contests():
"""Speeds up access to contest descriptions through object_id indexing."""
contests: dict[str,ContestDescription]
def __init__(self, description: ElectionDescription):
"""Indexes contest descriptions by object_id for quick lookups."""
self.contests = {}
for contest in description.contests:
self.contests[contest.object_id] = contest
def __getitem__(self, contest: str) -> ContestDescription:
"""Returns the requested contest, or None if no such contest exists."""
if contest in self.contests:
return self.contests[contest]
else:
return None
class Guardians():
"""Speeds up access to guardians through owner_id indexing."""
guardians: dict[str,CoefficientValidationSet]
def __init__(self, guardians: Iterable[CoefficientValidationSet]):
"""Indexes guardians by owner_id for quick lookups."""
self.guardians = {}
for guardian in guardians:
self.guardians[guardian.owner_id] = guardian
def __getitem__(self, guardian: str) -> ContestDescription:
"""Returns the requested guardian, or None if no such guardian exists."""
if guardian in self.guardians:
return self.guardians[guardian]
else:
return None
def get_first_el(els: list[T]) -> T:
"""Returns the first element of `els`, or None if it is empty."""
if len(els) > 0:
return els[0]
else:
return None
def get_contest(ballot: CiphertextAcceptedBallot, contest_id: str) -> CiphertextBallotContest:
"""Given a ballot, gets the supplied contest. If the contest appears more than once,
None is returned."""
result: CiphertextBallotContest = None
for contest in ballot.contests:
if contest.object_id == contest_id:
if result != None:
warn('Ballot contains multiple entries for the same contest.')
return None
else:
result = contest
return result
def get_selection(ballot: CiphertextAcceptedBallot, contest_id: str, selection_id: str) -> CiphertextBallotSelection:
"""Given a ballot, gets the supplied selection from within the supplied contest.
If the contest or selection appear more than once, None is returned."""
result: CiphertextBallotSelection = None
contest: CiphertextBallotContest = get_contest(ballot, contest_id)
if contest:
for selection in contest.ballot_selections:
if selection.object_id == selection_id:
if result != None:
warn('Ballot contains multiple entries for the same selection.')
return None
else:
result = selection
return result
def warn(msg: str) -> None:
"""Emits a warning message `msg` to the logs."""
warning(f'[WARNING]: {msg}') | [
37811,
3384,
4487,
13,
9078,
198,
220,
220,
220,
20320,
14551,
2044,
12131,
628,
220,
220,
220,
34030,
5499,
329,
26508,
287,
3071,
19637,
198,
220,
220,
220,
16765,
13,
198,
37811,
198,
198,
6738,
19720,
1330,
5994,
19852,
11,
40806,
... | 2.644134 | 1,790 |
import os
from krcg import deck
from krcg import twda
| [
11748,
28686,
198,
198,
6738,
479,
6015,
70,
1330,
6203,
198,
6738,
479,
6015,
70,
1330,
665,
6814,
628,
628,
628
] | 2.857143 | 21 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import re
import time
import datetime
import decimal
from ..thirdparty import six
from .. import utils
class DataType(object):
"""
Abstract data type
"""
_singleton = True
__slots__ = 'nullable',
@property
# Bigint
# Double
# String
#Timestamp
# Boolean
bigint = Bigint()
double = Double()
string = String()
timestamp = Timestamp()
boolean = Boolean()
_datahub_primitive_data_types = dict(
[(t.name, t) for t in (
bigint, double, string, timestamp, boolean
)]
)
integer_builtins = six.integer_types
float_builtins = (float,)
try:
import numpy as np
integer_builtins += (np.integer,)
float_builtins += (np.float,)
except ImportError:
pass
_datahub_primitive_to_builtin_types = {
bigint: integer_builtins,
double: float_builtins,
string: six.string_types,
timestamp: integer_builtins,
boolean: bool
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
... | 3.13879 | 562 |
#purpose: to take output from a MW@h .out file and produce workable data/plots to look at the resulting output in meaningful ways
#this is still too hard-coded for my liking, but it'll have to do for now
#i.e. if you want to add new attributes to the data class then you manually have to go through and fix the appending functions
import matplotlib.pyplot as plt
import numpy as np
import coord_trans as ct
import astropy
from astropy.coordinates import SkyCoord
import astropy.units as u
import random
import galpy
from galpy.orbit import Orbit
from galpy.potential import HernquistPotential
from galpy.potential import LogarithmicHaloPotential
from galpy.potential import MiyamotoNagaiPotential
from galpy.potential import PlummerPotential
m_bulge = 3.4e10*u.solMass #solar masses
m_disk = 1.0e11*u.solMass
v_halo = 74.61*u.km/u.s #km/s
G = 6.67e-11*u.m**3/(u.kg*u.s**2)
pot_bulge = HernquistPotential(amp=2*m_bulge, a=0.7*u.kpc, ro=8., vo=220.)
pot_disk = MiyamotoNagaiPotential(amp=G*m_disk, a=6.5*u.kpc, b=0.26*u.kpc, ro=8., vo=220.)
pot_halo = LogarithmicHaloPotential(amp=2*v_halo**2, q=1., core=12.0*u.kpc, ro=8., vo=220.)
pot = [pot_bulge, pot_disk, pot_halo]
m_plummer = 1e9*u.solMass
r_scale_plummer = 3*u.kpc
plummer_pot = PlummerPotential(amp=G*m_plummer, b=r_scale_plummer, ro=10*u.kpc, vo=20*u.km/u.s)
struct_to_sol = 222288.47 #this many solar masses make up one structural nass unit (the output of mwah)
#data.plot(d1='var1', d2='var2'): data, str, str -> plot
#takes in the 2 coordinates of the data you want to plot and plots them in a 2d scatter plot
#sticks a big fat red dot wherever the specific star is, given an id
#data.hist(d='r'): data, str -> histogram plot
#takes in the coordinate of the data you want in your histogram and then produces the relevant plot
#read_output(f): filename -> data class
#reads a milky way at home output file and turns it into a data class
#subset(data): data_object -> data_object
#takes in a data object and outputs a cut data object. Can cut within some radius or a rectangle cut. Can specify the axes, or if there is only 1 axis.
| [
2,
29983,
25,
284,
1011,
5072,
422,
257,
29961,
31,
71,
764,
448,
2393,
290,
4439,
670,
540,
1366,
14,
489,
1747,
284,
804,
379,
262,
7186,
5072,
287,
11570,
2842,
198,
2,
5661,
318,
991,
1165,
1327,
12,
40976,
329,
616,
24976,
11... | 2.778796 | 764 |
from datetime import datetime
from typing import cast, List, Optional
from asyncpg import Record
from attr import dataclass
from linkedin_messaging import URN
from mautrix.types import EventID, RoomID
from .model_base import Model
@dataclass
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
3350,
11,
7343,
11,
32233,
198,
198,
6738,
30351,
6024,
1330,
13266,
198,
6738,
708,
81,
1330,
4818,
330,
31172,
198,
6738,
6692,
259,
62,
37348,
3039,
1330,
37902,
45,
198,
... | 3.565217 | 69 |
# -*- coding: utf-8 -*-
import logging
logging_level = logging.DEBUG
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
18931,
198,
198,
6404,
2667,
62,
5715,
796,
18931,
13,
30531,
628,
198
] | 2.517241 | 29 |
import math
import time
import numpy as np
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
| [
11748,
10688,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
11,
42804,
62,
9127,
628,
628,
628,
628,
198
] | 3.105263 | 38 |
# Copyright (c) 2019 Mycroft AI, Inc. and Matthew Scholefield
#
# This file is part of Mycroft Light
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
from os.path import join
from pocketsphinx import Decoder
from typing import Callable
from mycroft.interfaces.speech.wake_word_engines.wake_word_engine_plugin import WakeWordEnginePlugin
from mycroft.util.misc import download_extract_tar
| [
2,
15069,
357,
66,
8,
13130,
2011,
36714,
9552,
11,
3457,
13,
290,
9308,
3059,
2305,
3245,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
2011,
36714,
4401,
198,
2,
357,
3826,
3740,
1378,
12567,
13,
785,
14,
25372,
14874,
2305,
3245,
1... | 3.835962 | 317 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import template, redirect
import utils
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
9294,
1330,
11055,
11,
18941,
198,
11748,
3384,
4487,
198
] | 2.8 | 35 |
"""
191. Number of 1 Bits (Hamming weight)
Easy
Share
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
Note:
Note that in some languages, such as Java, there is no unsigned integer type. In this case, the input will be given as a signed integer type. It should not affect your implementation, as the integer's internal binary representation is the same, whether it is signed or unsigned.
In Java, the compiler represents the signed integers using 2's complement notation. Therefore, in Example 3, the input represents the signed integer. -3.
Example 1:
Input: n = 00000000000000000000000000001011
Output: 3
Explanation: The input binary string 00000000000000000000000000001011 has a total of three '1' bits.
Example 2:
Input: n = 00000000000000000000000010000000
Output: 1
Explanation: The input binary string 00000000000000000000000010000000 has a total of one '1' bit.
Example 3:
Input: n = 11111111111111111111111111111101
Output: 31
Explanation: The input binary string 11111111111111111111111111111101 has a total of thirty one '1' bits.
Constraints:
The input must be a binary string of length 32.
Follow up: If this function is called many times, how would you optimize it?
"""
# V0
# The bin() method returns the binary string equivalent to the given integer.
# V0'
# IDEA : bit manipulation : n&(n-1) CAN REMOVE LAST 1 PER LOOP
# https://github.com/labuladong/fucking-algorithm/blob/master/%E7%AE%97%E6%B3%95%E6%80%9D%E7%BB%B4%E7%B3%BB%E5%88%97/%E5%B8%B8%E7%94%A8%E7%9A%84%E4%BD%8D%E6%93%8D%E4%BD%9C.md
# V1
# http://bookshadow.com/weblog/2015/03/10/leetcode-number-1-bits/
# IDEA : BITWISE OPERATOR
# https://wiki.python.org/moin/BitwiseOperators
# x & y
# Does a "bitwise and". Each bit of the output is 1 if the corresponding bit of x AND of y is 1, otherwise it's 0.
# e.g. :
# 111 & 111 = 111
# 111 & 100 = 100
# 1 & 0 = 0
# 1 & 1 = 1
# 0 & 0 = 0
# @param n, an integer
# @return an integer
# V1'
# http://bookshadow.com/weblog/2015/03/10/leetcode-number-1-bits/
# @param n, an integer
# @return an integer
# V1''
# https://blog.csdn.net/coder_orz/article/details/51323188
# IDEA
# The bin() method returns the binary string equivalent to the given integer.
# V2
# Time: O(logn) = O(32)
# Space: O(1)
# @param n, an integer
# @return an integer
| [
37811,
198,
26492,
13,
7913,
286,
352,
44733,
357,
21281,
2229,
3463,
8,
198,
28406,
198,
198,
11649,
198,
16594,
257,
2163,
326,
2753,
281,
22165,
18253,
290,
5860,
262,
1271,
286,
705,
16,
6,
10340,
340,
468,
357,
14508,
1900,
355,
... | 2.898305 | 826 |
# LSTM to count the number of '1's in a binary string
# Reference: https://becominghuman.ai/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow-1907a5bbb1fa
import numpy as np
from random import shuffle
import tensorflow as tf
"""
Parameters
"""
# Seed for all RNGs
rng_seed = 12345
np.random.seed(rng_seed)
tf.set_random_seed(rng_seed)
# Length of each binary string (i.e., length of each input sequence)
seq_len = 15
# Maximum range (i.e., max val of the integer reprsented by the bit string)
# Note that, max val is 2**num_range
num_range = 15
# Train split (fraction of data to be used for training)
train_split = 0.8
# Number of train samples
num_samples = 2 ** num_range
num_train = int(np.floor(train_split * num_samples))
num_test = num_samples - num_train
# Dimensions
dim_input = 1
dim_output = num_range + 1 # Since num of bits can only be in the range [0, num_range]
# Model parameters
num_hidden = 10
# Other hyperparameters
batch_size = 50
learning_rate = 0.01
momentum = 0.09
beta1 = 0.7
num_epochs = 10
num_train_batches = int(np.floor(float(num_train) / float(batch_size)))
num_test_batches = int(np.floor(float(num_test) / float(batch_size)))
# Verbosity controls
print_experiment_summary = True
if print_experiment_summary:
print('Total number of samples:', num_samples)
print('Train samples:', num_train)
print('Test samples:', num_test)
print('Batch size:', batch_size)
print('Train batches:', num_train_batches)
print('Test batches:', num_test_batches)
print('Max epochs:', num_epochs)
print_train_every = 100
print_test_every = 10
"""
Generate training data
"""
# Generate all strings of numbers in the interval [0, 2**num_range]
dataset = ['{0:^0{str_len}b}'.format(i, str_len = seq_len) for i in range(2**num_range)]
# Convert the string to a set of integers
dataset = np.array([[[int(j)] for j in list(dataset[i])] for i in range(len(dataset))])
# print(dataset)
labels_helper = np.array([[np.sum(num)] for num in dataset])
labels = np.zeros((num_samples, dim_output))
cur = 0
for ind in labels_helper:
labels[cur][ind] = 1.0
cur += 1
# print(labels)
"""
Build the computation graph
"""
data = tf.placeholder(tf.float32, [None, seq_len, dim_input])
target = tf.placeholder(tf.float32, [None, dim_output])
recurrent_unit = tf.contrib.rnn.LSTMCell(num_hidden)
val, _ = tf.nn.dynamic_rnn(recurrent_unit, data, dtype = tf.float32)
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight_fc = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias_fc = tf.Variable(tf.constant(0.1, shape = [target.get_shape()[1]]))
prediction = tf.nn.softmax(tf.matmul(last, weight_fc) + bias_fc)
cross_entropy = - tf.reduce_sum(target * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
loss = tf.train.AdamOptimizer(learning_rate = learning_rate, beta1 = beta1).minimize(cross_entropy)
# Accuracy computation
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
"""
Execute graph
"""
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
epoch = 0
# 'Epoch' loop
while epoch < num_epochs:
batch = 0
# Shuffle train data
train_order = np.random.permutation(num_train)
# 'Iteration' loop
train_error_this_epoch = 0.0
train_error_temp = 0.0
while batch < num_train_batches:
startIdx = batch*batch_size
endIdx = (batch+1)*batch_size
inds = train_order[startIdx:endIdx]
# input_batch, label_batch = dataset[startIdx:endIdx], labels[startIdx:endIdx] # no shuffle
input_batch, label_batch = dataset[inds], labels[inds]
net_out = sess.run([loss, error], feed_dict = {data: input_batch, target: label_batch})
train_error_temp += net_out[1]
train_error_this_epoch += net_out[1]
if batch % print_train_every == 0:
print('Epoch: ', epoch, 'Error: ', train_error_temp/float(print_train_every))
train_error_temp = 0.0
batch += 1
# print('Epoch:', epoch, 'Full train set:', train_error_this_epoch/float(num_train))
# Test
if epoch % 2 == 0:
test_error_this_epoch = 0.0
test_error_temp = 0.0
while batch < num_train_batches + num_test_batches:
startIdx = batch*batch_size
endIdx = (batch+1)*batch_size
input_batch, label_batch = dataset[startIdx:endIdx], labels[startIdx:endIdx]
net_out = sess.run([error, prediction], feed_dict = {data: input_batch, target: label_batch})
test_error_temp += net_out[0]
test_error_this_epoch += net_out[0]
if batch % print_test_every == 0:
print('Epoch: ', epoch, 'Error: ', test_error_temp/float(print_test_every))
test_error_temp = 0.0
random_disp = np.random.randint(batch_size)
print(np.squeeze(input_batch[random_disp]))
print('Pred:', np.argmax(net_out[1][random_disp]), 'GT:', \
np.argmax(label_batch[random_disp]))
batch += 1
print('Epoch: ', epoch, 'Full test set:', test_error_this_epoch/float(num_test))
epoch += 1
| [
2,
406,
2257,
44,
284,
954,
262,
1271,
286,
705,
16,
338,
287,
257,
13934,
4731,
198,
2,
20984,
25,
3740,
1378,
9423,
3383,
10734,
13,
1872,
14,
64,
12,
3919,
8158,
12,
41311,
12,
1462,
12,
320,
26908,
278,
12,
81,
20471,
12,
75... | 2.525945 | 1,985 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
"""
使用2个指针,一开始分别指向第一个数和最后一个数,当两者之和小于target时,左指针右移,当两者之和大于target时,右指针左移
时间:O(n),空间:O(1), 可能有O(log n)的解法吗???
类似问题:653. Two Sum IV - Input is a BST
"""
if __name__ == '__main__':
# for sanity check
nums = [2, 7, 11, 15]
assert(Solution().twoSum(nums, 9) == [1, 2]) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
15056,
281,
7177,
286,
37014,
326,
318,
1541,
23243,
287,
41988,
1502,
11,
1064,
734,
3146,
884,
326,
... | 2.052009 | 423 |
#!/usr/bin/env python
# encoding: utf-8
import os
import six
import struct
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.errors
from pfp.fields import *
import pfp.utils
from pfp.bitwrap import BitwrappedStream
import utils
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
11748,
2237,
198,
11748,
2878,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
198,
17597,
13,
6978,
13,
28463,
7,
15,... | 2.674419 | 129 |
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle
import numpy as np
from typing import List, Tuple
from loguru import logger
from kino.geometry.point import Point
from kino.geometry import Vector
from myterial import blue_dark, pink
from slam.environment import Environment
from slam.map import Map
from slam.ray import Ray
from slam.behavior import (
BehavioralRoutine,
Explore,
Backtrack,
SpinScan,
NavigateToNode,
)
from slam.planner import Planner
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
8071,
2052,
1330,
48599,
9248,
11,
16291,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
6738,
2604,
1471... | 3.313725 | 153 |
import logging as _logging
import arcgis
_log = _logging.getLogger(__name__)
_use_async = False
def _get_list_value(index, array):
"""
helper operation to loop a list of values regardless of the index value
Example:
>>> a = [111,222,333]
>>> list_loop(15, a)
111
"""
if len(array) == 0:
return None
elif index >= 0 and index < len(array):
return array[index]
return array[index % len(array)]
def export_map(web_map_as_json = None,
format = """PDF""",
layout_template = """MAP_ONLY""",
gis=None):
"""
This function takes the state of the web map(for example, included services, layer visibility
settings, client-side graphics, and so forth) and returns either (a) a page layout or
(b) a map without page surrounds of the specified area of interest in raster or vector format.
The input for this function is a piece of text in JavaScript object notation (JSON) format describing the layers,
graphics, and other settings in the web map. The JSON must be structured according to the WebMap specification
in the ArcGIS HelpThis tool is shipped with ArcGIS Server to support web services for printing, including the
preconfigured service named PrintingTools.
Parameters:
web_map_as_json: Web Map as JSON (str). Required parameter. A JSON representation of the state of the map to be exported as it appears in the web application. See the WebMap specification in the ArcGIS Help to understand how this text should be formatted. The ArcGIS web APIs (for JavaScript, Flex, Silverlight, etc.) allow developers to easily get this JSON string from the map.
format: Format (str). Optional parameter. The format in which the map image for printing will be delivered. The following strings are accepted.For example:PNG8 (default if the parameter is left blank)PDFPNG32JPGGIFEPSSVGSVGZ
Choice list:['PDF', 'PNG32', 'PNG8', 'JPG', 'GIF', 'EPS', 'SVG', 'SVGZ']
layout_template: Layout Template (str). Optional parameter. Either a name of a template from the list or the keyword MAP_ONLY. When MAP_ONLY is chosen or an empty string is passed in, the output map does not contain any page layout surroundings (for example title, legends, scale bar, and so forth)
Choice list:['A3 Landscape', 'A3 Portrait', 'A4 Landscape', 'A4 Portrait', 'Letter ANSI A Landscape', 'Letter ANSI A Portrait', 'Tabloid ANSI B Landscape', 'Tabloid ANSI B Portrait', 'MAP_ONLY']
gis: Optional, the GIS on which this tool runs. If not specified, the active GIS is used.
Returns:
output_file - Output File as a DataFile
See https://utility.arcgisonline.com/arcgis/rest/directories/arcgisoutput/Utilities/PrintingTools_GPServer/Utilities_PrintingTools/ExportWebMapTask.htm for additional help.
"""
from arcgis.geoprocessing import DataFile
from arcgis.geoprocessing._support import _execute_gp_tool
kwargs = locals()
param_db = {
"web_map_as_json": (str, "Web_Map_as_JSON"),
"format": (str, "Format"),
"layout_template": (str, "Layout_Template"),
"output_file": (DataFile, "Output File"),
}
return_values = [
{"name": "output_file", "display_name": "Output File", "type": DataFile},
]
if gis is None:
gis = arcgis.env.active_gis
url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]
return _execute_gp_tool(gis, "Export Web Map Task", kwargs, param_db, return_values, _use_async, url)
export_map.__annotations__ = {
'web_map_as_json': str,
'format': str,
'layout_template': str
}
def get_layout_templates(gis=None):
    """Return the GIS's layout templates, formatted as a Python dict.

    Wraps the ``GetLayoutTemplatesInfo`` task of the portal's printing
    geoprocessing service.

    :param gis: GIS to query. Defaults to the active GIS when omitted.
    :return: output_json -- layout templates as a Python dict.
    """
    from arcgis.geoprocessing import DataFile
    from arcgis.geoprocessing._support import _execute_gp_tool
    # NOTE: locals() must be captured right after the imports, before any
    # further locals are bound, so kwargs matches what the GP tool expects.
    kwargs = locals()
    param_schema = {
        "output_json": (str, "Output JSON"),
    }
    output_defs = [
        {"name": "output_json", "display_name": "Output JSON", "type": str},
    ]
    if gis is None:
        gis = arcgis.env.active_gis
    # The printing GP service shares its endpoint with the export task;
    # strip the trailing task segment to obtain the service URL.
    task_url = gis.properties.helperServices.printTask.url[:-len('/Export%20Web%20Map%20Task')]
    return _execute_gp_tool(gis, "Get Layout Templates Info Task", kwargs,
                            param_schema, output_defs, _use_async, task_url)
get_layout_templates.__annotations__ = {'return': str} | [
11748,
18931,
355,
4808,
6404,
2667,
198,
11748,
10389,
70,
271,
628,
198,
62,
6404,
796,
4808,
6404,
2667,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
62,
1904,
62,
292,
13361,
796,
10352,
628,
198,
4299,
4808,
1136,
62,... | 2.867066 | 1,670 |
"""
Author: Daniel Fink
Email: daniel-fink@outlook.com
"""
import os
import aiohttp
class FileService:
"""
A service class for all kind of file access like downloads,
file deletion, folder deletion, ...
"""
@classmethod
@classmethod
@classmethod
@classmethod | [
37811,
198,
13838,
25,
7806,
376,
676,
198,
15333,
25,
288,
6321,
12,
69,
676,
31,
448,
5460,
13,
785,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
257,
952,
4023,
628,
198,
4871,
9220,
16177,
25,
198,
220,
220,
220,
37227,
198,
... | 2.911765 | 102 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RExactextractr(RPackage):
    """Fast Extraction from Raster Datasets using Polygons

    Provides a replacement for the 'extract' function from the 'raster' package
    that is suitable for extracting raster values using 'sf' polygons."""

    # Where to find the package and its source tarballs on CRAN.
    homepage = "https://cloud.r-project.org/package=exactextractr"
    url      = "https://cloud.r-project.org/src/contrib/exactextractr_0.3.0.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/exactextractr"

    # Known versions with their source-tarball checksums.
    version('0.5.1', sha256='47ddfb4b9e42e86957e03b1c745d657978d7c4bed12ed3aa053e1bc89f20616d')
    version('0.3.0', sha256='c7fb38b38b9dc8b3ca5b8f1f84f4ba3256efd331f2b4636b496d42689ffc3fb0')
    version('0.2.1', sha256='d0b998c77c3fd9265a600a0e08e9bf32a2490a06c19df0d0c0dea4b5c9ab5773')

    # R and R-package dependencies (build- and run-time).
    depends_on('r@3.4.0:', type=('build', 'run'))
    depends_on('r-rcpp@0.12.12:', type=('build', 'run'))
    depends_on('r-raster', type=('build', 'run'))
    depends_on('r-sf', type=('build', 'run'))
    # GEOS is also linked against, not just invoked at build/run time.
    depends_on('geos@3.5.0:', type=('build', 'run', 'link'))
| [
2,
15069,
2211,
12,
1238,
1828,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.299632 | 544 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_traps
version_added: "2.4"
short_description: Manages SNMP traps configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP traps configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
feature_name:
description:
- Alarm feature name.
required: false
default: null
choices: ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad', 'devm',
'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down', 'fcoe',
'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6', 'isis',
'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp', 'mpls_lspm',
'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3', 'openflow', 'ospf',
'ospfv3', 'pim', 'pim-std', 'qos', 'radius', 'rm', 'rmon', 'securitytrap',
'smlktrap', 'snmp', 'ssh', 'stackmng', 'sysclock', 'sysom', 'system',
'tcp', 'telnet', 'trill', 'trunk', 'tty', 'vbst', 'vfs', 'virtual-perception',
'vrrp', 'vstm', 'all']
trap_name:
description:
- Alarm trap name.
required: false
default: null
interface_type:
description:
- Interface type.
required: false
default: null
choices: ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif', '100GE',
'40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']
interface_number:
description:
- Interface number.
required: false
default: null
port_number:
description:
- Source port number.
required: false
default: null
'''
EXAMPLES = '''
- name: CloudEngine snmp traps test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP trap all enable"
ce_snmp_traps:
state: present
feature_name: all
provider: "{{ cli }}"
- name: "Config SNMP trap interface"
ce_snmp_traps:
state: present
interface_type: 40GE
interface_number: 2/0/1
provider: "{{ cli }}"
- name: "Config SNMP trap port"
ce_snmp_traps:
state: present
port_number: 2222
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"feature_name": "all",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"snmp-agent trap": [],
"undo snmp-agent trap": []}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"snmp-agent trap": ["enable"],
"undo snmp-agent trap": []}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent trap enable"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec, run_commands
class SnmpTraps(object):
    """ Manages SNMP trap configuration

    Reads the current trap configuration from a CloudEngine device over the
    CLI, compares it with the requested module parameters, and applies only
    the commands needed to reach the desired state.  Results are reported
    through the standard proposed/existing/end_state/updates keys.
    """

    def __init__(self, **kwargs):
        """ Class init """
        # module
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        self.module = AnsibleModule(
            argument_spec=self.spec,
            required_together=[("interface_type", "interface_number")],
            supports_check_mode=True
        )
        # config: parsed view of the device's current trap configuration
        self.cur_cfg = dict()
        self.cur_cfg["snmp-agent trap"] = []
        self.cur_cfg["undo snmp-agent trap"] = []
        # module args
        self.state = self.module.params['state']
        self.feature_name = self.module.params['feature_name']
        self.trap_name = self.module.params['trap_name']
        self.interface_type = self.module.params['interface_type']
        self.interface_number = self.module.params['interface_number']
        self.port_number = self.module.params['port_number']
        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.existing["snmp-agent trap"] = []
        self.existing["undo snmp-agent trap"] = []
        self.end_state = dict()
        self.end_state["snmp-agent trap"] = []
        self.end_state["undo snmp-agent trap"] = []
        # Fetch the interface table once up front so check_args() can verify
        # that a user-supplied interface actually exists on the device.
        commands = list()
        cmd1 = 'display interface brief'
        commands.append(cmd1)
        self.interface = run_commands(self.module, commands)

    def check_args(self):
        """ Check invalid args

        Fails the module when port_number is non-numeric or outside the
        non-well-known range, or when the given interface does not exist.
        """
        if self.port_number:
            if self.port_number.isdigit():
                # Only non-privileged ports are accepted as trap source ports.
                if int(self.port_number) < 1025 or int(self.port_number) > 65535:
                    self.module.fail_json(
                        msg='Error: The value of port_number is out of [1025 - 65535].')
            else:
                self.module.fail_json(
                    msg='Error: The port_number is not digit.')
        if self.interface_type and self.interface_number:
            tmp_interface = self.interface_type + self.interface_number
            # self.interface[0] holds the 'display interface brief' output.
            if tmp_interface not in self.interface[0]:
                self.module.fail_json(
                    msg='Error: The interface %s is not in the device.' % tmp_interface)

    def get_proposed(self):
        """ Get proposed state

        Records only the parameters the user actually supplied.
        """
        self.proposed["state"] = self.state
        if self.feature_name:
            self.proposed["feature_name"] = self.feature_name
        if self.trap_name:
            self.proposed["trap_name"] = self.trap_name
        if self.interface_type:
            self.proposed["interface_type"] = self.interface_type
        if self.interface_number:
            self.proposed["interface_number"] = self.interface_number
        if self.port_number:
            self.proposed["port_number"] = self.port_number

    def get_existing(self):
        """ Get existing state

        Parses the trap-related lines of the running configuration into
        self.cur_cfg and self.existing.  Feature/trap lines are matched
        case-insensitively (device output casing varies).
        """
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")
            # Source interface / source port keep their original casing.
            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.cur_cfg["trap source-port"] = item_tmp[1]
                        self.existing["trap source-port"] = item_tmp[1]
                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.cur_cfg["trap source interface"] = item_tmp[1]
                        self.existing["trap source interface"] = item_tmp[1]
            if self.feature_name:
                # Ordering matters: the exact 'enable'/'disable' matches must
                # be tested before the substring 'enable <feature>' patterns.
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.cur_cfg["snmp-agent trap"].append("enable")
                        self.existing["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.cur_cfg["snmp-agent trap"].append("disable")
                        self.existing["snmp-agent trap"].append("disable")
                    elif "undo snmp-agent trap enable " in item:
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.cur_cfg[
                            "undo snmp-agent trap"].append(item_tmp[1])
                        self.existing[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.cur_cfg["snmp-agent trap"].append(item_tmp[1])
                        self.existing["snmp-agent trap"].append(item_tmp[1])
            else:
                # No feature requested: drop the (empty) trap lists so they
                # do not clutter the reported existing state.
                del self.existing["snmp-agent trap"]
                del self.existing["undo snmp-agent trap"]

    def get_end_state(self):
        """ Get end_state state

        Re-reads the device configuration after changes; mirrors the
        parsing logic of get_existing() but fills self.end_state only.
        """
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_cfg_lower = tmp_cfg.lower()
            temp_data = tmp_cfg.split("\n")
            temp_data_lower = temp_cfg_lower.split("\n")
            for item in temp_data:
                if "snmp-agent trap source-port " in item:
                    if self.port_number:
                        item_tmp = item.split("snmp-agent trap source-port ")
                        self.end_state["trap source-port"] = item_tmp[1]
                elif "snmp-agent trap source " in item:
                    if self.interface_type:
                        item_tmp = item.split("snmp-agent trap source ")
                        self.end_state["trap source interface"] = item_tmp[1]
            if self.feature_name:
                for item in temp_data_lower:
                    if item == "snmp-agent trap enable":
                        self.end_state["snmp-agent trap"].append("enable")
                    elif item == "snmp-agent trap disable":
                        self.end_state["snmp-agent trap"].append("disable")
                    elif "undo snmp-agent trap enable " in item:
                        item_tmp = item.split("undo snmp-agent trap enable ")
                        self.end_state[
                            "undo snmp-agent trap"].append(item_tmp[1])
                    elif "snmp-agent trap enable " in item:
                        item_tmp = item.split("snmp-agent trap enable ")
                        self.end_state["snmp-agent trap"].append(item_tmp[1])
            else:
                del self.end_state["snmp-agent trap"]
                del self.end_state["undo snmp-agent trap"]

    def cli_load_config(self, commands):
        """ Load configure through cli

        No-op in check mode so --check never touches the device.
        """
        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_get_config(self):
        """ Get configure through cli

        Returns only the configuration lines mentioning both snmp and trap.
        """
        regular = "| include snmp | include trap"
        flags = list()
        flags.append(regular)
        tmp_cfg = get_config(self.module, flags)
        return tmp_cfg

    def set_trap_feature_name(self):
        """ Set feature name for trap """
        if self.feature_name == "all":
            cmd = "snmp-agent trap enable"
        else:
            cmd = "snmp-agent trap enable feature-name %s" % self.feature_name
        if self.trap_name:
            cmd += " trap-name %s" % self.trap_name
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_feature_name(self):
        """ Undo feature name for trap """
        if self.feature_name == "all":
            cmd = "undo snmp-agent trap enable"
        else:
            cmd = "undo snmp-agent trap enable feature-name %s" % self.feature_name
        if self.trap_name:
            cmd += " trap-name %s" % self.trap_name
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_interface(self):
        """ Set source interface for trap """
        cmd = "snmp-agent trap source %s %s" % (
            self.interface_type, self.interface_number)
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_interface(self):
        """ Undo source interface for trap """
        cmd = "undo snmp-agent trap source"
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def set_trap_source_port(self):
        """ Set source port for trap """
        cmd = "snmp-agent trap source-port %s" % self.port_number
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def undo_trap_source_port(self):
        """ Undo source port for trap """
        cmd = "undo snmp-agent trap source-port"
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True

    def work(self):
        """ The work function

        Drives the module: validate args, diff proposed vs. existing
        configuration, apply the minimal set of commands, then report.
        """
        self.check_args()
        self.get_proposed()
        self.get_existing()
        find_flag = False
        find_undo_flag = False
        tmp_interface = None
        if self.state == "present":
            if self.feature_name:
                # Build the suffix used to recognise this feature/trap in
                # the parsed configuration (device output is lowercased).
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name
                # An explicit 'undo ... ' line means the trap is disabled
                # and must be re-enabled.
                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True
                        elif self.feature_name == "all":
                            find_undo_flag = True
                if find_undo_flag:
                    self.set_trap_feature_name()
                if not find_undo_flag:
                    # Otherwise enable only if not already enabled
                    # (globally or for this specific feature).
                    find_flag = False
                    if self.cur_cfg["snmp-agent trap"]:
                        for item in self.cur_cfg["snmp-agent trap"]:
                            if item == "enable":
                                find_flag = True
                            elif item == tmp_cfg:
                                find_flag = True
                    if not find_flag:
                        self.set_trap_feature_name()
            if self.interface_type:
                find_flag = False
                tmp_interface = self.interface_type + self.interface_number
                if "trap source interface" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source interface"] == tmp_interface:
                        find_flag = True
                if not find_flag:
                    self.set_trap_source_interface()
            if self.port_number:
                find_flag = False
                if "trap source-port" in self.cur_cfg.keys():
                    if self.cur_cfg["trap source-port"] == self.port_number:
                        find_flag = True
                if not find_flag:
                    self.set_trap_source_port()
        else:
            # state == "absent": remove only what is actually configured.
            if self.feature_name:
                if self.trap_name:
                    tmp_cfg = "feature-name %s trap-name %s" % (
                        self.feature_name, self.trap_name.lower())
                else:
                    tmp_cfg = "feature-name %s" % self.feature_name
                find_flag = False
                if self.cur_cfg["snmp-agent trap"]:
                    for item in self.cur_cfg["snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_flag = True
                        elif item == "enable":
                            find_flag = True
                        elif tmp_cfg in item:
                            find_flag = True
                else:
                    # No explicit enable lines: traps may be on by default,
                    # so assume the undo is needed.
                    find_flag = True
                find_undo_flag = False
                if self.cur_cfg["undo snmp-agent trap"]:
                    for item in self.cur_cfg["undo snmp-agent trap"]:
                        if item == tmp_cfg:
                            find_undo_flag = True
                        elif tmp_cfg in item:
                            find_undo_flag = True
                if find_undo_flag:
                    # Already undone on the device: nothing to do.
                    pass
                elif find_flag:
                    self.undo_trap_feature_name()
            if self.interface_type:
                if "trap source interface" in self.cur_cfg.keys():
                    self.undo_trap_source_interface()
            if self.port_number:
                if "trap source-port" in self.cur_cfg.keys():
                    self.undo_trap_source_port()
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd
        self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run SnmpTraps."""
    feature_choices = ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad',
                       'devm', 'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down',
                       'fcoe', 'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6',
                       'isis', 'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp',
                       'mpls_lspm', 'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3',
                       'openflow', 'ospf', 'ospfv3', 'pim', 'pim-std', 'qos', 'radius',
                       'rm', 'rmon', 'securitytrap', 'smlktrap', 'snmp', 'ssh', 'stackmng',
                       'sysclock', 'sysom', 'system', 'tcp', 'telnet', 'trill', 'trunk',
                       'tty', 'vbst', 'vfs', 'virtual-perception', 'vrrp', 'vstm', 'all']
    interface_choices = ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif',
                         '100GE', '40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']
    spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        feature_name=dict(choices=feature_choices),
        trap_name=dict(type='str'),
        interface_type=dict(choices=interface_choices),
        interface_number=dict(type='str'),
        port_number=dict(type='str')
    )
    # Merge in the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)
    snmp_module = SnmpTraps(argument_spec=spec)
    snmp_module.work()


if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
28038,
856,
198,
2,
198,
2,
28038,
856,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
2846,
286,
262,... | 1.976164 | 9,901 |
import csv
import datetime
import random
import sys
import os
import time
import argparse
import pandas as pd
import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
BASE_DIR = "/Users/jrtorres/Documents/JRTDocs/Development/General_Projects/cpd-workshop-health-care/data/"
OUTPUT_DIR = "/Users/jrtorres/tmp/"
LOINC_CODES_NAMES = {
"8302-2": "Height",
"29463-7": "Weight",
"6690-2": "Leukocytes",
"789-8": "Erythrocytes",
"718-7": "Hemoglobin",
"4544-3": "Hematocrit",
"787-2": "MCV",
"785-6": "MCH",
"786-4": "MCHC",
"777-3": "Platelets",
"8462-4": "Diastolic Blood Pressure",
"8480-6": "Systolic Blood Pressure",
"39156-5": "Body Mass Index",
"2093-3": "Total Cholesterol",
"2571-8": "Triglycerides",
"18262-6": "LDL Cholesterol",
"2085-9": "HDL Cholesterol",
"4548-4": "A1c Hemoglobin Total",
"2339-0": "Glucose",
"6299-2": "Urea Nitrogen",
"38483-4": "Creatinine",
"49765-1": "Calcium",
"2947-0": "Sodium",
"6298-4": "Potassium",
"2069-3": "Chloride",
"20565-8": "Carbon Dioxide",
"14959-1": "Microalbumin Creatinine Ratio",
"38265-5": "DXA Bone density",
"26464-8": "White Blood Cell",
"26453-1": "Red Blood Cell",
"30385-9": "RBC Distribution Width",
"26515-7": "Platelet Count"
}
if __name__ == "__main__":
    if sys.version_info[0] < 3:
        raise Exception("Python 3 or higher version is required for this script.")
    # FIX: the original prog string was "python %s)" with a stray ')'.
    parser = argparse.ArgumentParser(prog="python %s" % os.path.basename(__file__),
                                     description="Script that manages healthcare dataset.")
    parser.add_argument("-output-base-directory", dest="out_base_dir", required=False, default=None, help="Directory to store output files.")
    parser.add_argument("-input-base-directory", dest="in_base_dir", required=False, default=None, help="Directory with healthcare data set.")
    parser.add_argument("-num-patients", dest="num_records", required=False, type=int, default=0, help="Number of patients.")
    print("Starting script.\n")
    args = parser.parse_args()
    started_time = time.time()
    # FIX: the original used "args.num_records is not 0" — an identity
    # comparison against an int literal (a SyntaxWarning since Python 3.8
    # and only accidentally correct under CPython's small-int caching).
    if args.num_records != 0:
        # Optionally subset the raw files down to the requested patient count
        # before processing.
        subset_files_by_patient(args.num_records)
    # Pivot the observations file into one row per patient encounter.
    transpose_observations(BASE_DIR + "observations.csv", OUTPUT_DIR + "observations_processed.csv")
    elapsed = time.time() - started_time
    print("\nFinished script. Elapsed time: %f" % elapsed)
| [
11748,
269,
21370,
198,
11748,
4818,
8079,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
33918,
198,
198,
6738,
279,
893,
20928,
13,
2... | 2.437044 | 1,096 |
# Generated by Django 2.0.5 on 2018-05-08 17:52
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
2713,
12,
2919,
1596,
25,
4309,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from tkinter import *
root = Tk()
# NOTE(review): ``Calculator`` is not defined or imported in this fragment;
# presumably it is a Frame-based GUI class defined elsewhere in the original
# script — confirm before running.
my_gui = Calculator(root)
root.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
201,
198,
201,
198,
201,
198,
201,
198,
15763,
796,
309,
74,
3419,
201,
198,
1820,
62,
48317,
796,
43597,
7,
15763,
8,
201,
198,
15763,
13,
12417,
26268,
3419,
201,
198
] | 2.263158 | 38 |
from django.shortcuts import render_to_response
import datetime, pickle, os
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from web_frontend import settings
from django.core.urlresolvers import reverse
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
11748,
4818,
8079,
11,
2298,
293,
11,
28686,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198... | 3.316176 | 136 |
#!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
if __name__ == "__main__":
    # Deferred import: the generated entry-point module only exists in the
    # packaged layout, which is also why pylint's name check is suppressed.
    from subprocess_main import subprocess_main  # pylint: disable=no-name-in-module

    # framework=None lets subprocess_main infer the framework from the args.
    subprocess_main(framework=None)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
34,
8,
2864,
12,
1238,
2481,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
628,
198,
361,
11593,
3672,
834,
6624,
... | 2.782609 | 92 |
from rest_framework import permissions
| [
6738,
1334,
62,
30604,
1330,
21627,
628
] | 5.714286 | 7 |
# Title : Inheritance
# Author : Kiran Raj R.
# Date : 08:11:2020
import math
class Polygon:
    """Create a simple polygon class, which takes the number of sides and the
    magnitude of each side."""
    # triangle = Polygon(3)
    # # triangle.get_sides()
    # # triangle.print_sides()
    # NOTE(review): ``Triangle`` is not defined in this fragment — presumably a
    # Polygon subclass (with get_sides/findArea) defined elsewhere in the
    # original file; these statements will raise NameError as-is. Confirm.
    triangle1 = Triangle()
    triangle1.get_sides()
    triangle1.findArea()
| [
2,
11851,
220,
1058,
47025,
42942,
198,
2,
6434,
1058,
7385,
272,
13308,
371,
13,
198,
2,
7536,
220,
220,
1058,
8487,
25,
1157,
25,
42334,
198,
198,
11748,
10688,
198,
198,
4871,
12280,
14520,
25,
198,
220,
220,
220,
366,
16447,
257... | 2.890756 | 119 |
#!/usr/bin/env python
import argparse
import datetime
import pandas as pd
import yaml
from pytrthree import TRTH
from pytrthree.utils import retry
if __name__ == '__main__':
    # Command-line interface: all three YAML inputs are opened by argparse
    # (FileType('r')) and handed to the consumers as file objects.
    parser = argparse.ArgumentParser(description='Tool to send a series of requests to TRTH.')
    parser.add_argument('--config', action='store', type=argparse.FileType('r'), required=True,
                        help='TRTH API configuration (YAML file)')
    parser.add_argument('--template', action='store', type=argparse.FileType('r'), required=True,
                        help='Base template for the requests (YAML file)')
    parser.add_argument('--criteria', action='store', type=argparse.FileType('r'), required=True,
                        help='Criteria for searching RICs and modifying queried fields (YAML file)')
    parser.add_argument('--start', action='store', type=str, required=True,
                        help='Start date (ISO-8601 datetime string)')
    parser.add_argument('--end', action='store', type=str, default=str(datetime.datetime.now().date()),
                        help='End date (ISO-8601 datetime string). Default to today\'s date.')
    parser.add_argument('--group', action='store', type=str, default='1A',
                        help='Pandas datetime frequency string for grouping requests. Defaults to "1A".')
    args = parser.parse_args()
    api = TRTH(config=args.config)
    # Fail fast: surface API errors as exceptions instead of logged warnings.
    api.options['raise_exception'] = True
    # SECURITY NOTE(review): yaml.load() without an explicit Loader can execute
    # arbitrary Python from the input file; prefer yaml.safe_load() unless the
    # criteria/template files deliberately use python-object tags.
    criteria = yaml.load(args.criteria)
    # NOTE(review): 'template' is loaded but never used below — presumably
    # consumed by make_request in the full script; confirm.
    template = yaml.load(args.template)
    # Split the [start, end] range into one daterange per grouping period.
    # NOTE(review): pd.TimeGrouper is deprecated in modern pandas
    # (use pd.Grouper(freq=...)).
    dates = pd.date_range(args.start, args.end).to_series()
    dateranges = [parse_daterange(i) for _, i in dates.groupby(pd.TimeGrouper(args.group))]
    # One FTP request per (daterange, criteria entry), with exponential
    # backoff retries on submission.
    for daterange in dateranges:
        for name, crit in criteria.items():
            request = make_request(daterange, crit)
            rid = retry(api.submit_ftp_request, request, sleep=30, exp_base=2)
            api.logger.info(rid['requestID'])
    api.logger.info('All requests sent!')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
331,
43695,
198,
6738,
12972,
2213,
15542,
1330,
7579,
4221,
198,
6738,
12972,
2213,
15... | 2.513376 | 785 |
import pytest
import numpy as np
from sklearn import datasets
from Logistic_Regression.logistic_regression import LogisticRegression
def test_logistic_regression(train_test_data_final):
    """
    Tests the logistic regression classifier on a binary subset of the data.

    FIX: the original function was decorated with ``@pytest.fixture``, which
    turned it into a fixture instead of a test, so pytest never collected or
    ran it.  The decorator has been removed; the function now runs as a
    regular test consuming the ``train_test_data_final`` fixture.
    """
    X_train, y_train = train_test_data_final
    # Keep a single feature and binarise the labels (class 2 vs. the rest)
    # to obtain a binary classification problem.
    X_train, y_train = X_train[:, 3:], (y_train == 2).astype(np.int8).reshape(-1, 1)
    X_test, y_test = np.array([[1.7], [1.5]]), np.array([[1], [0]])
    log_reg = LogisticRegression(n_iterations=5000, batch_size=32)
    log_reg.fit(X_train, y_train)
    y_pred = log_reg.predict(X_test)
    # Shape/type sanity checks, then exact prediction check.
    assert isinstance(y_pred, np.ndarray)
    assert len(y_pred) > 0
    assert y_pred.shape[0] == X_test.shape[0]
    assert np.array_equal(y_test, y_pred)
| [
11748,
12972,
9288,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
1341,
35720,
1330,
40522,
201,
198,
201,
198,
6738,
5972,
2569,
62,
8081,
2234,
13,
6404,
2569,
62,
2301,
2234,
1330,
5972,
2569,
8081,
2234,
201,
198,
201,
... | 2.402857 | 350 |
from django.contrib import admin
from .models import Question
from .models import Answer
# Register your models here
admin.site.register(Question)
admin.site.register(Answer)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
18233,
198,
6738,
764,
27530,
1330,
23998,
198,
198,
2,
17296,
534,
4981,
994,
198,
28482,
13,
15654,
13,
30238,
7,
24361,
8,
198,
28482,
13,
15654,
13,
3023... | 3.826087 | 46 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test the utils"""
import sys
import os
import unittest
sys.path = ['./'] + sys.path
from util import is_meta
from util import get_canonical_id_from_url_segment
from util import get_canonical_id_from_title
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
14402,
262,
3384,
4487,
37811,
628,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
1... | 2.623932 | 117 |
#!/usr/bin/env python3
import numpy as np
import argparse
from osgeo import gdal
import isce
import isceobj
import os
def cmdLineParse():
    '''
    Build the command-line parser and return the parsed options.
    '''
    ap = argparse.ArgumentParser(description='Convert GeoTiff to ISCE file')
    ap.add_argument('-i', '--input', dest='infile', type=str, required=True,
                    help='Input GeoTiff file. If tar file is also included, this will be output file extracted from the TAR archive.')
    ap.add_argument('-o', '--output', dest='outfile', type=str, required=True,
                    help='Output GeoTiff file')
    ap.add_argument('-t', '--tar', dest='tarfile', type=str, default=None,
                    help='Optional input tar archive. If provided, Band 8 is extracted to file name provided with input option.')
    return ap.parse_args()
def dumpTiff(infile, outfile):
    '''
    Dump band 1 of a GeoTiff as raw float32 samples and return its
    georeferencing metadata (extent, pixel spacing, projection reference).
    '''
    # Open via GDAL and pull the raster dimensions.
    dataset = gdal.Open(infile)
    width = dataset.RasterXSize
    length = dataset.RasterYSize
    # Affine geotransform: (originX, dx, rotX, originY, rotY, dy).
    transform = dataset.GetGeoTransform()
    meta = {
        'width': width,
        'length': length,
        'minx': transform[0],
        'miny': transform[3] + width * transform[4] + length * transform[5],
        'maxx': transform[0] + width * transform[1] + length * transform[2],
        'maxy': transform[3],
        'deltax': transform[1],
        'deltay': transform[5],
        'reference': dataset.GetProjectionRef(),
    }
    # Write the whole first band to disk as 32-bit floats.
    band = dataset.GetRasterBand(1)
    samples = band.ReadAsArray(0, 0, width, length)
    samples.astype(np.float32).tofile(outfile)
    return meta
def extractBand8(intarfile, destfile):
    '''
    Extracts Band 8 of downloaded Tar file from EarthExplorer.

    Parameters
    ----------
    intarfile : str
        Path to the TAR archive downloaded from EarthExplorer.
    destfile : str
        Path the extracted band-8 TIF is written to.

    Raises
    ------
    Exception
        If no member ending in 'B8.TIF' is present in the archive.

    FIX: the original leaked file handles — the extracted member stream was
    never closed, and the archive/destination handles stayed open when an
    exception occurred.  Context managers / try-finally now guarantee cleanup.
    '''
    import tarfile
    import shutil

    with tarfile.open(intarfile) as fid:
        # Find the band 8 file (last match wins, as in the original scan).
        src = None
        for member in fid.getmembers():
            if member.name.endswith('B8.TIF'):
                src = member
        if src is None:
            raise Exception('Band 8 TIF file not found in tar archive')
        print('Extracting: %s' % (src.name))
        srcid = fid.extractfile(src)
        try:
            with open(destfile, 'wb') as destid:
                # Stream the member straight to disk without loading it fully.
                shutil.copyfileobj(srcid, destid)
        finally:
            srcid.close()
if __name__ == '__main__':
    ####Parse cmd line
    inps = cmdLineParse()
    ####If input tar file is given, pull band 8 out of it first; the
    ####extracted TIF becomes the input file for the conversion below.
    if inps.tarfile is not None:
        extractBand8(inps.tarfile, inps.infile)
    # Dump raw float32 samples and collect the georeferencing metadata.
    print('Dumping image to file')
    meta = dumpTiff(inps.infile, inps.outfile)
    # print(meta)
    ####Create an ISCE XML header for the landsat image
    img = isceobj.createDemImage()
    img.setFilename(inps.outfile)
    img.setDataType('FLOAT')
    # Map the GeoTiff grid onto ISCE image coordinates: Coordinate1 is the
    # x/longitude axis (origin at minx), Coordinate2 the y/latitude axis
    # (origin at maxy, negative delta — rows run top-down).
    dictProp = {
        'REFERENCE' : meta['reference'],
        'Coordinate1': {
            'size': meta['width'],
            'startingValue' : meta['minx'],
            'delta': meta['deltax']
        },
        'Coordinate2': {
            'size' : meta['length'],
            'startingValue' : meta['maxy'],
            'delta': meta['deltay']
        },
        'FILE_NAME' : inps.outfile
    }
    img.init(dictProp)
    # Writes the .xml/.vrt header files next to the output image.
    img.renderHdr()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
1822,
29572,
198,
6738,
28686,
469,
78,
1330,
308,
31748,
198,
11748,
318,
344,
198,
11748,
318,
344,
26801,
198,
11748,
28686,
... | 2.109641 | 1,587 |
from routersploit import (
exploits,
print_status,
print_success,
print_error,
http_request,
mute,
validators,
shell,
)
class Exploit(exploits.Exploit):
"""
Exploit implementation for Netgear R7000 and R6400 Remote Code Execution vulnerability.
If the target is vulnerable, command loop is invoked that allows executing commands on operating system level.
"""
__info__ = {
'name': 'Netgear R7000 & R6400 RCE',
'description': 'Module exploits remote command execution in Netgear R7000 and R6400 devices. If the target is '
'vulnerable, command loop is invoked that allows executing commands on operating system level.',
'authors': [
'Chad Dougherty', # vulnerability discovery
'Marcin Bury <marcin.bury[at]reverse-shell.com>', # routersploit module
],
'references': [
'http://www.sj-vs.net/a-temporary-fix-for-cert-vu582384-cwe-77-on-netgear-r7000-and-r6400-routers/',
'https://www.exploit-db.com/exploits/40889/',
'http://www.kb.cert.org/vuls/id/582384',
],
'devices': [
'R6400 (AC1750)',
'R7000 Nighthawk (AC1900, AC2300)',
'R7500 Nighthawk X4 (AC2350)',
'R7800 Nighthawk X4S(AC2600)',
'R8000 Nighthawk (AC3200)',
'R8500 Nighthawk X8 (AC5300)',
'R9000 Nighthawk X10 (AD7200)',
]
}
target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url)
port = exploits.Option(80, 'Target Port', validators=validators.integer)
@mute
| [
6738,
41144,
489,
30711,
1330,
357,
198,
220,
220,
220,
27062,
11,
198,
220,
220,
220,
3601,
62,
13376,
11,
198,
220,
220,
220,
3601,
62,
13138,
11,
198,
220,
220,
220,
3601,
62,
18224,
11,
198,
220,
220,
220,
2638,
62,
25927,
11,... | 2.222964 | 749 |
import json
import numpy as np
import open3d as o3d
if __name__ == '__main__':
    # Entry point: run the MSE calculation.
    # NOTE(review): mse_cal is not defined in this fragment — presumably
    # defined earlier in the original module; confirm.
    mse_cal()
    # Alternative entry points kept for manual experimentation:
    # read_pcd_pointclouds()
    # show_gd()
    # file_path = '/home/ljs/workspace/eccv/FirstTrainingData/out_4096/train/38.pcd'
    # read_pcd_pointclouds(file_path)
| [
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
1280,
18,
67,
355,
267,
18,
67,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
285,
325,
62,
9948,
3419,
198,
220,... | 2.214876 | 121 |
from stream_framework.feeds.base import BaseFeed
from stream_framework.storage.memory import InMemoryActivityStorage
from stream_framework.storage.memory import InMemoryTimelineStorage
| [
6738,
4269,
62,
30604,
13,
12363,
82,
13,
8692,
1330,
7308,
18332,
198,
6738,
4269,
62,
30604,
13,
35350,
13,
31673,
1330,
554,
30871,
16516,
31425,
198,
6738,
4269,
62,
30604,
13,
35350,
13,
31673,
1330,
554,
30871,
14967,
4470,
31425,... | 4.428571 | 42 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of global_sewage_signatures.
# https://github.com/josl/Global_Sewage_Signatures
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Jose L. Bellod Cisneros & Kosai Al-Nakked
# <bellod.cisneros@gmail.com & kosai@cbs.dtu.dk>
import numpy as np
import math
from collections import defaultdict
# We keep a global count of all coefficients for the Universal Hashing to
# have unique set of numbers: every coefficient drawn for a hash function is
# recorded here so the same value is never reused across permutations.
coefficients = set()
# Reference: http://www.mmds.org/mmds/v2.1/ch03-lsh.pdf
# Each permutation is applied to all the rows and we update the signature
# matrix based on the column with the minimum hash found so far
# All-against-all comparison of the signature matrix result of the
# permutation. We compare each signature for each document and group
# similar items together if their jaccard similarity is less than the
# distance provided
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
770,
2393,
318,
636,
286,
3298,
62,
325,
21482,
62,
12683,
6691,
13,
198,
2,
3740,
1378,
12567,
13,
785,
14... | 3.200647 | 309 |
import logging
import os
import re
import uuid
from io import BytesIO
from mimetypes import guess_extension
from os.path import splitext
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import fields
from django.forms import forms
from django.forms.models import model_to_dict
from django.utils.crypto import get_random_string
from django.utils.text import get_valid_filename
from django.utils.translation import gettext_lazy as _
from l10n.models import Country, AdminArea
logger = logging.getLogger(__name__)
def ensure_single_primary(queryset):
    """
    Ensure that at most one item of the queryset is flagged as primary.

    If more than one item is marked primary, every primary item after the
    first is demoted and saved.  If no item is marked primary, the first
    item of the queryset (if any) is promoted and saved.

    :param queryset: queryset of model instances having a boolean
        ``primary`` field and a ``save()`` method
    """
    primary_items = queryset.filter(primary=True)
    # Hoist the count: ``QuerySet.count()`` issues a database query on every
    # call, and the original code called it twice (once per branch test).
    primary_count = primary_items.count()
    if primary_count > 1:
        # Keep the first primary item, demote all the others.
        for item in primary_items[1:]:
            item.primary = False
            item.save()
    elif primary_count == 0:
        # No primary item yet -- promote the first item, if one exists.
        item = queryset.first()
        if item:
            item.primary = True
            item.save()
class AddressMixin(models.Model):
    """
    Address information

    see i.e. http://tools.ietf.org/html/draft-ietf-scim-core-schema-03 or http://schema.org/PostalAddress

    NOTE(review): this mixin subclasses ``models.Model`` but no
    ``class Meta: abstract = True`` is visible here -- presumably it is
    meant to be abstract; confirm that creating a concrete table is intended.
    """
    # Name of the person/organisation the mail is addressed to.
    addressee = models.CharField(_("addressee"), max_length=80)
    street_address = models.TextField(_('street address'), blank=True,
                                      help_text=_('Full street address, with house number, street name, P.O. box, and '
                                                  'extended street address information.'), max_length=512)
    city = models.CharField(_("city"), max_length=100) # , help_text=_('City or locality')
    # Optional spelling of the city in the local script/language.
    city_native = models.CharField(_("city in native language"), max_length=100, blank=True)
    postal_code = models.CharField(_("postal code"), max_length=30, blank=True)
    # Only countries flagged active in the l10n Country table may be chosen.
    country = models.ForeignKey(Country, on_delete=models.CASCADE, verbose_name=_("country"),
                                limit_choices_to={'active': True})
    region = models.CharField(_("region"), help_text=_('State or region'), blank=True, max_length=100)
    # At most one address should be primary; see ensure_single_primary() above.
    primary = models.BooleanField(_("primary"), default=False)
    # formatted : formatted Address for mail http://tools.ietf.org/html/draft-ietf-scim-core-schema-03
    # International numbers: "+<country code>" followed by digit groups that
    # may be separated by single dashes or spaces, or wrapped in parentheses.
    phone_re = re.compile(
        r'^\+\d{1,3}' + r'((-?\d+)|(\s?\(\d+\)\s?)|\s?\d+){1,9}$'
    )
    validate_phone = RegexValidator(phone_re, _("Enter a valid phone number i.e. +49 (531) 123456"), 'invalid')
def update_object_from_dict(destination, source_dict, key_mapping=None):
    """
    Update ``destination`` from the values of ``source_dict``, saving it only
    when something actually changed (or when the object is brand new).

    ``key_mapping`` optionally renames source keys before they are matched
    against the destination's model fields.  A mapping value may be either a
    plain field name, or a ``(field_name, transform)`` tuple where
    ``transform`` is applied to the source value first, for example
    ``{'key': ('new_key', lambda x: x + 2)}``.

    :param destination: model instance to update (saved if modified or new)
    :param source_dict: mapping of incoming key/value pairs
    :param key_mapping: optional key rename/transform table (see above)
    """
    mapping = key_mapping or {}
    known_fields = {f.name for f in destination._meta.fields}
    # A missing primary key means the instance was never persisted.
    is_new = destination.pk is None
    changed = False
    for source_key, raw_value in source_dict.items():
        target_name = source_key
        transform = None
        if source_key in mapping:
            mapped = mapping[source_key]
            if isinstance(mapped, tuple):
                target_name, transform = mapped
            else:
                target_name = mapped
        if target_name not in known_fields:
            # Ignore keys that do not correspond to a model field.
            continue
        value = raw_value if transform is None else transform(raw_value)
        if is_new:
            setattr(destination, target_name, value)
        elif getattr(destination, target_name) != value:
            setattr(destination, target_name, value)
            changed = True
    if changed or is_new:
        destination.save()
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
334,
27112,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
17007,
2963,
12272,
1330,
4724,
62,
2302,
3004,
198,
6738,
28686,
13,
6978,
1330,
4328,
578,
742,
198,
198,
6... | 2.440513 | 1,639 |
from flask import render_template
from . import main
@main.app_errorhandler(404)
def four_o_four(error):
    """Render the custom 404 (page not found) error page.

    Registered app-wide via the ``main`` blueprint, so any unknown URL
    in the application ends up here.
    """
    return render_template('fourofour.html'), 404
| [
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
764,
1330,
1388,
198,
198,
31,
12417,
13,
1324,
62,
18224,
30281,
7,
26429,
8,
198,
4299,
1440,
62,
78,
62,
14337,
7,
18224,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
7... | 2.935065 | 77 |
import base64
import sys
from contextlib import contextmanager
from io import StringIO
from threading import current_thread
from typing import Union
import hana_ml.dataframe
import pandas
import streamlit as st
from streamlit.report_thread import REPORT_CONTEXT_ATTR_NAME
# from https://discuss.streamlit.io/t/cannot-print-the-terminal-output-in-streamlit/6602/2
def get_table_download_link(df, file_name):
    """Generate an HTML anchor that downloads a dataframe as a CSV file.

    The dataframe is serialised to CSV (without its index), base64-encoded
    and embedded in a ``data:`` URI so the browser can save it directly.

    :param df: pandas DataFrame to export
    :param file_name: download file name, without the ``.csv`` extension
    :return: ``<a href=...>`` HTML string
    """
    # FIX: removed the three stray ``@contextmanager`` decorators that
    # wrapped this function -- ``contextmanager`` expects a generator
    # function, and wrapping a plain function (three times) made calls
    # return a context-manager factory instead of the documented href string.
    csv = df.to_csv(index=False)
    # base64 works on bytes, hence the encode/decode round-trip.
    b64 = base64.b64encode(
        csv.encode()
    ).decode()  # some strings <-> bytes conversions necessary here
    return f'<a href="data:file/csv;base64,{b64}" download="{file_name}.csv">Download file</a>'
| [
11748,
2779,
2414,
198,
11748,
25064,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
4704,
278,
1330,
1459,
62,
16663,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
289,
2271,
62,
4029,
13,
78... | 2.961938 | 289 |
from .sim_model import SimModel
from .i_sim_model import ISimModel
from .sim_array import SimArray
from .sim_array_view import SimArrayView
from .sim_pstudy import SimPStudy
from .sim_output import SimOut
| [
198,
198,
6738,
764,
14323,
62,
19849,
1330,
3184,
17633,
198,
6738,
764,
72,
62,
14323,
62,
19849,
1330,
3180,
320,
17633,
198,
6738,
764,
14323,
62,
18747,
1330,
3184,
19182,
198,
6738,
764,
14323,
62,
18747,
62,
1177,
1330,
3184,
1... | 3.234375 | 64 |
# Utilities for interacting with databases
import os
from urllib.parse import urlparse
from sqlalchemy import create_engine, text
from ensembl_prodinf.server_utils import get_file_sizes
from sqlalchemy.engine.url import make_url
def list_databases(db_uri, query):
    """
    List databases on a specified MySQL server

    Arguments:
      db_uri : URI of MySQL server e.g. mysql://user@host:3306/
      query : optional regular expression to filter databases
              e.g. .*_core_.* (``None`` lists every schema)

    Returns:
      list of schema names as strings
    """
    valid_uri = validate_mysql_url(db_uri)
    engine = create_engine(valid_uri)
    # PEP 8: compare against None with ``is``, not ``==``.
    if query is None:
        s = text("select schema_name from information_schema.schemata")
    else:
        # The filter pattern is bound as a parameter, not interpolated.
        s = text("select schema_name from information_schema.schemata where schema_name rlike :q")
    with engine.connect() as con:
        return [str(r[0]) for r in con.execute(s, {"q": query}).fetchall()]
def get_database_sizes(db_uri, query, dir_name):
    """
    List sizes of databases on a specified MySQL server

    Arguments:
      db_uri : URI of MySQL server e.g. mysql://user@host:3306/ (file system must be accessible)
      query : optional regular expression to filter databases e.g. .*_core_.*
      dir_name : location of MySQL data files on server

    Returns:
      dict mapping each matching database name to its on-disk size
    """
    matching_dbs = list_databases(db_uri, query)
    parsed_url = make_url(db_uri)
    # Data files live under <dir_name>/<port>/data on the server.
    data_dir = os.path.join(dir_name, str(parsed_url.port), 'data')
    sizes_by_db = get_file_sizes(parsed_url.host, data_dir)
    return {name: sizes_by_db[name] for name in matching_dbs if name in sizes_by_db}
| [
2,
41086,
329,
24986,
351,
20083,
198,
11748,
28686,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
11,
2420,
198,
6738,
551,
4428,
75,
62,
1676,
67,
10745,
13,
15388,
62... | 2.736746 | 547 |
import pytest
| [
11748,
12972,
9288,
628,
198
] | 3.2 | 5 |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2020 Huawei Technologies Sweden AB, All rights reserved.
Authors:
Karl Gäfvert
'''
import argparse
from .gui import GUI
# Command-line interface: replay a previously saved simulation in the GUI.
parser = argparse.ArgumentParser(description='Run GUI')
parser.add_argument('input_dir', metavar='input_dir', type=str, help='Path to saved simulation. Ex. "results/10212020_021804"')
# NOTE(review): default is the *string* '2'; argparse applies ``type=int``
# to string defaults, so this still yields the integer 2.
parser.add_argument('--fps', type=int, default='2', help='FPS during visualization')
parser.add_argument('--silent-strategy-0', action='store_true', help='Disable strategy 0 simulator output')
parser.add_argument('--silent-strategy-1', action='store_true', help='Disable strategy 1 simulator output')
parser.add_argument('--about', action='store_true', help='Print info and license')
# Args
args = parser.parse_args()
# Print about: show the BSD-3-Clause license text and exit without running.
if args.about:
    print('''Copyright (c) 2020 Huawei Technologies Sweden AB, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following
   disclaimer in the documentation and/or other materials provided
   with the distribution.
3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived
   from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
    Karl Gäfvert
    Romain Deffayet
''')
    exit(0)
# Launch the GUI and replay the saved simulation at the requested frame rate.
gui = GUI(disable_0=args.silent_strategy_0, disable_1=args.silent_strategy_1)
gui.play_from_file(args.input_dir, fps=args.fps)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
7061,
6,
201,
198,
15269,
357,
66,
8,
12131,
43208,
21852,
10710,
9564,
11,
1439,
2489,
10395,
13,
201,
198,
201,
198,
30515,
669,
25,
201,
198,
220,
220,
220,
... | 3.015982 | 876 |
from __future__ import unicode_literals
import logging
import json
from django.core.exceptions import ImproperlyConfigured
from urllib.request import urlopen
from .exceptions import RateBackendError
from .models import RateSource, Rate
from .settings import money_rates_settings
logger = logging.getLogger(__name__)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
18931,
198,
11748,
33918,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
198,
6738,
2956,
297,
571,
13,
25927,
13... | 3.588889 | 90 |
# Trying a new data model for state variables and domains:
# Create a new sub-class of numpy.ndarray
# that has as an attribute the domain itself
# Following a tutorial on subclassing ndarray here:
#
# http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
from __future__ import division
import numpy as np
from climlab.domain.xarray import Field_to_xarray
class Field(np.ndarray):
    """Custom class for climlab gridded quantities, called Field.

    This class behaves exactly like :py:class:`numpy.ndarray`
    but every object has an attribute called ``self.domain``
    which is the domain associated with that field (e.g. state variables).

    **Initialization parameters** \n

    An instance of ``Field`` is initialized with the following
    arguments:

    :param array input_array:   the array which the Field object should be
                                initialized with
    :param domain:              the domain associated with that field
                                (e.g. state variables)
    :type domain:               :class:`~climlab.domain.domain._Domain`

    **Object attributes** \n

    Following object attribute is generated during initialization:

    :var domain:                the domain associated with that field
                                (e.g. state variables)
    :vartype domain:            :class:`~climlab.domain.domain._Domain`

    :Example:

        ::

            >>> import climlab
            >>> import numpy as np
            >>> from climlab import domain
            >>> from climlab.domain import field
            >>> # distribution of state
            >>> distr = np.linspace(0., 10., 30)
            >>> # domain creation
            >>> sfc, atm = domain.single_column()
            >>> # build state of type Field
            >>> s = field.Field(distr, domain=atm)
            >>> print s.domain
            climlab Domain object with domain_type=atm and shape=(30,)
            >>> # can slice this and it preserves the domain
            >>> # a more full-featured implementation would have intelligent
            >>> # slicing like in iris
            >>> s.shape == s.domain.shape
            True
            >>> s[:1].shape == s[:1].domain.shape
            False
    """
    ## Loosely based on the approach in numpy.ma.core.MaskedArray

    def __new__(cls, input_array, domain=None):
        # FIX: the class docstring (and to_latlon below) construct fields as
        # ``Field(array, domain=...)``, but no ``__new__`` accepting those
        # arguments was defined, so that call raised.  Standard ndarray
        # subclassing: view-cast the input and attach the domain attribute.
        obj = np.asarray(input_array).view(cls)
        obj.domain = domain
        return obj

    # This determines how we slice a Field object
    def __getitem__(self, indx):
        """
        x.__getitem__(y) <==> x[y]

        Return the item described by i, as a Field.
        """
        # create a view of just the data as np.ndarray and slice it
        dout = self.view(np.ndarray)[indx]
        try:
            # Force dout to type Field
            dout = dout.view(type(self))
            # Now slice the domain
            dout.domain = self.domain[indx]
            # Inherit attributes from self
            if hasattr(self, 'interfaces'):
                dout.interfaces = self.interfaces
        except Exception:
            # FIX: narrowed the original bare ``except:`` so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            # The above will fail if we extract a single item,
            # in which case we should just return the item.
            pass
        return dout

    def to_xarray(self):
        """Convert Field object to xarray.DataArray"""
        return Field_to_xarray(self)
def global_mean(field):
    """Calculates the latitude weighted global mean of a field
    with latitude dependence.

    :param Field field: input field
    :raises: :exc:`ValueError` if input field has no latitude axis
    :return: latitude weighted global mean of the field
    :rtype: float

    :Example:

        initial global mean temperature of EBM model::

            >>> import climlab
            >>> model = climlab.EBM()
            >>> climlab.global_mean(model.Ts)
            Field(11.997968598413685)

    """
    try:
        lat = field.domain.lat.points
    except AttributeError:
        # FIX: narrowed from a bare ``except:`` -- only a missing
        # domain/lat attribute means "no latitude axis".
        raise ValueError('No latitude axis in input field.')
    try:
        # Field is 2D latitude / longitude; probing ``lon`` raises
        # AttributeError when the domain has no longitude axis.
        lon = field.domain.lon.points
        return _global_mean_latlon(field.squeeze())
    except AttributeError:
        # FIX: narrowed from a bare ``except:`` so genuine errors raised by
        # _global_mean_latlon propagate instead of silently falling back.
        # Field is 1D latitude only (zonal average)
        lat_radians = np.deg2rad(lat)
        return _global_mean(field.squeeze(), lat_radians)
def to_latlon(array, domain, axis = 'lon'):
    """Broadcasts a 1D axis dependent array across another axis.

    :param array input_array:   the 1D array used for broadcasting
    :param domain:              the domain associated with that
                                array
    :param axis:                the axis that the input array will
                                be broadcasted across
                                [default: 'lon']
    :return:                    Field with the same shape as the
                                domain

    :Example:

        ::

            >>> import climlab
            >>> from climlab.domain.field import to_latlon
            >>> import numpy as np

            >>> state = climlab.surface_state(num_lat=3, num_lon=4)
            >>> m = climlab.EBM_annual(state=state)
            >>> insolation = np.array([237., 417., 237.])
            >>> insolation = to_latlon(insolation, domain = m.domains['Ts'])

            >>> insolation.shape
            (3, 4, 1)
    """
    # FIX: the original unpacked the meshgrid result into the *parameter*
    # ``axis``, so the later ``axis == 'lat'`` test compared an ndarray with
    # a string and the transpose branch never ran; it also discarded the
    # result of ``np.swapaxes`` (which is not in-place).  Distinct names for
    # the grids fix both problems.
    axis_pts, grid, depth_pts = np.meshgrid(domain.axes[axis].points, array,
                                            domain.axes['depth'].points)
    if axis == 'lat':
        # array was longitude dependent: meshgrid put the broadcast (lon)
        # axis first, so swap to the conventional (lat, lon, depth) order.
        grid = np.swapaxes(grid, 1, 0)
    return Field(grid, domain=domain)
| [
2,
220,
31165,
257,
649,
1366,
2746,
329,
1181,
9633,
290,
18209,
25,
198,
2,
220,
13610,
257,
649,
850,
12,
4871,
286,
299,
32152,
13,
358,
18747,
198,
2,
220,
326,
468,
355,
281,
11688,
262,
7386,
2346,
198,
198,
2,
14207,
257,
... | 2.120101 | 3,164 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 13:32:14 2019
@author: ortutay
"""
import pandas as pd
import numpy as np
# Public sample dataset of UFO sighting reports (fetched over HTTP).
link = 'http://bit.ly/uforeports'
ufo = pd.read_csv(link)
# We split into 60-20-20% train-validation-test sets: shuffle the rows with
# sample(frac=1), then cut the shuffled frame at the 60% and 80% marks.
train, validate, test = np.split(ufo.sample(frac=1),
                                 [int(.6*len(ufo)),int(.8*len(ufo))])
# Toy example on a 20-row frame, this time cut at 80%/90% (an 80-10-10 split).
# NOTE(review): this rebinds train/validate/test, discarding the ufo split
# above -- presumably intentional scratch code; confirm.
a = pd.DataFrame({'col1': np.arange(1, 21),'col2': np.arange(21,41)})
train, validate, test = np.split(a.sample(frac=1), [int(.8 * len(a)), int(.9 * len(a))])
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
4280,
1511,
1511,
25,
2624,
25,
1415,
13130,
198,
198,
31,
9800,
25,
393,
83,
315,... | 2.056818 | 264 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the p2p stub connection."""
import os
import tempfile
from pathlib import Path
from typing import Any, Union, cast
from aea.configurations.base import ConnectionConfig, PublicId
from aea.identity.base import Identity
from aea.mail.base import Envelope
from packages.fetchai.connections.stub.connection import StubConnection, write_envelope
PUBLIC_ID = PublicId.from_str("fetchai/p2p_stub:0.16.0")
class P2PStubConnection(StubConnection):
    r"""A p2p stub connection.

    Agents sharing one local directory use it as a Rendez-Vous point: every
    connected agent owns a file named after its address/identity inside that
    directory, and peers deliver envelopes by appending to the recipient's
    file.  New messages are detected by watchdogging the input file for
    freshly appended lines.
    """

    connection_id = PUBLIC_ID

    def __init__(
        self, configuration: ConnectionConfig, identity: Identity, **kwargs: Any
    ) -> None:
        """
        Initialize a p2p stub connection.

        :param configuration: the connection configuration
        :param identity: the identity
        """
        # Fall back to a throw-away temporary directory when no shared
        # namespace directory was configured.
        ns_dir = cast(
            Union[str, Path],
            configuration.config.get("namespace_dir", tempfile.mkdtemp()),
        )
        if ns_dir is None:
            raise ValueError("namespace_dir_path must be set!")  # pragma: nocover
        self.namespace = os.path.abspath(ns_dir)

        # This agent reads from "<address>.in" and writes to "<address>.out".
        configuration.config["input_file"] = os.path.join(
            self.namespace, "{}.in".format(identity.address)
        )
        configuration.config["output_file"] = os.path.join(
            self.namespace, "{}.out".format(identity.address)
        )
        super().__init__(configuration=configuration, identity=identity, **kwargs)

    async def send(self, envelope: Envelope) -> None:
        """
        Send an envelope by appending it to the recipient's input file.

        :param envelope: the envelope to deliver
        """
        if self.loop is None:
            raise ValueError("Loop not initialized.")  # pragma: nocover
        self._ensure_valid_envelope_for_external_comms(envelope)
        recipient_file = Path(os.path.join(self.namespace, "{}.in".format(envelope.to)))
        # Delegate the blocking file write to the executor pool.
        with open(recipient_file, "ab") as sink:
            await self.loop.run_in_executor(
                self._write_pool, write_envelope, envelope, sink
            )

    async def disconnect(self) -> None:
        """Disconnect the connection."""
        if self.loop is None:
            raise ValueError("Loop not initialized.")  # pragma: nocover
        await self.loop.run_in_executor(self._write_pool, self._cleanup)
        await super().disconnect()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
26171,
198,
2,
198,
2,
220,
220,
15069,
2864,
12,
23344,
376,
7569,
13,
20185,
15302,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
3... | 2.826753 | 1,241 |
# Read the three candidate side lengths.
n1 = float(input('Digite o primeiro numero'))
n2 = float(input('Digite o segundo numero'))
n3 = float(input('Digite o terceiro numero'))
# Triangle inequality: every side must be strictly shorter than the
# sum of the other two.
eh_triangulo = n1 < n2 + n3 and n2 < n1 + n3 and n3 < n2 + n1
if eh_triangulo:
    print('Podem formar um triangulo')
else:
    print('Nao formam um triangulo')
| [
77,
16,
796,
12178,
7,
15414,
10786,
19511,
578,
267,
6994,
7058,
997,
3529,
6,
4008,
198,
77,
17,
796,
12178,
7,
15414,
10786,
19511,
578,
267,
384,
70,
41204,
997,
3529,
6,
4008,
198,
77,
18,
796,
12178,
7,
15414,
10786,
19511,
... | 2.3 | 120 |
import argparse
import logging
from transformers import (
set_seed,
)
from adynorm.eval_utils import (
evaluate
)
from adynorm.adynorm import Adynorm, AdynormNet
from adynorm.datasets import ConceptDataset, DictDataset
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    # Script entry point; ``main`` is expected to be defined elsewhere in
    # this module (not visible in this excerpt) -- TODO confirm.
    main()
| [
11748,
1822,
29572,
198,
198,
11748,
18931,
198,
198,
6738,
6121,
364,
1330,
357,
198,
220,
220,
220,
900,
62,
28826,
11,
198,
8,
198,
198,
6738,
512,
2047,
579,
13,
18206,
62,
26791,
1330,
357,
198,
220,
220,
220,
13446,
198,
8,
... | 2.672414 | 116 |
from tests.product.mode_installers import StandaloneModeInstaller
from tests.product.prestoadmin_installer import PrestoadminInstaller
from tests.product.topology_installer import TopologyInstaller
from tests.product.standalone.presto_installer import StandalonePrestoInstaller
# Identifiers for the cluster flavours used by the product tests.
STANDALONE_BARE_CLUSTER = 'bare'
# NOTE(review): same value as STANDALONE_BARE_CLUSTER -- presumably a legacy
# alias; confirm before consolidating the two names.
BARE_CLUSTER = 'bare'
STANDALONE_PA_CLUSTER = 'pa_only_standalone'
STANDALONE_PRESTO_CLUSTER = 'presto'
# Maps each cluster type to the ordered list of installers required to set it
# up; a bare cluster needs no installers at all.
cluster_types = {
    BARE_CLUSTER: [],
    STANDALONE_PA_CLUSTER: [PrestoadminInstaller,
                            StandaloneModeInstaller],
    STANDALONE_PRESTO_CLUSTER: [PrestoadminInstaller,
                                StandaloneModeInstaller,
                                TopologyInstaller,
                                StandalonePrestoInstaller],
}
| [
6738,
5254,
13,
11167,
13,
14171,
62,
17350,
364,
1330,
5751,
17749,
19076,
15798,
263,
198,
6738,
5254,
13,
11167,
13,
79,
2118,
1170,
1084,
62,
17350,
263,
1330,
24158,
1170,
1084,
15798,
263,
198,
6738,
5254,
13,
11167,
13,
4852,
1... | 2.189415 | 359 |
# Workaround for the 'methods' file not being able to locate the 'mcmcsamplers' folder for importing
import sys
import os
# Resolve the directory containing this script so the bundled 'logistigate'
# submodule can be imported without being installed.
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
# Make the submodule package and its 'mcmcsamplers' subfolder importable.
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'logistigate')))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'logistigate', 'mcmcsamplers')))
import logistigate.logistigate.utilities as util # Pull from the submodule "develop" branch
import logistigate.logistigate.methods as methods # Pull from the submodule "develop" branch
import logistigate.logistigate.lg as lg # Pull from the submodule "develop" branch
def _consolidate_names(df, column, canonical_map):
    """Collapse alias spellings in df[column] to a single canonical value, in place.

    canonical_map maps canonical_value -> list of alias strings; every row whose
    column entry exactly matches one of the aliases is rewritten to the canonical
    value. Mappings are applied in the dict's insertion order, matching the order
    of the original one-at-a-time .loc assignments.
    """
    for canonical, aliases in canonical_map.items():
        df.loc[df[column].isin(aliases), column] = canonical

def _to_testresult_list(df):
    """Convert a database slice into [outlet, importer, result] rows for logistigate.

    Each returned row is [Province, Manufacturer, 1 if 'Final Test Result' is
    'Fail' else 0].
    """
    rows = df[['Province', 'Manufacturer', 'Final Test Result']].values.tolist()
    return [[r[0], r[1], 1 if r[2] == 'Fail' else 0] for r in rows]

def cleanMQD():
    '''
    Clean up raw Medicines Quality Database (MQD) data for use in logistigate.

    Reads a CSV file with columns 'Country', 'Province', 'Therapeutic Indication',
    'Manufacturer', 'Facility Type', 'Date Sample Collected', 'Final Test Result',
    and 'Type of Test'; consolidates typo/alias spellings of provinces and
    manufacturers for Cambodia, Ghana, and the Philippines; builds smaller frames
    filtered by facility type and by therapeutic indication; and returns a
    dictionary containing the data frames plus logistigate-formatted testing
    lists ([outlet, importer, 0/1 result]) for each.
    '''
    # Read in the raw database file
    import pandas as pd
    SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
    filesPath = os.path.join(SCRIPT_DIR, '../MQDfiles')
    MQD_df = pd.read_csv(os.path.join(filesPath, 'MQD_ALL_CSV.csv'))  # Main raw database file
    # Get data particular to each country of interest
    MQD_df_CAM = MQD_df[MQD_df['Country'] == 'Cambodia'].copy()
    MQD_df_GHA = MQD_df[MQD_df['Country'] == 'Ghana'].copy()
    MQD_df_PHI = MQD_df[MQD_df['Country'] == 'Philippines'].copy()
    # Consolidate typos or seemingly identical entries in significant categories.
    # Each mapping is canonical spelling -> list of raw spellings to replace.
    # Cambodia: provinces
    _consolidate_names(MQD_df_CAM, 'Province', {
        'Ratanakiri': ['Ratanakiri', 'Rattanakiri'],
        'Stung Treng': ['Steung Treng', 'Stung Treng'],
    })
    # Cambodia: manufacturers
    _consolidate_names(MQD_df_CAM, 'Manufacturer', {
        'Acdhon Co., Ltd': ['Acdhon Co., Ltd', 'Acdhon Company Ltd'],
        'Alembic Limited': ['Alembic Limited', 'Alembic Pharmaceuticals Ltd'],
        'Alice Pharmaceuticals': ['ALICE PHARMA PVT LTD', 'Alice Pharma Pvt.Ltd',
                                  'Alice Pharmaceuticals'],
        'Atoz Pharmaceuticals Ltd': ['Atoz Pharmaceutical Pvt.Ltd', 'Atoz Pharmaceuticals Ltd'],
        'Aurobindo': ['Aurobindo Pharma LTD', 'Aurobindo Pharma Ltd.',
                      'Aurobindo Pharmaceuticals Ltd'],
        'Aventis': ['Aventis', 'Aventis Pharma Specialite'],
        'Bright Future Laboratories': ['Bright Future Laboratories', 'Bright Future Pharma'],
        'Burapha': ['Burapha', 'Burapha Dispensary Co, Ltd'],
        'Chankit Trading Ltd': ['CHANKIT', 'Chankit Trading Ltd', 'Chankit trading Ltd, Part'],
        'Chea Chamnan Laboratory Company Ltd': ['Chea Chamnan Laboratoire Co., LTD',
                                                'Chea Chamnan Laboratories Co., Ltd',
                                                'Chea Chamnan Laboratory Company Ltd'],
        'Cipla Ltd': ['Cipla Ltd.', 'Cipla Ltd'],
        'Domesco': ['DOMESCO MEDICAL IMP EXP JOINT STOCK CORP',
                    'DOMESCO MEDICAL IMP EXP JOINT_stock corp',
                    'DOMESCO MEDICAL IMPORT EXPORT JOINT STOCK CORP',
                    'Domesco'],
        'Emcure': ['Emcure Pharmaceutical', 'Emcure'],
        'Eurolife': ['Eurolife Healthcare Pvt Ltd', 'Eurolife'],
        'Flamingo Pharmaceuticals Ltd': ['Flamingo Pharmaceutical Limited',
                                         'Flamingo Pharmaceuticals Ltd'],
        'Global Pharma': ['Global Pharma Health care PVT-LTD',
                          'GlobalPharma Healthcare Pvt-Ltd',
                          'Global Pharma'],
        'Gracure Pharmaceuticals': ['Gracure Pharmaceuticals Ltd.', 'Gracure Pharmaceuticals'],
        'Il Dong Pharmaceuticals Ltd': ['Il Dong Pharmaceutical Company Ltd',
                                        'Il Dong Pharmaceuticals Ltd'],
        'Khandelwal': ['Khandelwal Laboratories Ltd', 'Khandewal Lab', 'Khandelwal'],
        'Laboratories EPHAC Co., Ltd': ['Laboratories EPHAC Co., Ltd',
                                        'EPHAC Laboratories Ltd'],
        'Lyka Labs': ['Lyka Laboratories Ltd', 'Lyka Labs Limited.', 'Lyka Labs'],
        'Marksans Pharma Ltd.': ['Marksans Pharmaceuticals Ltd', 'Marksans Pharma Ltd.',
                                 'Marksans Pharma Ltd.,'],
        'Masa Lab Co., Ltd': ['MASALAB', 'Masa Lab Co., Ltd'],
        'Medical Supply Pharmaceutical Enterprise': ['Medical Supply Pharmaceutical Enterprise',
                                                     'Medical Supply Pharmaceutical Enteprise'],
        'Medopharm': ['Medopharm Pvt. Ltd.', 'Medopharm'],
        'Microlabs': ['Micro Laboratories Ltd', 'MICRO LAB LIMITED', 'Micro Labs Ltd',
                      'Microlabs Limited'],
        'Millimed': ['Millimed Co., Ltd Thailand', 'Millimed'],
        'Orchid Health': ['Orchid Health Care', 'Orchid Health'],
        'Osoth Inter Laboratories': ['Osoth Inter Laboratory Co., LTD',
                                     'Osoth Inter Laboratories'],
        'Pharmasant Laboratories Co., Ltd': ['PHARMASANT LABORATORIES Co.,LTD',
                                             'Pharmasant Laboratories Co., Ltd'],
        'Plethico': ['Plethico Pharmaceuticals, Ltd', 'Plethico Pharmaceuticals Ltd',
                     'Plethico Pharmaceutical Ltd', 'Plethico'],
        'PPM': ['PPM Laboratory', 'PPM', 'Pharma Product Manufacturing'],
        'Ranbaxy Pharmaceuticals': ['Ranbaxy Laboratories Limited.',
                                    'Ranbaxy Pharmaceuticals'],
        'Shijiazhuang Pharmaceutical Group Ltd': [
            'Shijiazhuang Pharma Group Zhongnuo Pharmaceutical [Shijiazhuang] Co.,LTD',
            'Shijiazhuang Pharmaceutical Group Ltd'],
        'Sanofi Aventis': ['Sanofi-Aventis Vietnam', 'Sanofi Aventis'],
        'Stada Vietnam Joint Venture': ['Stada Vietnam Joint Venture Co., Ltd.',
                                        'Stada Vietnam Joint Venture'],
        'Shandong Reyoung Pharmaceuticals Ltd': ['Shandong Reyoung Pharmaceutical Co., Ltd',
                                                 'Shandong Reyoung Pharmaceuticals Ltd'],
        'T-Man Pharmaceuticals Ltd': ['T Man Pharma Ltd. Part.', 'T-MAN Pharma Ltd., Part',
                                      'T-Man Pharmaceuticals Ltd'],
        'Umedica': ['Umedica Laboratories PVT. LTD.', 'Umedica Laboratories PVT. Ltd',
                    'Umedica Laboratories Pvt Ltd', 'Umedica'],
        'Utopian Company Ltd': ['Utopian Co,.LTD', 'Utopian Co., Ltd', 'Utopian Company Ltd'],
        'Vesco Pharmaceutical Ltd Part': ['Vesco Pharmaceutical Ltd.,Part',
                                          'Vesco Pharmaceutical Ltd Part'],
        'Yanzhou Xier Kangtai Pharm': ['Yanzhou Xier Kangtai Pharmaceutical Co., Ltd',
                                       'Yanzhou Xier Kangtai Pharm'],
        'Zhangjiakou Dongfang Phamaceutical': ['Zhangjiakou DongFang pharmaceutical Co., Ltd',
                                               'Zhangjiakou Dongfang Phamaceutical'],
    })
    # Ghana: provinces
    _consolidate_names(MQD_df_GHA, 'Province', {
        'Northern': ['Northern', 'Northern Region', 'Northern Region, Northern Region'],
        'Western': ['Western (Ghana)'],
    })
    # Ghana: manufacturers
    _consolidate_names(MQD_df_GHA, 'Manufacturer', {
        'Ajanta Pharma Ltd.': ['Ajanta Pharma Ltd', 'Ajanta Pharma Ltd.'],
        'Ally Pharma Options Pvt. Ltd': ['Ally Pharma Options Pvt Ltd.',
                                         'Ally Pharma Options Pvt. Ltd'],
        'Bliss GVS Pharma Ltd': ['Bliss GVS Pharma Ltd', 'Bliss GVS Pharmaceuticals Ltd.'],
        'Cipla Ltd': ['Cipla Ltd. India', 'Cipla Ltd'],
        'Danadams': ['Danadams Pharmaceutical Industry Limited',
                     'Danadams Pharmaceutical Industry, Ltd.',
                     'Danadams Pharmaceuticals Industry Limited'],
        'Guilin': ['Guilin Pharmaceutical Company Ltd.', 'Guilin Pharmaceutical Co. Ltd',
                   'Guilin Pharmaceutical Co., Ltd'],
        'Kinapharma': ['Kinapharma Limited', 'Kinapharma Ltd'],
        'Maphar': ['Maphar Laboratories', 'Maphar'],
        'Novartis': ['Novartis Pharmaceutical Corporation',
                     'Novartis Pharmaceuticals Corporation'],
        'Pharmanova': ['Pharmanova Limited', 'Pharmanova Ltd'],
        'Phyto-Riker': ['Phyto-Riker (Gihoc) Pharmaceuticals Ltd',
                        'Phyto-Riker (Gihoc) Pharmaceuticals, Ltd.'],
        'Ronak Exim': ['Ronak Exim PVT. Ltd', 'Ronak Exim Pvt Ltd'],
    })
    # Philippines: provinces (note 'region 1 ' carries a trailing space in the raw data)
    _consolidate_names(MQD_df_PHI, 'Province', {
        'Calabarzon': ['CALABARZON'],
        'Region 1': ['region 1 '],
        'Region 7': ['region7'],
        'Region 9': ['region9'],
    })
    # Philippines: manufacturers (some raw spellings carry trailing spaces)
    _consolidate_names(MQD_df_PHI, 'Manufacturer', {
        'AM-Europharma': ['AM-Europharma', 'Am-Euro Pharma Corporation'],
        'Amherst': ['Amherst Laboratories Inc', 'Amherst Laboratories Inc.',
                    'Amherst Laboratories, Inc.'],
        'BRLI': ['Biotech Research Lab Inc.', 'BRLI'],
        'Compact': ['Compact Pharmaceutical Corp', 'Compact Pharmaceutical Corp.',
                    'Compact Pharmaceutical Corporation'],
        'Diamond': ['Diamond Laboratorie, Inc. ', 'Diamond Laboratories, Inc.'],
        'Drugmakers': ['Drugmakers Biotech Research Laboratories, Inc.',
                       'Drugmakers Laboratories Inc', 'Drugmakers Laboratories Inc.',
                       'Drugmakers Laboratories, Inc.'],
        'Flamingo': ['Flamingo Pharmaceuticals Ltd', 'Flamingo Pharmaceuticals Ltd.',
                     'Flamingo Pharmaceuticals, Ltd.'],
        'Interphil': ['Interphil Laboratories', 'Interphil Laboratories, Inc.',
                      'Interphil Laboratories,Inc'],
        'J.M. Tolmann': ['J.M. Tolman Laboratories, Inc.', 'J.M. Tolmann Lab. Inc.',
                         'J.M. Tolmann Laboratories, Inc.', 'J.M.Tollman Laboratories Inc.',
                         'J.M.Tolmann Laboratories Inc', 'J.M.Tolmann Laboratories Inc.',
                         'Tolmann'],
        'Lloyd': ['Lloyd Laboratories Inc', 'Lloyd Laboratories Inc.',
                  'Lloyd Laboratories, Inc.'],
        'Lumar': ['Lumar Pharmaceutical Lab', 'Lumar Pharmaceutical Lab. ',
                  'Lumar Pharmaceutical Laboratory'],
        'Lupin': ['Lupin Limited', 'Lupin Ltd', 'Lupin Ltd.'],
        'Unknown': ['Missing', 'No Information Available', 'No information'],
        'Natrapharm': ['Natrapharm', 'Natrapharm Inc.', 'Natrapharm, Inc.'],
        'New Myrex': ['New Myrex Lab., Inc.', 'New Myrex Laboratories Inc',
                      'New Myrex Laboratories Inc.', 'New Myrex Laboratories, Inc.'],
        'Novartis': ['Novartis (Bangladesh)', 'Novartis (Bangladesh) Ltd.',
                     'Novartis Bangladesh Ltd', 'Novartis Bangladesh Ltd.', 'Novartis'],
        'Pascual': ['Pascual Lab. Inc.', 'Pascual Laboratories, Inc.'],
        'Pharex': ['Pharex Health Corp.', 'Pharex'],
        'Plethico': ['Plethico Pharmaceutical Ltd.', 'Plethico Pharmaceuticals, Ltd.'],
        'San Marino': ['San Marino Lab., Corp.', 'San Marino Laboratories Corp'],
        'Sandoz': ['Sandoz South Africa Ltd.', 'Sandoz Private Ltd.',
                   'Sandoz Philippines Corp.', 'Sandoz GmbH', 'Sandoz'],
        # Original code listed 'Scheele Laboratories Phils, Inc.' twice; once suffices.
        'Scheele': ['Scheele Laboratories Phil., Inc.', 'Scheele Laboratories Phils, Inc.',
                    'Scheele Laboratories Phis., Inc.'],
        'TGP': ['The Generics Pharmacy', 'The Generics Pharmacy Inc.', 'TGP'],
        'Wyeth': ['Wyeth Pakistan Limited', 'Wyeth Pakistan Ltd', 'Wyeth Pakistan Ltd.'],
    })
    # Make smaller data frames filtered for facility type and therapeutic indication
    # Filter for facility type
    MQD_df_CAM_filt = MQD_df_CAM[MQD_df_CAM['Facility Type'].isin(
        ['Depot of Pharmacy', 'Health Clinic', 'Pharmacy', 'Pharmacy Depot', 'Private Clinic',
         'Retail-drug Outlet', 'Retail drug outlet', 'Clinic'])].copy()
    MQD_df_GHA_filt = MQD_df_GHA[MQD_df_GHA['Facility Type'].isin(
        ['Health Clinic', 'Hospital', 'Pharmacy', 'Retail Shop', 'Retail-drug Outlet'])].copy()
    MQD_df_PHI_filt = MQD_df_PHI[MQD_df_PHI['Facility Type'].isin(
        ['Health Center', 'Health Clinic', 'Hospital', 'Hospital Pharmacy', 'Pharmacy',
         'Retail-drug Outlet', 'health office'])].copy()
    # Now filter by chosen drug types
    MQD_df_CAM_antimalarial = MQD_df_CAM_filt[MQD_df_CAM_filt['Therapeutic Indications'].isin(['Antimalarial'])].copy()
    MQD_df_GHA_antimalarial = MQD_df_GHA_filt[MQD_df_GHA_filt['Therapeutic Indications'].isin(['Antimalarial',
                                                                                               'Antimalarials'])].copy()
    MQD_df_PHI_antituberculosis = MQD_df_PHI_filt[MQD_df_PHI_filt['Therapeutic Indications'].isin(['Anti-tuberculosis',
                                                                                                   'Antituberculosis'])].copy()
    # For each desired data set, generate lists suitable for use with logistigate
    # Overall data
    dataTbl_CAM = _to_testresult_list(MQD_df_CAM)
    dataTbl_GHA = _to_testresult_list(MQD_df_GHA)
    dataTbl_PHI = _to_testresult_list(MQD_df_PHI)
    # Filtered data
    dataTbl_CAM_filt = _to_testresult_list(MQD_df_CAM_filt)
    dataTbl_GHA_filt = _to_testresult_list(MQD_df_GHA_filt)
    dataTbl_PHI_filt = _to_testresult_list(MQD_df_PHI_filt)
    # Therapeutics data
    dataTbl_CAM_antimalarial = _to_testresult_list(MQD_df_CAM_antimalarial)
    dataTbl_GHA_antimalarial = _to_testresult_list(MQD_df_GHA_antimalarial)
    dataTbl_PHI_antituberculosis = _to_testresult_list(MQD_df_PHI_antituberculosis)
    # Put the databases and lists into a dictionary
    outputDict = {}
    outputDict.update({'df_ALL': MQD_df,
                       'df_CAM': MQD_df_CAM, 'df_GHA': MQD_df_GHA, 'df_PHI': MQD_df_PHI,
                       'df_CAM_filt': MQD_df_CAM_filt, 'df_GHA_filt': MQD_df_GHA_filt, 'df_PHI_filt': MQD_df_PHI_filt,
                       'df_CAM_antimalarial': MQD_df_CAM_antimalarial, 'df_GHA_antimalarial': MQD_df_GHA_antimalarial,
                       'df_PHI_antituberculosis': MQD_df_PHI_antituberculosis,
                       'dataTbl_CAM': dataTbl_CAM, 'dataTbl_GHA': dataTbl_GHA, 'dataTbl_PHI': dataTbl_PHI,
                       'dataTbl_CAM_filt': dataTbl_CAM_filt, 'dataTbl_GHA_filt': dataTbl_GHA_filt,
                       'dataTbl_PHI_filt': dataTbl_PHI_filt, 'dataTbl_CAM_antimalarial': dataTbl_CAM_antimalarial,
                       'dataTbl_GHA_antimalarial': dataTbl_GHA_antimalarial,
                       'dataTbl_PHI_antituberculosis': dataTbl_PHI_antituberculosis})
    return outputDict
def MQDdataScript():
'''Script looking at the MQD data'''
import scipy.special as sps
import numpy as np
MCMCdict = {'MCMCtype': 'NUTS', 'Madapt': 5000, 'delta': 0.4}
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '../logistigate', 'exmples', 'data')))
# Grab processed data tables
dataDict = cleanMQD()
# Run with Country as outlets
dataTblDict = util.testresultsfiletotable('MQDfiles/MQD_TRIMMED1')
dataTblDict.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(0.038)),
'MCMCdict': MCMCdict})
logistigateDict = lg.runlogistigate(dataTblDict)
util.plotPostSamples(logistigateDict)
util.printEstimates(logistigateDict)
# Run with Country-Province as outlets
dataTblDict2 = util.testresultsfiletotable('MQDfiles/MQD_TRIMMED2.csv')
dataTblDict2.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(0.038)),
'MCMCdict': MCMCdict})
logistigateDict2 = lg.runlogistigate(dataTblDict2)
util.plotPostSamples(logistigateDict2)
util.printEstimates(logistigateDict2)
# Run with Cambodia provinces
dataTblDict_CAM = util.testresultsfiletotable(dataDict['dataTbl_CAM'], csvName=False)
countryMean = np.sum(dataTblDict_CAM['Y']) / np.sum(dataTblDict_CAM['N'])
dataTblDict_CAM.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM = lg.runlogistigate(dataTblDict_CAM)
numCamImps_fourth = int(np.floor(logistigateDict_CAM['importerNum'] / 4))
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth).tolist(),
subTitleStr=['\nCambodia - 1st Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth,numCamImps_fourth*2).tolist(),
subTitleStr=['\nCambodia - 2nd Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 2, numCamImps_fourth * 3).tolist(),
subTitleStr=['\nCambodia - 3rd Quarter', '\nCambodia'])
util.plotPostSamples(logistigateDict_CAM, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 3, numCamImps_fourth * 4).tolist(),
subTitleStr=['\nCambodia - 4th Quarter', '\nCambodia'])
util.printEstimates(logistigateDict_CAM)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_CAM['importerNum'] + logistigateDict_CAM['outletNum']
sampMedians = [np.median(logistigateDict_CAM['postSamples'][:,i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_CAM['importerNum']]) if x > 0.4]
util.plotPostSamples(logistigateDict_CAM, importerIndsSubset=highImporterInds,subTitleStr=['\nCambodia - Subset','\nCambodia'])
util.printEstimates(logistigateDict_CAM, importerIndsSubset=highImporterInds)
# Run with Cambodia provinces filtered for outlet-type samples
dataTblDict_CAM_filt = util.testresultsfiletotable(dataDict['dataTbl_CAM_filt'], csvName=False)
#dataTblDict_CAM_filt = util.testresultsfiletotable('MQDfiles/MQD_CAMBODIA_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_CAM_filt['Y']) / np.sum(dataTblDict_CAM_filt['N'])
dataTblDict_CAM_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_filt = lg.runlogistigate(dataTblDict_CAM_filt)
numCamImps_fourth = int(np.floor(logistigateDict_CAM_filt['importerNum'] / 4))
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth).tolist(),
subTitleStr=['\nCambodia (filtered) - 1st Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth, numCamImps_fourth * 2).tolist(),
subTitleStr=['\nCambodia (filtered) - 2nd Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 2, numCamImps_fourth * 3).tolist(),
subTitleStr=['\nCambodia (filtered) - 3rd Quarter', '\nCambodia (filtered)'])
util.plotPostSamples(logistigateDict_CAM_filt, plotType='int90',
importerIndsSubset=np.arange(numCamImps_fourth * 3, logistigateDict_CAM_filt['importerNum']).tolist(),
subTitleStr=['\nCambodia (filtered) - 4th Quarter', '\nCambodia (filtered)'])
# Run with Cambodia provinces filtered for antibiotics
dataTblDict_CAM_antibiotic = util.testresultsfiletotable('MQDfiles/MQD_CAMBODIA_ANTIBIOTIC.csv')
countryMean = np.sum(dataTblDict_CAM_antibiotic['Y']) / np.sum(dataTblDict_CAM_antibiotic['N'])
dataTblDict_CAM_antibiotic.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_antibiotic = lg.runlogistigate(dataTblDict_CAM_antibiotic)
numCamImps_third = int(np.floor(logistigateDict_CAM_antibiotic['importerNum'] / 3))
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third).tolist(),
subTitleStr=['\nCambodia - 1st Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third, numCamImps_third * 2).tolist(),
subTitleStr=['\nCambodia - 2nd Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.plotPostSamples(logistigateDict_CAM_antibiotic, plotType='int90',
importerIndsSubset=np.arange(numCamImps_third * 2, logistigateDict_CAM_antibiotic['importerNum']).tolist(),
subTitleStr=['\nCambodia - 3rd Third (Antibiotics)', '\nCambodia (Antibiotics)'])
util.printEstimates(logistigateDict_CAM_antibiotic)
# Run with Cambodia provinces filtered for antimalarials
dataTblDict_CAM_antimalarial = util.testresultsfiletotable(dataDict['dataTbl_CAM_antimalarial'], csvName=False)
countryMean = np.sum(dataTblDict_CAM_antimalarial['Y']) / np.sum(dataTblDict_CAM_antimalarial['N'])
dataTblDict_CAM_antimalarial.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_CAM_antimalarial = lg.runlogistigate(dataTblDict_CAM_antimalarial)
#numCamImps_half = int(np.floor(logistigateDict_CAM_antimalarial['importerNum'] / 2))
#util.plotPostSamples(logistigateDict_CAM_antimalarial, plotType='int90',
# importerIndsSubset=np.arange(numCamImps_half).tolist(),
# subTitleStr=['\nCambodia - 1st Half (Antimalarials)', '\nCambodia (Antimalarials)'])
#util.plotPostSamples(logistigateDict_CAM_antimalarial, plotType='int90',
# importerIndsSubset=np.arange(numCamImps_half,
# logistigateDict_CAM_antimalarial['importerNum']).tolist(),
# subTitleStr=['\nCambodia - 2nd Half (Antimalarials)', '\nCambodia (Antimalarials)'])
# Special plotting for these data sets
numImp, numOut = logistigateDict_CAM_antimalarial['importerNum'], logistigateDict_CAM_antimalarial['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_CAM_antimalarial['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_CAM_antimalarial['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, l], lowerQuant) for l in importerIndsSubset]
impUppers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, l], upperQuant) for l in importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nCambodia Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_CAM_antimalarial['outletNames'][i] for i in outletIndsSubset]
outLowers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, numImp + l], lowerQuant) for l in outletIndsSubset]
outUppers = [np.quantile(logistigateDict_CAM_antimalarial['postSamples'][:, numImp + l], upperQuant) for l in outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nCambodia Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_CAM_antimalarial)
# Run with Ethiopia provinces
dataTblDict_ETH = util.testresultsfiletotable('MQDfiles/MQD_ETHIOPIA.csv')
countryMean = np.sum(dataTblDict_ETH['Y']) / np.sum(dataTblDict_ETH['N'])
dataTblDict_ETH.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_ETH = lg.runlogistigate(dataTblDict_ETH)
util.plotPostSamples(logistigateDict_ETH)
util.printEstimates(logistigateDict_ETH)
# Run with Ghana provinces
dataTblDict_GHA = util.testresultsfiletotable(dataDict['dataTbl_GHA'], csvName=False)
#dataTblDict_GHA = util.testresultsfiletotable('MQDfiles/MQD_GHANA.csv')
countryMean = np.sum(dataTblDict_GHA['Y']) / np.sum(dataTblDict_GHA['N'])
dataTblDict_GHA.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA = lg.runlogistigate(dataTblDict_GHA)
util.plotPostSamples(logistigateDict_GHA, plotType='int90',
subTitleStr=['\nGhana', '\nGhana'])
util.printEstimates(logistigateDict_GHA)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_GHA['importerNum'] + logistigateDict_GHA['outletNum']
sampMedians = [np.median(logistigateDict_GHA['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_GHA['importerNum']]) if x > 0.4]
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_GHA['importerNum']:]) if x > 0.15]
util.plotPostSamples(logistigateDict_GHA, importerIndsSubset=highImporterInds,
outletIndsSubset=highOutletInds,
subTitleStr=['\nGhana - Subset', '\nGhana - Subset'])
util.printEstimates(logistigateDict_GHA, importerIndsSubset=highImporterInds,outletIndsSubset=highOutletInds)
# Run with Ghana provinces filtered for outlet-type samples
dataTblDict_GHA_filt = util.testresultsfiletotable(dataDict['dataTbl_GHA_filt'], csvName=False)
#dataTblDict_GHA_filt = util.testresultsfiletotable('MQDfiles/MQD_GHANA_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_GHA_filt['Y']) / np.sum(dataTblDict_GHA_filt['N'])
dataTblDict_GHA_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA_filt = lg.runlogistigate(dataTblDict_GHA_filt)
util.plotPostSamples(logistigateDict_GHA_filt, plotType='int90',
subTitleStr=['\nGhana (filtered)', '\nGhana (filtered)'])
util.printEstimates(logistigateDict_GHA_filt)
# Run with Ghana provinces filtered for antimalarials
dataTblDict_GHA_antimalarial = util.testresultsfiletotable(dataDict['dataTbl_GHA_antimalarial'], csvName=False)
#dataTblDict_GHA_antimalarial = util.testresultsfiletotable('MQDfiles/MQD_GHANA_ANTIMALARIAL.csv')
countryMean = np.sum(dataTblDict_GHA_antimalarial['Y']) / np.sum(dataTblDict_GHA_antimalarial['N'])
dataTblDict_GHA_antimalarial.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_GHA_antimalarial = lg.runlogistigate(dataTblDict_GHA_antimalarial)
#util.plotPostSamples(logistigateDict_GHA_antimalarial, plotType='int90',
# subTitleStr=['\nGhana (Antimalarials)', '\nGhana (Antimalarials)'])
#util.printEstimates(logistigateDict_GHA_antimalarial)
# Special plotting for these data sets
numImp, numOut = logistigateDict_GHA_antimalarial['importerNum'], logistigateDict_GHA_antimalarial['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_GHA_antimalarial['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_GHA_antimalarial['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, l], lowerQuant) for l in
importerIndsSubset]
impUppers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, l], upperQuant) for l in
importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nGhana Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_GHA_antimalarial['outletNames'][i][6:] for i in outletIndsSubset]
outNames[7] = 'Western'
outLowers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, numImp + l], lowerQuant) for l in
outletIndsSubset]
outUppers = [np.quantile(logistigateDict_GHA_antimalarial['postSamples'][:, numImp + l], upperQuant) for l in
outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nGhana Antimalarials',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_GHA_antimalarial)
# Run with Kenya provinces
dataTblDict_KEN = util.testresultsfiletotable('MQDfiles/MQD_KENYA.csv')
countryMean = np.sum(dataTblDict_KEN['Y']) / np.sum(dataTblDict_KEN['N'])
dataTblDict_KEN.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_KEN = lg.runlogistigate(dataTblDict_KEN)
util.plotPostSamples(logistigateDict_KEN)
util.printEstimates(logistigateDict_KEN)
# Run with Laos provinces
dataTblDict_LAO = util.testresultsfiletotable('MQDfiles/MQD_LAOS.csv')
countryMean = np.sum(dataTblDict_LAO['Y']) / np.sum(dataTblDict_LAO['N'])
dataTblDict_LAO.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_LAO = lg.runlogistigate(dataTblDict_LAO)
util.plotPostSamples(logistigateDict_LAO)
util.printEstimates(logistigateDict_LAO)
# Run with Mozambique provinces
dataTblDict_MOZ = util.testresultsfiletotable('MQDfiles/MQD_MOZAMBIQUE.csv')
countryMean = np.sum(dataTblDict_MOZ['Y']) / np.sum(dataTblDict_MOZ['N'])
dataTblDict_MOZ.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_MOZ = lg.runlogistigate(dataTblDict_MOZ)
util.plotPostSamples(logistigateDict_MOZ)
util.printEstimates(logistigateDict_MOZ)
# Run with Nigeria provinces
dataTblDict_NIG = util.testresultsfiletotable('MQDfiles/MQD_NIGERIA.csv')
countryMean = np.sum(dataTblDict_NIG['Y']) / np.sum(dataTblDict_NIG['N'])
dataTblDict_NIG.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_NIG = lg.runlogistigate(dataTblDict_NIG)
util.plotPostSamples(logistigateDict_NIG)
util.printEstimates(logistigateDict_NIG)
# Run with Peru provinces
dataTblDict_PER = util.testresultsfiletotable('MQDfiles/MQD_PERU.csv')
countryMean = np.sum(dataTblDict_PER['Y']) / np.sum(dataTblDict_PER['N'])
dataTblDict_PER.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER = lg.runlogistigate(dataTblDict_PER)
numPeruImps_half = int(np.floor(logistigateDict_PER['importerNum']/2))
util.plotPostSamples(logistigateDict_PER, plotType='int90',
importerIndsSubset=np.arange(0,numPeruImps_half).tolist(), subTitleStr=['\nPeru - 1st Half', '\nPeru'])
util.plotPostSamples(logistigateDict_PER, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half,logistigateDict_PER['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half', '\nPeru'])
util.printEstimates(logistigateDict_PER)
# Plot importers subset where median sample is above 0.4
totalEntities = logistigateDict_PER['importerNum'] + logistigateDict_PER['outletNum']
sampMedians = [np.median(logistigateDict_PER['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_PER['importerNum']]) if x > 0.4]
highImporterInds = [highImporterInds[i] for i in [3,6,7,8,9,12,13,16]] # Only manufacturers with more than 1 sample
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_PER['importerNum']:]) if x > 0.12]
util.plotPostSamples(logistigateDict_PER, importerIndsSubset=highImporterInds,
outletIndsSubset=highOutletInds,
subTitleStr=['\nPeru - Subset', '\nPeru - Subset'])
util.printEstimates(logistigateDict_PER, importerIndsSubset=highImporterInds, outletIndsSubset=highOutletInds)
# Run with Peru provinces filtered for outlet-type samples
dataTblDict_PER_filt = util.testresultsfiletotable('MQDfiles/MQD_PERU_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_PER_filt['Y']) / np.sum(dataTblDict_PER_filt['N'])
dataTblDict_PER_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER_filt = lg.runlogistigate(dataTblDict_PER_filt)
numPeruImps_half = int(np.floor(logistigateDict_PER_filt['importerNum'] / 2))
util.plotPostSamples(logistigateDict_PER_filt, plotType='int90',
importerIndsSubset=np.arange(0, numPeruImps_half).tolist(),
subTitleStr=['\nPeru - 1st Half (filtered)', '\nPeru (filtered)'])
util.plotPostSamples(logistigateDict_PER_filt, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half, logistigateDict_PER_filt['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half (filtered)', '\nPeru (filtered)'])
util.printEstimates(logistigateDict_PER_filt)
# Run with Peru provinces filtered for antibiotics
dataTblDict_PER_antibiotics = util.testresultsfiletotable('MQDfiles/MQD_PERU_ANTIBIOTIC.csv')
countryMean = np.sum(dataTblDict_PER_antibiotics['Y']) / np.sum(dataTblDict_PER_antibiotics['N'])
dataTblDict_PER_antibiotics.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PER_antibiotics = lg.runlogistigate(dataTblDict_PER_antibiotics)
numPeruImps_half = int(np.floor(logistigateDict_PER_antibiotics['importerNum'] / 2))
util.plotPostSamples(logistigateDict_PER_antibiotics, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half).tolist(),
subTitleStr=['\nPeru - 1st Half (Antibiotics)', '\nPeru (Antibiotics)'])
util.plotPostSamples(logistigateDict_PER_antibiotics, plotType='int90',
importerIndsSubset=np.arange(numPeruImps_half, logistigateDict_PER_antibiotics['importerNum']).tolist(),
subTitleStr=['\nPeru - 2nd Half (Antibiotics)', '\nPeru (Antibiotics)'])
util.printEstimates(logistigateDict_PER_antibiotics)
# Run with Philippines provinces
dataTblDict_PHI = util.testresultsfiletotable(dataDict['dataTbl_PHI'], csvName=False)
#dataTblDict_PHI = util.testresultsfiletotable('MQDfiles/MQD_PHILIPPINES.csv')
countryMean = np.sum(dataTblDict_PHI['Y']) / np.sum(dataTblDict_PHI['N'])
dataTblDict_PHI.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PHI = lg.runlogistigate(dataTblDict_PHI)
util.plotPostSamples(logistigateDict_PHI,plotType='int90',subTitleStr=['\nPhilippines','\nPhilippines'])
util.printEstimates(logistigateDict_PHI)
# Plot importers subset where median sample is above 0.1
totalEntities = logistigateDict_PHI['importerNum'] + logistigateDict_PHI['outletNum']
sampMedians = [np.median(logistigateDict_PHI['postSamples'][:, i]) for i in range(totalEntities)]
highImporterInds = [i for i, x in enumerate(sampMedians[:logistigateDict_PHI['importerNum']]) if x > 0.1]
#highImporterInds = [highImporterInds[i] for i in
# [3, 6, 7, 8, 9, 12, 13, 16]] # Only manufacturers with more than 1 sample
highOutletInds = [i for i, x in enumerate(sampMedians[logistigateDict_PHI['importerNum']:]) if x > 0.1]
#util.plotPostSamples(logistigateDict_PHI, importerIndsSubset=highImporterInds,
# outletIndsSubset=highOutletInds,
# subTitleStr=['\nPhilippines - Subset', '\nPhilippines - Subset'])
# Special plotting for these data sets
numImp, numOut = logistigateDict_PHI['importerNum'], logistigateDict_PHI['outletNum']
lowerQuant, upperQuant = 0.05, 0.95
intStr = '90'
priorSamps = logistigateDict_PHI['prior'].expitrand(5000)
priorLower, priorUpper = np.quantile(priorSamps, lowerQuant), np.quantile(priorSamps, upperQuant)
importerIndsSubset = range(numImp)
impNames = [logistigateDict_PHI['importerNames'][i] for i in importerIndsSubset]
impLowers = [np.quantile(logistigateDict_PHI['postSamples'][:, l], lowerQuant) for l in
importerIndsSubset]
impUppers = [np.quantile(logistigateDict_PHI['postSamples'][:, l], upperQuant) for l in
importerIndsSubset]
midpoints = [impUppers[i] - (impUppers[i] - impLowers[i]) / 2 for i in range(len(impUppers))]
zippedList = zip(midpoints, impUppers, impLowers, impNames)
sorted_pairs = sorted(zippedList, reverse=True)
impNamesSorted = [tup[3] for tup in sorted_pairs]
impNamesSorted.append('')
impNamesSorted.append('(Prior)')
# Plot
import matplotlib.pyplot as plt
fig, (ax) = plt.subplots(figsize=(10, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='red')
plt.plot((impNamesSorted[-1], impNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(impNamesSorted)), impNamesSorted, rotation=90)
plt.title('Manufacturers - ' + intStr + '% Intervals' + '\nPhilippines Anti-tuberculosis Medicines',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Manufacturer Name', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(9)
fig.tight_layout()
plt.show()
plt.close()
outletIndsSubset = range(numOut)
outNames = [logistigateDict_PHI['outletNames'][i] for i in outletIndsSubset]
outLowers = [np.quantile(logistigateDict_PHI['postSamples'][:, numImp + l], lowerQuant) for l in
outletIndsSubset]
outUppers = [np.quantile(logistigateDict_PHI['postSamples'][:, numImp + l], upperQuant) for l in
outletIndsSubset]
midpoints = [outUppers[i] - (outUppers[i] - outLowers[i]) / 2 for i in range(len(outUppers))]
zippedList = zip(midpoints, outUppers, outLowers, outNames)
sorted_pairs = sorted(zippedList, reverse=True)
outNamesSorted = [tup[3] for tup in sorted_pairs]
outNamesSorted.append('')
outNamesSorted.append('(Prior)')
# Plot
fig, (ax) = plt.subplots(figsize=(8, 10), ncols=1)
sorted_pairs.append((np.nan, np.nan, np.nan, ' ')) # for spacing
for _, upper, lower, name in sorted_pairs:
plt.plot((name, name), (lower, upper), 'o-', color='purple')
plt.plot((outNamesSorted[-1], outNamesSorted[-1]), (priorLower, priorUpper), 'o--', color='gray')
plt.ylim([0, 1])
plt.xticks(range(len(outNamesSorted)), outNamesSorted, rotation=90)
plt.title('Regional Aggregates - ' + intStr + '% Intervals' + '\nPhilippines Anti-tuberculosis Medicines',
fontdict={'fontsize': 18, 'fontname': 'Trebuchet MS'})
plt.xlabel('Regional Aggregate', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
plt.ylabel('Interval value', fontdict={'fontsize': 14, 'fontname': 'Trebuchet MS'})
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Times New Roman')
label.set_fontsize(11)
fig.tight_layout()
plt.show()
plt.close()
util.Summarize(logistigateDict_PHI)
util.printEstimates(logistigateDict_PHI, importerIndsSubset=highImporterInds, outletIndsSubset=highOutletInds)
# Run with Philippines provinces filtered for outlet-type samples
dataTblDict_PHI_filt = util.testresultsfiletotable('MQDfiles/MQD_PHILIPPINES_FACILITYFILTER.csv')
countryMean = np.sum(dataTblDict_PHI_filt['Y']) / np.sum(dataTblDict_PHI_filt['N'])
dataTblDict_PHI_filt.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 1000,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_PHI_filt = lg.runlogistigate(dataTblDict_PHI_filt)
util.plotPostSamples(logistigateDict_PHI_filt, plotType='int90', subTitleStr=['\nPhilippines (filtered)', '\nPhilippines (filtered)'])
util.printEstimates(logistigateDict_PHI_filt)
# Run with Thailand provinces
dataTblDict_THA = util.testresultsfiletotable('MQDfiles/MQD_THAILAND.csv')
countryMean = np.sum(dataTblDict_THA['Y']) / np.sum(dataTblDict_THA['N'])
dataTblDict_THA.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_THA = lg.runlogistigate(dataTblDict_THA)
util.plotPostSamples(logistigateDict_THA)
util.printEstimates(logistigateDict_THA)
# Run with Viet Nam provinces
dataTblDict_VIE = util.testresultsfiletotable('MQDfiles/MQD_VIETNAM.csv')
countryMean = np.sum(dataTblDict_VIE['Y']) / np.sum(dataTblDict_VIE['N'])
dataTblDict_VIE.update({'diagSens': 1.0,
'diagSpec': 1.0,
'numPostSamples': 500,
'prior': methods.prior_normal(mu=sps.logit(countryMean)),
'MCMCdict': MCMCdict})
logistigateDict_VIE = lg.runlogistigate(dataTblDict_VIE)
util.plotPostSamples(logistigateDict_VIE)
util.printEstimates(logistigateDict_VIE)
return
| [
2,
5521,
14145,
329,
262,
705,
24396,
82,
6,
2393,
407,
852,
1498,
284,
17276,
262,
705,
76,
11215,
6359,
321,
489,
364,
6,
9483,
329,
33332,
198,
11748,
25064,
198,
11748,
28686,
198,
6173,
46023,
62,
34720,
796,
28686,
13,
6978,
1... | 2.084768 | 29,457 |
"""Feature extraction code for the VerBIO project
"""
import pandas as pd
import numpy as np
from scipy import stats
import opensmile
from scipy.io import wavfile
import preprocessing
import neurokit2 as nk
import scipy
import math
def get_df_gradient(df, feature_keys):
    """Given a list of keys for a dataframe, takes the gradient of those features and adds it to a new
    column with '_grad' appended to the original key name.

    Parameters
    ----------
    df : Pandas dataframe
        Dataframe that has columns in feature_keys
    feature_keys : list[str]
        Keys in the dataframe we want to take the gradient of

    Returns
    -------
    df : Pandas dataframe
        Modified Dataframe with new gradient keys
    grad_keys : list[str]
        New keys added with '_grad' appended to it
    """
    grad_keys = []
    for key in feature_keys:
        new_key = key + '_grad'
        # Bug fix: the original passed the bound method `df[key].to_numpy`
        # (not its result) and handed np.gradient a `dtype` kwarg it does not
        # accept. Call .to_numpy() with the cast, then differentiate.
        df[new_key] = np.gradient(df[key].to_numpy(dtype='float64'), axis=0)
        grad_keys.append(new_key)
    return df, grad_keys
def format_extracted_features(df, target_keys=None, time_key='', repair_fns=None, shift_fn=None, lookback_fn=None, sampling_fn=None):
    """Prepare an extracted-feature dataframe for training.

    Parameters
    ----------
    df : Pandas dataframe
        Dataframe that holds our features, does NOT contain the outcome (i.e., only 'X', not 'y')
    target_keys : list[str], optional
        Keep only 'target_keys' (plus time_key) and drop the rest, in place. If empty
        (or not specified), then keep all columns
    time_key : str, optional
        If there is a time key in the dataframe that needs to be dropped, then specify it.
        The empty string means there is no time key in the dataframe
    repair_fns : dict[str, callable], optional
        Maps a dataframe key to the repair function applied to that column. Regardless of
        these, every column is eventually repaired with interpolation
    shift_fn : callable, optional
        An optional function to shift the data back or forward in time
    lookback_fn : callable, optional
        An optional function to append lookback (lagged) features
    sampling_fn : callable, optional
        An optional function to upsample or downsample the data

    Returns
    -------
    df : Pandas dataframe
        The prepared dataframe for training
    """
    # Avoid mutable default arguments; None means "not supplied".
    target_keys = [] if target_keys is None else target_keys
    repair_fns = {} if repair_fns is None else repair_fns
    if len(target_keys) > 0:
        kept_keys = set(target_keys)
        if time_key != '':
            kept_keys.add(time_key)
        # Drop in place so the caller's dataframe matches the returned one.
        df.drop(columns=[key for key in df.columns if key not in kept_keys], inplace=True)
    for key, repair_fn in repair_fns.items():
        df[key] = repair_fn(df[key])
    # Regardless of repair functions, every column needs to be repaired just in case
    df = preprocessing.repair_dataframe(df, 'inter')
    # Shift, remove time key, then resample (this order is correct, see on paper)
    # TODO: Support multiple shift functions
    if shift_fn is not None:
        df = shift_fn(df)
    # Use the '' sentinel consistently (the original compared against None here).
    if time_key != '' and time_key in df.columns:
        df = df.drop(columns=time_key)
    # Lookback happens here
    if lookback_fn is not None:
        df = lookback_fn(df)
    # TODO: Support multiple sampling functions
    if sampling_fn is not None:
        df = sampling_fn(df)
    return df
def format_annotation(df, window_size=1, stride=1, window_fn=lambda x: np.mean(x, axis=0), threshold=None, time_key='', target_keys=[]):
    """Prepare the annotation features to be used for training.

    Parameters
    ----------
    df : Pandas dataframe
        Dataframe containing annotations of anxiety levels
    window_size : float
        Length of the window in seconds to apply to the annotations
    stride : float
        Stride of the window in seconds to apply to the annotations
    window_fn : function, optional
        Optional window function to be applied to the annotations. Defaults to mean
    threshold : int, optional
        Threshold to binarize the data. If annotation < threshold, 0, otherwise 1
    time_key : str, optional
        If there is a time key in the dataframe that needs to be dropped, then specify it.
        The empty string means there is no time key in the dataframe
    target_keys : list, optional
        Keep only 'target_keys' (plus time_key) and drop the rest. If empty (or not
        specified), then keep all columns

    Returns
    -------
    df : Pandas dataframe
        The prepared dataframe for training
    """
    # TODO: Allow to combine annotators
    # Bug fix: the original guard was `target_keys != None`, so the default []
    # passed the check and *every* column was dropped, contrary to the docstring.
    # Guard on emptiness instead (mirrors format_extracted_features).
    if len(target_keys) > 0:
        kept_keys = set(target_keys)
        if time_key != '':
            kept_keys.add(time_key)
        df.drop(columns=[key for key in df.columns if key not in kept_keys], inplace=True)
    df = preprocessing.repair_dataframe(df, 'inter')
    df = preprocessing.window_dataframe(df, time_key, window_size, stride, window_fn)
    if threshold is not None:
        df = preprocessing.binarize_dataframe(df, threshold, target_keys)
    if time_key != '' and time_key in df.columns:
        df = df.drop(columns=time_key)
    return df
def get_audio_features(signal, sr, frame_length, frame_skip, feature_set='eGeMAPSv02', feature_level='LLDs'):
    """Extract acoustic features (eGeMAPS or ComParE16) using the OpenSMILE toolkit.

    Window times are not taken as input; they are inferred from frame_length and
    frame_skip, assuming the signal starts at t = 0.

    Parameters
    ----------
    signal : ndarray
        Array of signal data from audio file
    sr : int
        Sampling rate of audio
    frame_length : float
        Time in seconds of window during extraction
    frame_skip : float
        Stride in seconds of window during windowing
    feature_set : str, optional
        One of 'eGeMAPSv02' (default) or 'ComParE16'
    feature_level : str, optional
        One of 'LLDs' (low-level descriptors, default) or 'Functionals'

    Returns
    -------
    df : Pandas dataframe
        Dataframe with the OpenSMILE features plus 't0'/'tn' columns holding each
        window's inferred start/end time in seconds

    Raises
    ------
    ValueError
        If feature_set or feature_level is not a supported option
    """
    # Frame length and frame skip in samples
    samples_per_frame = int(sr*frame_length)
    samples_per_skip = int(sr*frame_skip)
    # For functionals: OpenSMILE does the windowing for you
    # For LLD's: OpenSMILE does NOT window for you. It does leave windows, but those are just from the extractor
    if feature_set == 'eGeMAPSv02': feature_set_param = opensmile.FeatureSet.eGeMAPSv02
    elif feature_set == 'ComParE16': feature_set_param = opensmile.FeatureSet.ComParE_2016
    else: raise ValueError(f'Unrecognized feature_set {feature_set}')
    if feature_level == 'LLDs': feature_level_param = opensmile.FeatureLevel.LowLevelDescriptors
    elif feature_level == 'Functionals': feature_level_param = opensmile.FeatureLevel.Functionals
    else: raise ValueError(f'Unrecognized feature_level {feature_level}')
    smile = opensmile.Smile(feature_set=feature_set_param, feature_level=feature_level_param)
    windowed_dfs = preprocessing.window_array(
        signal,
        samples_per_frame,
        samples_per_skip,
        lambda x: smile.process_signal(x, sr),
    )
    if feature_level == 'LLDs':
        # Since OpenSmile doesn't window for us, we just do it here by taking the mean
        for i, df in enumerate(windowed_dfs):
            df = df.reset_index(drop=True).astype('float64')
            windowed_dfs[i] = df.mean(axis=0).to_frame().T
    # sketchy: trusts window_array to have emitted every full window in order
    n_windows = len(windowed_dfs)
    start_times = np.arange(0.0, (frame_skip*n_windows), frame_skip)
    end_times = np.arange(frame_length, (frame_skip*n_windows)+frame_length, frame_skip)
    df = pd.concat(windowed_dfs, axis=0)
    df['t0'] = start_times
    df['tn'] = end_times
    # Just to be safe..
    df = df.sort_values(by=['t0']).reset_index(drop=True)
    return df
def get_EDA_features(signal, sr, frame_length, frame_skip, times):
    """Extract windowed electrodermal-activity (EDA) features.

    The raw signal is low-pass filtered (Butterworth), smoothed, decomposed
    into tonic/phasic components with NeuroKit2, and peak-detected; each
    resulting series is then windowed with the given frame length/stride.

    Parameters
    ----------
    signal : ndarray
        Array of EDA data
    sr : int
        Sampling rate of EDA data
    frame_length : float
        Windowing length for data in seconds
    frame_skip : float
        Window stride for data in seconds
    times : ndarray
        Timestamps of each EDA sample TODO: Allow this to be inferred from sr

    Returns
    -------
    df : Pandas dataframe
        Windowed EDA features ('SCL', 'SCR_Amplitude', 'SCR_Onsets',
        'SCR_Peaks') plus 't0'/'tn' window start/end times in seconds
    """
    # TODO: Not sure if we should window the samples, then extract
    # or extract, then window samples. My guess is it doesn't matter!
    order = 4
    w0 = 1.5 # Cutoff frequency for Butterworth (should I remove?)
    # Normalize the cutoff to the Nyquist frequency, as scipy.signal.butter expects
    w0 = 2 * np.array(w0) / sr
    signal = nk.signal_sanitize(signal)
    b, a = scipy.signal.butter(N=order, Wn=w0, btype='lowpass', analog=False, output='ba')
    # filtfilt applies the filter forward and backward, so there is no phase shift
    filtered = scipy.signal.filtfilt(b, a, signal)
    signal_clean = nk.signal_smooth(filtered, method='convolution', kernel='blackman', size=48)
    # Decompose into tonic (slow-varying SCL) and phasic (SCR) components
    signal_decomp = nk.eda_phasic(signal_clean, sampling_rate=sr)
    signal_peak, info = nk.eda_peaks(
        signal_decomp['EDA_Phasic'].values,
        sampling_rate=sr,
        method='biosppy',
        amplitude_min=0.1
    )
    # Only window nonzero amplitudes
    df = pd.DataFrame({
        'SCL': preprocessing.window_timed_array(times, signal_decomp['EDA_Tonic'].to_numpy(), frame_length, frame_skip),
        'SCR_Amplitude': preprocessing.window_timed_array(times, signal_peak['SCR_Amplitude'].to_numpy(), frame_length, frame_skip, lambda x: np.mean(x[np.nonzero(x)]) if len(np.nonzero(x)[0]) > 0 else 0),
        'SCR_Onsets': preprocessing.window_timed_array(times, signal_peak['SCR_Onsets'].to_numpy(), frame_length, frame_skip, lambda x: np.sum(x)),
        'SCR_Peaks': preprocessing.window_timed_array(times, signal_peak['SCR_Peaks'].to_numpy(), frame_length, frame_skip, lambda x: np.sum(x)),
    }) # Meh, recoverytime isn't really useful
    # Window times are inferred assuming the recording starts at t = 0
    start_times = np.arange(0.0, (frame_skip*(len(df.index))), frame_skip)
    end_times = np.arange(frame_length, (frame_skip*(len(df.index)))+frame_length, frame_skip)
    df['t0'] = start_times
    df['tn'] = end_times
    # Just to be safe..
    df = df.sort_values(by=['t0']).reset_index(drop=True)
    return df
def get_HRV_features(signal, sr, frame_length, frame_skip, times):
    """Extract HRV time-series features using BVP (PPG) or ECG data.

    Extraction is done in a similar way as ComParE16: the signal is cut into
    overlapping windows, and HRV features are computed per window.
    # TODO: We could also just use IBI instead of finding peaks?

    Parameters
    ----------
    signal : ndarray
        Array of BVP (PPG) or ECG data
    sr : int
        Sampling rate of BVP or ECG data
    frame_length : float
        Windowing length for data in seconds; must be at least 4 seconds
    frame_skip : float
        Window stride for data in seconds
    times : ndarray
        Timestamps of each BVP/ECG sample TODO: Allow this to be inferred from sr

    Returns
    -------
    df : Pandas dataframe
        One row of HRV features per window, with 't0'/'tn' window start/end
        times in seconds (inferred assuming the recording starts at t = 0)
    """
    # Unfortunately, we can't get good enough time series data unless
    # BVP is at least 4 seconds in duration
    assert frame_length >= 4.0 or math.isclose(frame_length, 4.0)
    time_slices = preprocessing.get_window_slices(times, frame_length, frame_skip)
    n_slices = len(time_slices)
    feature_dfs = [None for _ in range(n_slices)]
    for i in range(n_slices):
        # Slice bounds are inclusive indices into the raw signal
        frame = signal[time_slices[i][0]:time_slices[i][1]+1]
        frame_clean = nk.ppg_clean(frame, sampling_rate=sr)
        info = nk.ppg_findpeaks(frame_clean, sampling_rate=sr)
        if frame_length >= 30.0 or math.isclose(frame_length, 30.0): # Minimum required window for accurate freq + nonlinear features
            feature_df = nk.hrv(info['PPG_Peaks'], sampling_rate=sr)
        else:
            feature_df = nk.hrv_time(info['PPG_Peaks'], sampling_rate=sr)
        feature_df['t0'] = [i*frame_skip]
        feature_df['tn'] = [(i*frame_skip)+frame_length]
        feature_dfs[i] = feature_df
    df = pd.concat(feature_dfs, axis=0)
    df = df.sort_values(by=['t0']).reset_index(drop=True)
    return df
| [
37811,
38816,
22236,
2438,
329,
262,
4643,
3483,
46,
1628,
198,
37811,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
220,
198,
6738,
629,
541,
88,
1330,
9756,
198,
11748,
9808,
18085,
198,
6738,
629,
541,
8... | 2.825633 | 4,026 |
scores = {"scores": [{"group": "I", "score": 7.0}, {"group": "II", "score": 5.0}, {"group": "III", "score": 2.0}, {"group": "IV", "score": 8.0}, {"group": "V", "score": 9.0}, {"group": "VI", "score": 16.0}, {"group": "VII", "score": 5.0}, {"group": "VIII", "score": 11.0}, {"group": "other", "score": 10.0}, {"group": "a", "score": 14.0}, {"group": "b", "score": 16.0}, {"group": "c", "score": 43.0}, {"group": "total", "score": 73.0}], "test": {"id": 9, "items": [{"id": 356, "number": "1", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 357, "number": "2", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 358, "number": "3", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 359, "number": "4", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 360, "number": "5", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 361, "number": "6", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 362, "number": "7", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 363, "number": "8", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 364, "number": "9", "description": "", "score": "2", "group": "V", "groups": ["V"]}, {"id": 365, "number": "10", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 366, "number": "11", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 367, "number": "12", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 368, "number": "13", "description": "", "score": "1", "group": "VI", "groups": ["VI"]}, {"id": 369, "number": "14", "description": "", "score": "1", "group": "I", "groups": ["I"]}, {"id": 370, "number": "15", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 371, "number": "16", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, 
{"id": 372, "number": "17", "description": "", "score": "0", "group": "VI", "groups": ["VI"]}, {"id": 373, "number": "18", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 374, "number": "19", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 375, "number": "20", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 376, "number": "21", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 377, "number": "22", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 378, "number": "23", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 379, "number": "24", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 380, "number": "25", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 381, "number": "26", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 382, "number": "27", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 383, "number": "28", "description": "", "score": "2", "group": "VII", "groups": ["VII"]}, {"id": 384, "number": "29", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 385, "number": "30", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 386, "number": "31", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 387, "number": "32", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 388, "number": "33", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 389, "number": "34", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 390, "number": "35", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 391, "number": "36", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 392, "number": "37", "description": "", "score": "0", "group": "VIII", 
"groups": ["VIII"]}, {"id": 393, "number": "38", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 394, "number": "39", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 395, "number": "40", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 396, "number": "41", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 397, "number": "42", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 398, "number": "43", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 399, "number": "44", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 400, "number": "45", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 401, "number": "46", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 402, "number": "47", "description": "", "score": "1", "group": "III", "groups": ["III"]}, {"id": 403, "number": "48", "description": "", "score": "0", "group": "IV", "groups": ["IV"]}, {"id": 404, "number": "49", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 405, "number": "50", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 406, "number": "51", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 407, "number": "52", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 408, "number": "53", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 409, "number": "54", "description": "", "score": "1", "group": "III", "groups": ["III"]}, {"id": 410, "number": "55", "description": "", "score": "2", "group": "other", "groups": ["other"]}, {"id": 411, "number": "56a", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 412, "number": "56b", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 413, "number": "56c", "description": "", 
"score": "0", "group": "III", "groups": ["III"]}, {"id": 414, "number": "56d", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 415, "number": "56e", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 416, "number": "56f", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 417, "number": "56g", "description": "", "score": "0", "group": "III", "groups": ["III"]}, {"id": 418, "number": "56h", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 419, "number": "57", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 420, "number": "58", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 421, "number": "59", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 422, "number": "60", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 423, "number": "61", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 424, "number": "62", "description": "", "score": "2", "group": "IV", "groups": ["IV"]}, {"id": 425, "number": "63", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 426, "number": "64", "description": "", "score": "2", "group": "IV", "groups": ["IV"]}, {"id": 427, "number": "65", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 428, "number": "66", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 429, "number": "67", "description": "", "score": "1", "group": "VII", "groups": ["VII"]}, {"id": 430, "number": "68", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 431, "number": "69", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 432, "number": "70", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 433, "number": "71", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 434, "number": "72", 
"description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 435, "number": "73", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 436, "number": "74", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 437, "number": "75", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 438, "number": "76", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 439, "number": "77", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 440, "number": "78", "description": "", "score": "2", "group": "VI", "groups": ["VI"]}, {"id": 441, "number": "79", "description": "", "score": "1", "group": "IV", "groups": ["IV"]}, {"id": 442, "number": "80", "description": "", "score": "1", "group": "VI", "groups": ["VI"]}, {"id": 443, "number": "81", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 444, "number": "82", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 445, "number": "83", "description": "", "score": "0", "group": "V", "groups": ["V"]}, {"id": 446, "number": "84", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 447, "number": "85", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 448, "number": "86", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 449, "number": "87", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 450, "number": "88", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 451, "number": "89", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 452, "number": "90", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 453, "number": "91", "description": "", "score": "0", "group": "I", "groups": ["I"]}, {"id": 454, "number": "92", "description": "", "score": "1", "group": "V", "groups": ["V"]}, {"id": 
455, "number": "93", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 456, "number": "94", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 457, "number": "95", "description": "", "score": "0", "group": "VIII", "groups": ["VIII"]}, {"id": 458, "number": "96", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 459, "number": "97", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 460, "number": "98", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 461, "number": "99", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 462, "number": "100", "description": "", "score": "2", "group": "V", "groups": ["V"]}, {"id": 463, "number": "101", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 464, "number": "102", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 465, "number": "103", "description": "", "score": "1", "group": "II", "groups": ["II"]}, {"id": 466, "number": "104", "description": "", "score": "1", "group": "VIII", "groups": ["VIII"]}, {"id": 467, "number": "105", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 468, "number": "106", "description": "", "score": "0", "group": "VII", "groups": ["VII"]}, {"id": 469, "number": "107", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 470, "number": "108", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 471, "number": "109", "description": "", "score": "1", "group": "other", "groups": ["other"]}, {"id": 472, "number": "110", "description": "", "score": "0", "group": "other", "groups": ["other"]}, {"id": 473, "number": "111", "description": "", "score": "0", "group": "II", "groups": ["II"]}, {"id": 474, "number": "112", "description": "", "score": "2", "group": "I", "groups": ["I"]}, {"id": 475, "number": "113", 
"description": "", "score": "0", "group": "other", "groups": ["other"]}], "created_at": "2018-08-25T01:09:48.166593Z", "updated_at": "2018-08-25T01:09:48.166648Z", "client_number": "1", "test_type": "cbcl_6_18", "owner": 3}} | [
1416,
2850,
796,
19779,
1416,
2850,
1298,
685,
4895,
8094,
1298,
366,
40,
1600,
366,
26675,
1298,
767,
13,
15,
5512,
19779,
8094,
1298,
366,
3978,
1600,
366,
26675,
1298,
642,
13,
15,
5512,
19779,
8094,
1298,
366,
10855,
1600,
366,
26... | 2.687294 | 4,541 |
"""
Helper for easily doing async tasks with coroutines.
It's mostly syntactic sugar that removes the need for .then and .andThen.
Simply:
- make a generator function that yields futures (e.g. from qi.async)
- add the decorator async_generator
For example:
@stk.coroutines.async_generator
def run_test(self):
yield ALTextToSpeech.say("ready", _async=True)
yield ALTextToSpeech.say("steady", _async=True)
time.sleep(1)
yield ALTextToSpeech.say("go", _async=True)
... this will turn run_test into a function that returns a future that is
valid when the call is done - and that is still cancelable (your robot will
start speaking).
As your function now returns a future, it can be used in "yield run_test()" in
another function wrapped with this decorator.
"""
__version__ = "0.1.2"
__copyright__ = "Copyright 2017, Aldebaran Robotics / Softbank Robotics Europe"
__author__ = 'ekroeger'
__email__ = 'ekroeger@softbankrobotics.com'
import functools
import time
import threading
import qi
class _MultiFuture(object):
"""Internal helper for handling lists of futures.
The callback will only be called once, with either an exception or a
list of the right type and size.
"""
def __handle_part_done(self, index, future):
"Internal callback for when a sub-function is done."
if self.failed:
# We already raised an exception, don't do anything else.
return
assert self.expecting, "Got more callbacks than expected!"
try:
self.values[index] = future.value()
except Exception as exception:
self.failed = True
self.callback(exception=exception)
return
self.expecting -= 1
if not self.expecting:
# We have all the values
self.callback(self.returntype(self.values))
class FutureWrapper(object):
"Abstract base class for objects that pretend to be a future."
def then(self, callback):
"""Add function to be called when the future is done; returns a future.
The callback will be called with a (finished) future.
"""
if self.running: # We might want a mutex here...
return self.future.then(callback)
else:
callback(self)
# return something? (to see when we have a testcase for this...)
def andThen(self, callback):
"""Add function to be called when the future is done; returns a future.
The callback will be called with a return value (for now, None).
"""
if self.running: # We might want a mutex here...
return self.future.andThen(callback)
else:
callback(self.future.value()) #?
# return something? (to see when we have a testcase for this...)
def hasError(self):
"Was there an error in one of the generator calls?"
return bool(self._exception)
def wait(self):
"Blocks the thread until everything is finished."
self.future.wait()
def isRunning(self):
"Is the sequence of generators still running?"
return self.future.isRunning()
def value(self):
"""Blocks the thread, and returns the final generator return value.
For now, always returns None."""
if self._exception:
raise self._exception
else:
return self.future.value()
def hasValue(self):
"Tells us whether the generator 1) is finished and 2) has a value."
# For some reason this doesn't do what I expected
# self.future.hasValue() returns True even if we're not finished (?)
if self.running:
return False
elif self._exception:
return False
else:
return self.future.hasValue()
def isFinished(self):
"Is the generator finished?"
return self.future.isFinished()
def error(self):
"Returns the error of the future."
return self.future.error()
def isCancelable(self):
"Is this future cancelable? Yes, it always is."
return True
def cancel(self):
"Cancel the future, and stop executing the sequence of actions."
with self.lock:
self.running = False
self.promise.setCanceled()
def isCanceled(self):
"Has this already been cancelled?"
return not self.running
def addCallback(self, callback):
"Add function to be called when the future is done."
self.then(callback)
# You know what? I'm not implementing unwrap() because I don't see a
# use case.
class GeneratorFuture(FutureWrapper):
"Future-like object (same interface) made for wrapping a generator."
def __handle_done(self, future):
"Internal callback for when the current sub-function is done."
try:
self.__ask_for_next(future.value())
except Exception as exception:
self.__ask_for_next(exception=exception)
def __finish(self, value):
"Finish and return."
with self.lock:
self.running = False
self.promise.setValue(value)
def __ask_for_next(self, arg=None, exception=None):
"Internal - get the next function in the generator."
if self.running:
try:
self.sub_future = None # TODO: handle multifuture
if exception:
future = self.generator.throw(exception)
else:
future = self.generator.send(arg)
if isinstance(future, list):
self.sub_future = _MultiFuture(future, self.__ask_for_next, list)
elif isinstance(future, tuple):
self.sub_future = _MultiFuture(future, self.__ask_for_next, tuple)
elif isinstance(future, Return):
# Special case: we returned a special "Return" object
# in this case, stop execution.
self.__finish(future.value)
else:
future.then(self.__handle_done)
self.sub_future = future
except StopIteration:
self.__finish(None)
except Exception as exc:
with self.lock:
self._exception = exc
self.running = False
self.promise.setError(str(exc))
# self.__finish(None) # May not be best way of finishing?
def async_generator(func):
"""Decorator that turns a future-generator into a future.
This allows having a function that does a bunch of async actions one
after the other without awkward "then/andThen" syntax, returning a
future-like object (actually a GeneratorFuture) that can be cancelled, etc.
"""
@functools.wraps(func)
def function(*args, **kwargs):
"Wrapped function"
return GeneratorFuture(func(*args, **kwargs))
return function
def public_async_generator(func):
"""Variant of async_generator that returns an actual future.
This allows you to expose it through a qi interface (on a service), but
that means cancel will not stop the whole chain.
"""
@functools.wraps(func)
def function(*args, **kwargs):
"Wrapped function"
return GeneratorFuture(func(*args, **kwargs)).future
return function
class Return(object):
"Use to wrap a return function "
@async_generator
def broken_sleep(t):
"Helper - async version of time.sleep"
time.sleep(t)
# TODO: instead of blocking a thread do something with qi.async
yield Return(None)
MICROSECONDS_PER_SECOND = 1000000
sleep = _Sleep
| [
37811,
198,
47429,
329,
3538,
1804,
30351,
8861,
351,
1162,
448,
1127,
13,
198,
198,
1026,
338,
4632,
7419,
12009,
7543,
326,
20694,
262,
761,
329,
764,
8524,
290,
764,
392,
6423,
13,
198,
198,
35596,
25,
198,
532,
787,
257,
17301,
... | 2.484204 | 3,102 |
import uuid
from flask import Flask, request, jsonify, send_from_directory
from flask_socketio import SocketIO
from server.httpexceptions.exceptions import ExceptionHandler
from server.services.writerservice import *
from server.utils.writerencoder import *
import time
app = Flask(__name__)
socket = SocketIO(app, async_mode='threading')
writer_service = WriterService(socket)
UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), '../../uploads/')
dataset_path = os.path.join(os.path.dirname(__file__), '../../../All Test Cases/')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.json_encoder = WriterEncoder
@app.errorhandler(ExceptionHandler)
def handle_invalid_usage(error):
"""
Error Handler for class Exception Handler
:param error:
:return: response containing:
status code, message, and data
"""
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/writers", methods=['GET'])
def get_writers_not_none():
"""
API to get all writers for predition where features not none
:raise: Exception containing:
message:
- "OK" for success
- "No writers found" if there is no writer
status_code:
- 200 for success
- 404 if there is no writer
data:
- list of WritersVo: each writervo contains id, name, username
- None if there is no writer
"""
# # global thread
# # with thread_lock:
# # if thread is None:
# thread = socket.start_background_task(background_thread)
language = request.args.get('lang', None)
if language == 'en':
status_code, message, data = writer_service.get_writers_not_none()
else:
status_code, message, data = writer_service.get_writers_arabic_not_none()
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=data)
@app.route("/allWriters", methods=['GET'])
def get_writers():
"""
API to get all writers for training *Language independent
:raise: Exception containing:
message:
- "OK" for success
- "No writers found" if there is no writer
status_code:
- 200 for success
- 404 if there is no writer
data:
- list of WritersVo: each writervo contains id, name, username
- None if there is no writer
"""
status_code, message, data = writer_service.get_all_writers()
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=data)
@app.route("/fitClassifiers", methods=['GET'])
def fit_classifiers():
"""
API to get fit classifiers for training *Language independent
:raise: Exception containing:
message:
- "OK" for success
status_code:
- 200 for success
"""
language = request.args.get('lang', None)
status_code, message = writer_service.fit_classifiers(language)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
@app.route("/predict", methods=['POST'])
def get_prediction():
"""
API for predicting a writer of the image
:parameter: Query parameter lang
- en for english
- ar for arabic
:parameter: request contains
- writers ids: writers_ids
- image name: _filename
:raise: Exception contains
- response message:
"OK" for success, "Error in prediction" for prediction conflict,"Maximum number of writers exceeded" for exceeding maximum numbers
- response status code:
200 for success, 500 for prediction conflict,400 for exceeding maximum number
"""
print("New prediction request")
try:
# get image from request
filename = request.get_json()['_filename']
testing_image = cv2.imread(UPLOAD_FOLDER + 'testing/' + filename)
# get features of the writers
# writers_ids = request.get_json()['writers_ids']
language = request.args.get('lang', None)
image_base_url = request.host_url + 'image/writers/'
if language == "ar":
status, message, writers_predicted = writer_service.predict_writer_arabic(testing_image, filename,
image_base_url)
else:
status, message, writers_predicted = writer_service.predict_writer(testing_image, filename, image_base_url)
time.sleep(60)
raise ExceptionHandler(message=message.value, status_code=status.value,
data=writers_predicted)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.CONFLICT_PREDICTION.value, status_code=HttpErrors.CONFLICT.value)
@app.route("/writer", methods=['POST'])
def create_writer():
"""
API for creating a new writer
:parameter: request contains
- writer name: _name
- writer username: _username
- image name: _image
- address: _address
- phone: _phone
- national id: _nid
:raise: Exception contains
- response message:
"OK" for success, "Writer already exists" for duplicate username
- response status code:
200 for success, 409 for duplicate username
"""
# request parameters
new_writer = request.get_json()
status_code, message = validate_writer_request(new_writer)
writer_id = None
if status_code.value == 200:
status_code, message, writer_id = writer_service.add_writer(new_writer)
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=writer_id)
@app.route("/profile", methods=['GET'])
def get_profile():
"""
API to get writer's profile
:parameter: Query parameter id
Query parameter lang
- en for english
- ar for arabic
:raise: Exception containing:
message:
- "OK" for success
- "Writer is not found" if writer does not exist
status_code:
- 200 for success
- 404 if writer does not exist
data:
- ProfileVo object containing writer's: id, name, username, address, phone, nid
- None if writer does not exist
"""
writer_id = request.args.get('id', None)
status_code, message, profile_vo = writer_service.get_writer_profile(writer_id, request.host_url)
raise ExceptionHandler(message=message.value, status_code=status_code.value, data=profile_vo)
@app.route("/image/<path>", methods=['POST'])
def upload_image(path):
"""
API for uploading images
request: image: file of the image
:param: path: path variable to identify the folder to upload in
- writers: for writers
- testing: for testing
- training: for training
:raise: Exception contains
- response message:
"OK" for success, "Upload image failed" for any fail in upload
- response status code:
200 for success, 409 for any fail in upload
"""
try:
path = request.view_args['path']
image = request.files["image"]
image_name = str(uuid.uuid1()) + '.jpg'
image.save(UPLOAD_FOLDER + path + '/' + image_name)
raise ExceptionHandler(message=HttpMessages.SUCCESS.value, status_code=HttpErrors.SUCCESS.value,
data=image_name)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.UPLOADFAIL.value, status_code=HttpErrors.CONFLICT.value)
@app.route("/image/<path>/<filename>", methods=['GET'])
def get_image(path, filename):
"""
API to get the image
:param path: path variable for folder to get the image from
- writers: for writers
- testing: for testing
- training: for training
:param filename: path variable for image name
:return: url for image in case found, url fo image not found in case not found
"""
try:
path = request.view_args['path'] + '/' + request.view_args['filename']
return send_from_directory(UPLOAD_FOLDER, path)
except:
path = request.view_args['path'] + '/not_found.png'
return send_from_directory(UPLOAD_FOLDER, path)
# raise ExceptionHandler(message=HttpMessages.IMAGENOTFOUND.value, status_code=HttpErrors.NOTFOUND.value)
@app.route("/writer", methods=['PUT'])
def update_writer_features():
"""
API for updating a writer features
:parameter: Query parameter lang
- en for english
- ar for arabic
:parameter: request contains
- image name: _filename
- writer id: _id
:raise: Exception contains
- response message:
"OK" for success, "Not found" for image not found
- response status code:
200 for success, 400 for image not found
"""
try:
# get image from request
filename = request.get_json()['_filename']
training_image = cv2.imread(UPLOAD_FOLDER + 'training/' + filename)
# get writer
writer_id = int(request.get_json()['_id'])
language = request.args.get('lang', None)
if language == "ar":
status_code, message = writer_service.update_features_arabic(training_image, filename, writer_id)
else:
status_code, message = writer_service.update_features(training_image, filename, writer_id)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
except KeyError as e:
raise ExceptionHandler(message=HttpMessages.NOTFOUND.value, status_code=HttpErrors.NOTFOUND.value)
@app.route("/setWriters")
def set_writers():
"""
API for filling database collection with dummy data
:parameter Query parameter lang
- en for english
- ar for arabic
:raise: Exception contains
- response message:
"OK" for success
- response status code:
200 for success
"""
start_class = 1
end_class = 300
language = request.args.get('lang', None)
if language == "ar":
base_path = dataset_path + 'KHATT/Samples/Class'
status_code, message = writer_service.fill_collection_arabic(start_class, end_class, base_path)
else:
base_path = dataset_path + 'Dataset/Training/Class'
status_code, message = writer_service.fill_collection(start_class, end_class, base_path)
raise ExceptionHandler(message=message.value, status_code=status_code.value)
if __name__ == '__main__':
writer_service.fit_classifiers()
print("Classifiers are fitted!")
socket.run(app)
| [
11748,
334,
27112,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
11,
3758,
62,
6738,
62,
34945,
198,
6738,
42903,
62,
44971,
952,
1330,
47068,
9399,
198,
198,
6738,
4382,
13,
2804,
24900,
11755,
13,
1069,
11755,
1330,
35528,... | 2.438535 | 4,531 |
#
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)
from bridge.default_debug_bridge import *
import time
JTAG_SOC_AXIREG = 4
JTAG_SOC_CONFREG = 7
JTAG_SOC_CONFREG_WIDTH = 4
BOOT_MODE_JTAG = 1
BOOT_MODE_JTAG_HYPER = 11
CONFREG_BOOT_WAIT = 1
CONFREG_PGM_LOADED = 1
CONFREG_INIT = 0
| [
2,
198,
2,
15069,
357,
34,
8,
2864,
35920,
43412,
290,
2059,
286,
347,
928,
2616,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.960912 | 307 |
class NoContent(Exception):
"""
Triggert, wenn das ausgewählte Objekt kein Inhalt enthält
Caller: CLI
"""
| [
4871,
1400,
19746,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
833,
6950,
861,
11,
266,
1697,
288,
292,
257,
385,
39909,
11033,
18519,
660,
38764,
988,
83,
885,
259,
554,
71,
2501,
920,
71,
11033,
2528,
198,
220,
... | 2.259259 | 54 |
import unittest
import yaml
import logging
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(format='%(message)s')
logger = logging.getLogger(__name__)
class ExecutablesTest(unittest.TestCase):
"""Check if we can get the map of executables"""
def test_getexemap(self):
"""Can we construct the dictionary for executables?"""
yamldata = """executables:
atom: gridatom
skgen: skgen.sh
lammps: mpirun -n 4 lmp_mpi
bands: dp_bands band.out bands
"""
exedict = yaml.load(yamldata).get('executables', None)
try:
for key, val in exedict.items():
logger.debug ("{:>10s} : {}".format(key, " ".join(val.split())))
except AttributeError:
# assume no executables are remapped
pass
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
331,
43695,
198,
11748,
18931,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
8,
198,
6404,
2667,
13,
35487,
16934,
7,
18982,
11639,
4,
7,
20500,
8,
82,
11537,
198,
6404... | 2.195545 | 404 |
"""
[E] We are given an unsorted array containing 'n' numbers taken from the range 1 to 'n'.
The array has some numbers appearing twice, find all these duplicate numbers without using any extra space.
Example 1:
Input: [3, 4, 4, 5, 5]
Output: [4, 5]
"""
# Time: O(n) Space: O(1)
main() | [
628,
198,
37811,
198,
58,
36,
60,
775,
389,
1813,
281,
5576,
9741,
7177,
7268,
705,
77,
6,
3146,
2077,
422,
262,
2837,
352,
284,
705,
77,
4458,
220,
198,
464,
7177,
468,
617,
3146,
12655,
5403,
11,
1064,
477,
777,
23418,
3146,
123... | 2.979798 | 99 |
from django.conf.urls import url
from django.conf.urls import include
from myapp import views
urlpatterns = [
url(r'^$', views.dashBoard, name='dashboard'),
#url(r'^myapp/', include('myapp.urls')),
]
| [
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
198,
6738,
616,
1324,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
628,
220,
220,
220,
19016,
7,
81,
6,... | 2.554217 | 83 |
import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalize_img(image, label):
"""Normalize image"""
return tf.cast(image, tf.float32) / 255., label
ds_train = ds_train.map( normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)
""" Testing pipeline"""
ds_test = ds_test.map(
normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.AUTOTUNE)
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10)
])
model.compile(
optimizer=tf.keras.optimizers.Adam(0.001),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
run_eagerly = True
)
model.fit(
ds_train,
epochs=6,
validation_data=ds_test,
)
"""Custom Inference test"""
model.summary
count = 0
#for data in ds_train:
# print(model(data[0]))
""" Converting to TFlite"""
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
""" Giving random input to model to see if it is computing properly"""
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
print("Evaluate on test data")
results = model.evaluate(ds_test, batch_size=128)
print("test loss, test acc:", results)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
62,
19608,
292,
1039,
355,
48700,
9310,
198,
7,
9310,
62,
27432,
11,
288,
82,
62,
9288,
828,
288,
82,
62,
10951,
796,
48700,
9310... | 2.52164 | 878 |
#! -*- encoding: utf-8 -*-
try:
from urllib import unquote
except ImportError: # assume python3
from urllib.parse import unquote
from string import punctuation
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from mezzanine.pages.models import Page
from mezzanine.core.models import Orderable
from mezzanine.core.fields import FileField
class Slide(Orderable):
"""
Allows for pretty banner images across the top of pages that will cycle
through each other with a fade effect.
"""
page = models.ForeignKey(Page)
file = FileField(_('File'), max_length=200, upload_to='slides', format='Image')
description = models.CharField(_('Description'), blank=True, max_length=200)
caption = models.CharField(_('Caption'), blank=True, max_length=200)
url = models.URLField(_(u'Link'), max_length=255, default="", blank=True, null=True)
public = models.BooleanField(default=True, blank=True, verbose_name=u"Público",)
site = models.ForeignKey(Site)
objects = SlideManager()
def save(self, *args, **kwargs):
"""
If no description is given when created, create one from the
file name.
"""
if not self.id and not self.description:
name = unquote(self.file.url).split('/')[-1].rsplit('.', 1)[0]
name = name.replace("'", '')
name = ''.join([c if c not in punctuation else ' ' for c in name])
# str.title() doesn't deal with unicode very well.
# http://bugs.python.org/issue6412
name = ''.join([s.upper() if i == 0 or name[i - 1] == ' ' else s
for i, s in enumerate(name)])
self.description = name
super(Slide, self).save(*args, **kwargs)
| [
2,
0,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
28311,
25,
198,
220,
220,
220,
422,
2956,
297,
571,
1330,
555,
22708,
198,
16341,
17267,
12331,
25,
220,
1303,
7048,
21015,
18,
198,
220,
220,
220,
422,
2956,
297,
... | 2.574648 | 710 |