content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from rest_framework import serializers
from core.models import CarbureLot, CarbureLotEvent, CarbureLotComment, CarbureNotification, CarbureStock, CarbureStockTransformation, Depot, Entity, EntityCertificate, EntityDepot, GenericCertificate, GenericError, SustainabilityDeclaration
from doublecount.serializers import BiofuelSerializer, CountrySerializer, EntitySerializer, FeedStockSerializer
from producers.models import ProductionSite
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
4755,
13,
27530,
1330,
20453,
495,
48601,
11,
20453,
495,
48601,
9237,
11,
20453,
495,
48601,
21357,
11,
20453,
495,
3673,
2649,
11,
20453,
495,
26207,
11,
20453,
495,
26207,
82... | 4.226415 | 106 |
"""
Writes the energy transfer section of a MESS input file
"""
import os
from ioformat import build_mako_str
# OBTAIN THE PATH TO THE DIRECTORY CONTAINING THE TEMPLATES #
SRC_PATH = os.path.dirname(os.path.realpath(__file__))
TEMPLATE_PATH = os.path.join(SRC_PATH, 'templates')
SECTION_PATH = os.path.join(TEMPLATE_PATH, 'sections')
ENE_TRANS_PATH = os.path.join(SECTION_PATH, 'energy_transfer')
def energy_down(exp_factor, exp_power, exp_cutoff):
    """ Writes the energy transfer section of the MESS input file by
        formatting input information into strings a filling Mako template.

        :param exp_factor: 300 K energy-down value for collision model (cm-1)
        :type exp_factor: float
        :param exp_power: n, for [energy-down * (T/300K)^n] for collision model
        :type exp_power: float
        :param exp_cutoff: cutoff for assuming transition probability is zero
        :type exp_cutoff: float
        :rtype: string
    """

    # Format every parameter the same way and map it straight to its
    # template key in a single comprehension
    etrans_keys = {
        key: '{0:<10.3f}'.format(val)
        for key, val in (('exp_factor', exp_factor),
                         ('exp_power', exp_power),
                         ('exp_cutoff', exp_cutoff))
    }

    return build_mako_str(
        template_file_name='edown.mako',
        template_src_path=ENE_TRANS_PATH,
        template_keys=etrans_keys)
def collision_frequency(eps1, eps2, sig1, sig2, mass1, mass2):
    """ Writes the energy transfer section of the MESS input file by
        formatting input information into strings a filling Mako template.

        :param eps1: A+A Lennard-Jones epsilon parameter of spc 1 (cm-1)
        :type eps1: float
        :param eps2: A+A Lennard-Jones epsilon parameter of spc 2 (cm-1)
        :type eps2: float
        :param sig1: A+A Lennard-Jones sigma parameter of spc 1 (Angstrom)
        :type sig1: float
        :param sig2: A+A Lennard-Jones sigma parameter of spc 2 (Angstrom)
        :type sig2: float
        :param mass1: mass of Species 1 (amu)
        :type mass1: float
        :param mass2: mass of Species 2 (amu)
        :type mass2: float
        :rtype: string
    """

    # All three template entries are a pair of left-aligned floats,
    # so bind the shared format once and reuse it
    pair_fmt = '{0:<10.3f} {1:<10.3f}'.format

    etrans_keys = {
        'epsilons': pair_fmt(eps1, eps2),
        'sigmas': pair_fmt(sig1, sig2),
        'masses': pair_fmt(mass1, mass2),
    }

    return build_mako_str(
        template_file_name='collid_freq.mako',
        template_src_path=ENE_TRANS_PATH,
        template_keys=etrans_keys)
| [
37811,
198,
20257,
274,
262,
2568,
4351,
2665,
286,
257,
337,
7597,
5128,
2393,
198,
37811,
198,
198,
11748,
28686,
198,
6738,
33245,
18982,
1330,
1382,
62,
76,
25496,
62,
2536,
628,
198,
2,
25334,
30339,
3336,
46490,
5390,
3336,
42242,... | 2.306612 | 1,210 |
# Based on: http://www.djangosnippets.org/snippets/73/
#
# Modified by Sean Reifschneider to be smarter about surrounding page
# link context. For usage documentation see:
#
# http://www.tummy.com/Community/Articles/django-pagination/
from django import template
register = template.Library()
def paginator(context, adjacent_pages=2, page_obj=None, paginator=None):
    """
    To be used in conjunction with the object_list generic view.
    Adds pagination context variables for use in displaying first, adjacent and
    last page links in addition to those created by the object_list generic
    view.
    """
    page_obj = page_obj or context['page_obj']
    paginator = paginator or context['paginator']

    current = page_obj.number
    total = paginator.num_pages

    # Window of page links around the current page; collapse to 1 when
    # the window already starts near the beginning.
    first_shown = max(current - adjacent_pages, 1)
    if first_shown <= 3:
        first_shown = 1
    last_shown = current + adjacent_pages + 1
    if last_shown >= total - 1:
        last_shown = total + 1

    page_numbers = [n for n in range(first_shown, last_shown)
                    if 0 < n <= total]

    has_next = page_obj.has_next()
    has_previous = page_obj.has_previous()

    return {
        'page_obj': page_obj,
        'request': context['request'],
        'paginator': paginator,
        'results_per_page': paginator.per_page,
        'page': current,
        'pages': total,
        'page_numbers': page_numbers,
        'next': page_obj.next_page_number() if has_next else None,
        'previous': page_obj.previous_page_number() if has_previous else None,
        'has_next': has_next,
        'has_previous': has_previous,
        'show_first': 1 not in page_numbers,
        'show_last': total not in page_numbers,
    }
register.inclusion_tag(
'components/pagination.html', takes_context=True)(paginator)
| [
2,
220,
13403,
319,
25,
2638,
1378,
2503,
13,
28241,
648,
418,
77,
3974,
1039,
13,
2398,
14,
16184,
3974,
1039,
14,
4790,
14,
198,
2,
198,
2,
220,
40499,
416,
11465,
797,
361,
20601,
710,
1304,
284,
307,
23714,
546,
7346,
2443,
19... | 2.53022 | 728 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628,
198
] | 3 | 7 |
from sentence_transformers.util import batch_to_device
from torch import Tensor
import pytorch_lightning as pl
from torch import nn, sigmoid, tensor
from torchmetrics import Accuracy, F1, Recall, Precision, MatthewsCorrcoef, StatScores
from sentence_transformers import SentenceTransformer
import transformers
import torch
from copy import deepcopy
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from .helpers import F1Loss
| [
6738,
6827,
62,
35636,
364,
13,
22602,
1330,
15458,
62,
1462,
62,
25202,
198,
6738,
28034,
1330,
309,
22854,
198,
11748,
12972,
13165,
354,
62,
2971,
768,
355,
458,
198,
6738,
28034,
1330,
299,
77,
11,
264,
17225,
1868,
11,
11192,
273... | 3.639344 | 122 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
410,
30488,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
... | 2.102041 | 49 |
from app import create_app
from app.models.user import User
from flask_script import Manager, Server
app = create_app('production')
manager = Manager(app)
server = Server(port=9527)
manager.add_command("run", server)
@manager.command
@manager.command
def adduser(user_name, email, role=2):
"""
Register a new user.
python manage.py adduser ben ben@gmail -r0
"""
from getpass import getpass
password = getpass()
password2 = getpass(prompt='Confirm: ')
if password != password2:
import sys
sys.exit('Error: passwords do not match.')
User.create_user(user_name, password, email, role)
print('User {0} was created successfully.'.format(user_name))
@manager.command
if __name__ == '__main__':
manager.run()
| [
6738,
598,
1330,
2251,
62,
1324,
198,
6738,
598,
13,
27530,
13,
7220,
1330,
11787,
198,
6738,
42903,
62,
12048,
1330,
9142,
11,
9652,
198,
198,
1324,
796,
2251,
62,
1324,
10786,
25493,
11537,
198,
37153,
796,
9142,
7,
1324,
8,
198,
... | 2.802158 | 278 |
# coding:utf-8
#
# Author: Lucas Airam Castro de Souza
# Laboratory: Grupo de Teleinformática e Automação
# University: Universidade Federal do Rio de Janeiro
#
#
#
#
#
#
#
#
# Usage: python pkt2csv.py <pcap_path> <path_to_write> <pcap_class> -1 or 1
import pickle
import pyshark
import sys
pcap_file = pyshark.FileCapture(sys.argv[1])

# Map "src,dst" -> [[signed packet lengths]]; the sign encodes direction:
# positive for packets travelling src->dst, negative for the reverse flow.
flows = {}
for pkt in pcap_file:
    if 'IP' in pkt:
        if pkt['ip'].src+','+pkt['ip'].dst in flows.keys():
            flows[pkt['ip'].src+','+pkt['ip'].dst][0].append(int(pkt.length))
        else:
            # No forward key yet: check whether this packet belongs to an
            # already-seen flow in the opposite direction.
            if pkt['ip'].dst+','+pkt['ip'].src in flows.keys():
                flows[pkt['ip'].dst+','+pkt['ip'].src][0].append(-1*int(pkt.length))
            else:
                flows[pkt['ip'].src+','+pkt['ip'].dst] = [[int(pkt.length)]]

# Label every flow with the class given on the command line (1 or -1).
writer = []
if sys.argv[3] == "1":
    print("positive class")
    for key in flows.keys():
        writer.append([flows[key][0],1])
elif sys.argv[3] == "-1":
    print("negative class")
    for key in flows.keys():
        writer.append([flows[key][0],-1])
else:
    print("no class found, choose -1 for negative class or 1 for positive class")
    pass

# Persist the labelled flows as a pickle at the requested output path.
f = open(sys.argv[2], "wb")
pickle.dump(writer,f)
f.close()
| [
2,
19617,
25,
40477,
12,
23,
198,
2,
220,
198,
2,
6434,
25,
15257,
3701,
321,
21193,
390,
22862,
4496,
198,
2,
18643,
25,
25665,
7501,
390,
14318,
259,
687,
6557,
83,
3970,
304,
5231,
6086,
16175,
28749,
198,
2,
2059,
25,
26986,
3... | 2.113438 | 573 |
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView, DetailView, CreateView
from django.shortcuts import get_object_or_404, redirect
from allauth.account.adapter import get_adapter
from allauth.socialaccount.models import SocialToken
from annoying.functions import get_object_or_None
from braces.views import JSONResponseMixin
from customers.forms import RequestDemoForm
from posts.models import Post
from users.models import User
__all__ = ('HomeView', 'WaitListView', 'InviteView', 'GetVetted',
'InviteApplyView', 'ThankYou', 'About', 'HeartBeat',)
| [
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
11,
9575,
62,
75,
12582,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
11,
... | 3.38191 | 199 |
import click
import gym
from pynput import keyboard
from pynput.keyboard import Key
from stable_baselines3 import DQN
from stable_baselines3.dqn import MlpPolicy
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.env_checker import check_env
@click.command()
@click.option('-m', '--mode', default='ai', help='Select execution mode: ai, train, human, check.')
@click.option('-n', '--n-envs', default=1, help='Number of parallel envs to train with.')
@click.option('-ts', '--training-steps', default=50000, help='Number of time steps to train.')
@click.option('-ds', '--delayed-start', default=0, help='Requires additional key press to start.')
class ActionState:
    """
    Actions:
    Type: Discrete(9)
    Representation Details
    XY-Direction Acceleration NOOP[0], U[1], UL[2], L[3], DL[4], D[5], DR[6], R[7], UR[8]
    """
    # NOTE(review): documentation-only container describing the discrete
    # action space; no attributes or methods are defined in this chunk.
if __name__ == '__main__':
start()
| [
11748,
3904,
198,
11748,
11550,
198,
6738,
279,
2047,
1996,
1330,
10586,
198,
6738,
279,
2047,
1996,
13,
2539,
3526,
1330,
7383,
198,
6738,
8245,
62,
12093,
20655,
18,
1330,
360,
48,
45,
198,
6738,
8245,
62,
12093,
20655,
18,
13,
4950... | 2.608696 | 368 |
from threading import Lock
from copy import deepcopy
| [
6738,
4704,
278,
1330,
13656,
198,
6738,
4866,
1330,
2769,
30073,
198,
220,
220,
220,
220,
220,
220,
220,
220
] | 3.05 | 20 |
from setuptools import setup
from glob import glob
import os
package_name = 'vt_steering_controller'
setup(
name=package_name,
version='1.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name), glob('launch/*.launch.py'))
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Will Heitman',
maintainer_email='will.heitman@utdallas.edu',
description='Our custom steering controller implementation.',
license='MIT',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'controller_exe = vt_steering_controller.controller_exe:main',
],
},
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
15095,
1330,
15095,
198,
11748,
28686,
198,
198,
26495,
62,
3672,
796,
705,
36540,
62,
4169,
1586,
62,
36500,
6,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
28,
26495,
62,
3672,
11,
... | 2.477477 | 333 |
import pytest
from . import polite
tv_id = 1418 # The Big Bang Theory
season_number = 12
episode_number = 1
@polite
@polite
@polite
@pytest.mark.parametrize("with_guest_session", [True, False])
@polite
@pytest.mark.parametrize("with_guest_session", [True, False])
@polite
@polite
@polite
@polite
@polite
| [
11748,
12972,
9288,
198,
198,
6738,
764,
1330,
23507,
628,
198,
14981,
62,
312,
796,
1478,
1507,
220,
1303,
383,
4403,
9801,
17003,
198,
6230,
62,
17618,
796,
1105,
198,
38668,
62,
17618,
796,
352,
628,
198,
31,
16104,
578,
628,
198,
... | 2.527132 | 129 |
# -*- coding: utf-8 -*-
"""Test data_util :mod:`~utilipy.data_utils.decorators`."""
__all__ = [
"test_idxDecorator_standard",
"test_idxDecorator_defaults",
"test_idxDecorator_new_decorator",
"test_idxDecorator_existing_function",
]
##############################################################################
# IMPORTS
# THIRD PARTY
import numpy as np
# PROJECT-SPECIFIC
from utilipy.data_utils.decorators import idxDecorator
##############################################################################
# PARAMETERS
x = np.arange(2)
y = np.arange(2) + 2
z = np.c_[x, y].T
##############################################################################
# TESTS
##############################################################################
##########################################################################
# idxDecorator
def test_idxDecorator_standard():
"""Test standard use of idxDecorator."""
# defining function
@idxDecorator
# /def
# calling normally
assert all(func1(x) == np.array([True, False]))
# using added kwarg
assert all(func1(x, as_ind=True) == np.array([0]))
return
# /def
# ------------------------------------------------------------------------
def test_idxDecorator_defaults():
"""Test setting default in idxDecorator."""
# defining function
@idxDecorator(as_ind=True)
# /def
# calling normally, defaulted to index
assert all(func2(x) == np.array([0]))
# using added kwarg
assert all(func2(x, as_ind=False) == np.array([True, False]))
return
# /def
# ------------------------------------------------------------------------
def test_idxDecorator_new_decorator():
"""Test making new decorator."""
# making new decorator with different value
trueidxdec = idxDecorator(as_ind=True)
# defining function
@trueidxdec
# /def
# calling normally, defaulted to index
assert func3(x) == np.array([0])
# using added kwarg
assert all(func3(x, as_ind=False) == np.array([True, False]))
return
# /def
# ------------------------------------------------------------------------
def test_idxDecorator_existing_function():
"""Test wrapping existing function with idxDecorator."""
# defining function
# /def
# wrapping existing function
newfunc = idxDecorator(func, as_ind=True)
# calling normally, defaulted to index
assert newfunc(x) == np.array([0])
# using added kwarg
assert all(newfunc(x, as_ind=False) == np.array([True, False]))
return
# /def
# ------------------------------------------------------------------------
##############################################################################
# END
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
14402,
1366,
62,
22602,
1058,
4666,
25,
63,
93,
22602,
541,
88,
13,
7890,
62,
26791,
13,
12501,
273,
2024,
63,
526,
15931,
628,
198,
834,
439,
834,
796,
... | 3.193961 | 861 |
from forum.models import Question | [
6738,
10041,
13,
27530,
1330,
18233
] | 5.5 | 6 |
import logging
import os
import secrets
import socket
import sys
from timeit import default_timer as timer
from urllib import request
import yaml
logging.basicConfig(level=logging.DEBUG,
# Use Jormungandr logging format
format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
datefmt='%b %d %H:%M:%S',
)
logger = logging.getLogger(__file__)
ENV_PREFIX = os.environ.get('ENV_PREFIX')
CONFIG_URL = os.environ.get('CONFIG')
PUBLIC_PORT = os.environ.get('PUBLIC_PORT', default=8299)
REST_PORT = os.environ.get('REST_PORT', default=8443)
STORAGE_DIR = os.environ.get('STORAGE_DIR', default="/mnt/storage")
PUBLIC_ID = os.environ.get('PUBLIC_ID', default=secrets.token_hex(24))
if os.path.isfile(f'{STORAGE_DIR}/config.yaml'):
logger.info('Using stored config')
with open(f'{STORAGE_DIR}/config.yaml', 'r+') as file:
config = yaml.load(file, Loader=yaml.SafeLoader)
else:
logger.info('Retrieving config from Jormungandr status page')
with request.urlopen(CONFIG_URL) as response:
config = yaml.load(response.read(), Loader=yaml.SafeLoader)
with request.urlopen('https://api.ipify.org') as response:
PUBLIC_IP = response.read().decode('utf-8')
# Required
config['p2p']['public_address'] = f"/ip4/{PUBLIC_IP}/tcp/{PUBLIC_PORT}"
config['p2p']['listen_address'] = f"/ip4/0.0.0.0/tcp/{PUBLIC_PORT}"
config['storage'] = STORAGE_DIR
config['rest']['listen'] = f"127.0.0.1:{REST_PORT}"
# High/High for stake pools
config['p2p']['topics_of_interest']['blocks'] = "high"
config['p2p']['topics_of_interest']['messages'] = "high"
# From Jormungandr-for-Newbs tutorial
config['p2p']['max_connections'] = 1024
config['p2p']['gossip_interval'] = "10s"
config['mempool'] = {}
config['mempool']['fragment_ttl'] = '2h'
config['mempool']['log_ttl'] = '24h'
config['mempool']['garbage_collection_interval'] = '2h'
# Optional
#config['log'][0]['output'] = 'journald'
config['p2p']['public_id'] = PUBLIC_ID
config['no_blockchain_updates_warning_interval'] = '360s'
# Check peers
n_peers = len(config['p2p']['trusted_peers'])
logging.info(f"Checking {n_peers} trusted peers...")
for idx, peer in enumerate(config['p2p']['trusted_peers']):
_, _, host, _, port = peer['address'].split('/')
try:
t = tcpping(host, port)
except (ValueError, ConnectionRefusedError, ConnectionError) as e:
logger.warning(f"FAIL: Bad peer {idx}: {peer['id']}")
if len(config['p2p']['trusted_peers']) > 1:
config['p2p']['trusted_peers'].remove(peer)
else:
logger.warning('Could not remove peer because it was the last one.')
continue
# Can set peers dynamically with t in future
logger.info(f"Using {len(config['p2p']['trusted_peers'])}/{n_peers} trusted peers")
with open(f"{STORAGE_DIR}/config.yaml", 'w') as file:
documents = yaml.dump(config, file)
"""
I'm now blocking anyone with multiple connections from the same IP
you get the nodes list of established connections
then just look for duplicates
echo "Proto Recv-Q Send-Q Local Address Foreign Address State"
nodes="$(netstat -tupan | grep jor | grep EST | cut -c 1-80)"
total="$(netstat -tupan | grep jor | grep EST | cut -c 1-80 | wc -l)"
printf "%s\n" "${nodes}" "----------" "Total:" "${total}"
sudo ufw deny from <any duplicate IP's in established connections>
or cron
#!/bin/bash
netstat -tupan | grep jor | grep EST | awk '{print $5}' | uniq > tmp
IPS=$(sort tmp | uniq -d | cut -d ':' -f1)
for IP in $IPS; do
ufw deny from $IP to any
done
""" | [
11748,
18931,
198,
11748,
28686,
198,
11748,
13141,
198,
11748,
17802,
198,
11748,
25064,
198,
6738,
640,
270,
1330,
4277,
62,
45016,
355,
19781,
198,
6738,
2956,
297,
571,
1330,
2581,
198,
198,
11748,
331,
43695,
628,
198,
6404,
2667,
... | 2.372215 | 1,526 |
import os
os.system("cls") #limpa janela terminal antes da execução


def _ler_resposta(prompt):
    """Read one answer, returning its first character uppercased ('' if blank).

    The empty-string guard fixes an IndexError the original code raised when
    the user pressed Enter without typing anything.
    """
    resposta = str(input(prompt)).strip().upper()
    return resposta[0] if resposta else ''


# Keep asking until a valid single-letter answer (M/F) is given.
# The explicit empty check is needed because '' in 'MmFf' is True, so a
# blank answer would otherwise slip through the validation.
sexo = _ler_resposta("Informe seu sexo: [M/F] ")
while sexo == '' or sexo not in 'MmFf':
    print(sexo)
    sexo = _ler_resposta("Dados inválidos. Informe seu sexo: [M/F] ")
print(f'Sexo {sexo} registrado com sucesso!')
| [
11748,
28686,
198,
418,
13,
10057,
7203,
565,
82,
4943,
1303,
2475,
8957,
42897,
10304,
12094,
1885,
274,
12379,
2452,
84,
16175,
28749,
198,
198,
8044,
78,
796,
965,
7,
15414,
7203,
818,
687,
68,
384,
84,
1714,
78,
25,
685,
44,
14,... | 2.246377 | 138 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Xiangtai(lxtpku@pku.edu.cn)
# Implementation of Paper Learning a Discriminative Feature Network for Semantic Segmentation (CVPR2018)(face_plus_plus)
import torch
import torch.nn as nn
from model.resnet import resnet101
#from torchvision.models import resnet101
__all__ = ["DFN"]
if __name__ == '__main__':
model = DFN(19).cuda()
model.freeze_bn()
model.eval()
image = torch.autograd.Variable(torch.randn(1, 3, 512, 512), volatile=True).cuda()
res1, res2 = model(image)
print (res1.size(), res2.size())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
45641,
83,
1872,
7,
75,
742,
79,
23063,
31,
79,
23063,
13,
15532,
13,
31522,
8,
198,
2,
46333,
286,
14962... | 2.604444 | 225 |
import sqlite3 as lite
con = lite.connect('JobDetails.db')
cur = con.cursor()
cur.execute("SELECT ReviewID, DateRange FROM Reviews")
results = cur.fetchall()

# Expand each review's DateRange (presumably "M/YY" or "M/YY-M/YY" with
# two-digit years — TODO confirm against the data) into a comma-separated
# list of 4-digit years stored back in the PossibleYears column.
for result in results:
    rID = result[0]
    r = result[1]
    split = r.split("-")
    slash = r.split("/")
    if len(split) == 1:
        # Single date: the trailing two-digit year is the only year.
        years = str(2000 + int(slash[-1]))
    else:
        # Range: enumerate the intermediate years, then append the final one.
        # NOTE(review): the upper bound uses split[-2]; for a two-part range
        # that equals split[0], so intermediate years are never emitted —
        # looks like it should be split[-1]. Verify against real DateRange data.
        years = ""
        for i in range(2000 + int(split[0].split("/")[-1]), 2000 + int(split[-2].split("/")[-1])):
            years += str(i) + ", "
        years += str(2000 + int(split[-1].split("/")[-1]))
    # NOTE(review): string-built SQL; rID comes from the DB as an integer and
    # years is locally generated, but a parameterized query would be safer.
    cur.execute("UPDATE Reviews SET PossibleYears = '" + years + "' WHERE ReviewID = " + str(rID))
con.commit()
| [
11748,
44161,
578,
18,
355,
300,
578,
198,
198,
1102,
796,
300,
578,
13,
8443,
10786,
33308,
24259,
13,
9945,
11537,
198,
22019,
796,
369,
13,
66,
21471,
3419,
198,
198,
22019,
13,
41049,
7203,
46506,
6602,
2389,
11,
7536,
17257,
1603... | 2.316327 | 294 |
import unittest
import pandasql.tests.test_pandasql
unittest.main(pandasql.tests.test_pandasql)
| [
11748,
555,
715,
395,
198,
11748,
19798,
292,
13976,
13,
41989,
13,
9288,
62,
79,
392,
292,
13976,
198,
198,
403,
715,
395,
13,
12417,
7,
79,
392,
292,
13976,
13,
41989,
13,
9288,
62,
79,
392,
292,
13976,
8,
198
] | 2.365854 | 41 |
# -*- coding: utf-8 -*-
# Copyright 2021, CS GROUP - France, https://www.csgroup.eu/
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from eodag.plugins.crunch.base import Crunch
from eodag.utils import get_geometry_from_various
try:
from shapely.errors import TopologicalError
except ImportError:
from shapely.geos import TopologicalError
logger = logging.getLogger("eodag.plugins.crunch.filter_overlap")
class FilterOverlap(Crunch):
    """FilterOverlap cruncher

    Filter products, retaining only those that are overlapping with the search_extent

    :param config: Crunch configuration, may contain :

    - `minimum_overlap` : minimal overlap percentage
    - `contains` : True if product geometry contains the search area
    - `intersects` : True if product geometry intersects the search area
    - `within` : True if product geometry is within the search area

    These configuration parameters are mutually exclusive.
    :type config: dict
    """

    def proceed(self, products, **search_params):
        """Execute crunch: Filter products, retaining only those that are overlapping with the search_extent

        :param products: A list of products resulting from a search
        :type products: list(:class:`~eodag.api.product._product.EOProduct`)
        :param search_params: Search criteria that must contain `geometry`
        :type search_params: dict
        :returns: The filtered products
        :rtype: list(:class:`~eodag.api.product._product.EOProduct`)
        """
        logger.debug("Start filtering for overlapping products")
        filtered = []
        add_to_filtered = filtered.append

        search_geom = get_geometry_from_various(**search_params)
        if not search_geom:
            logger.warning(
                "geometry not found in cruncher arguments, filtering disabled."
            )
            return products
        minimum_overlap = float(self.config.get("minimum_overlap", "0"))
        contains = self.config.get("contains", False)
        intersects = self.config.get("intersects", False)
        within = self.config.get("within", False)
        # contains / intersects / within are mutually exclusive predicates;
        # when several are set the filter is disabled rather than guessing.
        if contains and (within or intersects) or (within and intersects):
            logger.warning(
                "contains, intersects and within parameters are mutually exclusive"
            )
            return products
        elif (
            minimum_overlap > 0
            and minimum_overlap < 100
            and (contains or within or intersects)
        ):
            logger.warning(
                "minimum_overlap will be ignored because of contains/intersects/within usage"
            )
        elif not contains and not within and not intersects:
            logger.debug("Minimum overlap is: {} %".format(minimum_overlap))

        logger.debug("Initial requested extent area: %s", search_geom.area)
        if search_geom.area == 0:
            logger.debug(
                "No product can overlap a requested extent that is not a polygon (i.e with area=0)"
            )
        else:
            for product in products:
                logger.debug("Uncovered extent area: %s", search_geom.area)
                if product.search_intersection:
                    intersection = product.search_intersection
                    product_geometry = product.geometry
                else:  # Product geometry may be invalid
                    if not product.geometry.is_valid:
                        logger.debug(
                            "Trying our best to deal with invalid geometry on product: %r",
                            product,
                        )
                        product_geometry = product.geometry.buffer(0)
                        try:
                            intersection = search_geom.intersection(product_geometry)
                        except TopologicalError:
                            logger.debug(
                                "Product geometry still invalid. Overlap test restricted to containment"
                            )
                            if search_geom.contains(product_geometry):
                                # FIX: the original debug call used a %r
                                # placeholder but never passed `product`,
                                # producing a logging formatting error.
                                logger.debug(
                                    "Product %r overlaps the search extent. Adding it to filtered results",
                                    product,
                                )
                                add_to_filtered(product)
                                continue
                            elif not (contains or within or intersects):
                                # FIX: without a usable intersection the
                                # overlap percentages below would read an
                                # unbound `intersection` (NameError); skip
                                # this product instead of crashing.
                                continue
                    else:
                        product_geometry = product.geometry
                        intersection = search_geom.intersection(product_geometry)
                if (
                    (contains and product_geometry.contains(search_geom))
                    or (within and product_geometry.within(search_geom))
                    or (intersects and product_geometry.intersects(search_geom))
                ):
                    add_to_filtered(product)
                    continue
                elif contains or within or intersects:
                    continue
                # Overlap expressed both relative to the search extent (ipos)
                # and relative to the product extent (ipop); either passing
                # the threshold keeps the product.
                ipos = (intersection.area / search_geom.area) * 100
                ipop = (intersection.area / product_geometry.area) * 100
                logger.debug(
                    "Intersection of product extent and search extent covers %f percent of the search extent "
                    "area",
                    ipos,
                )
                logger.debug(
                    "Intersection of product extent and search extent covers %f percent of the product extent "
                    "area",
                    ipop,
                )
                if any(
                    (
                        search_geom.contains(product.geometry),
                        ipos >= minimum_overlap,
                        ipop >= minimum_overlap,
                    )
                ):
                    logger.debug(
                        "Product %r overlaps the search extent by the specified constraint. Adding it to "
                        "filtered results",
                        product,
                    )
                    add_to_filtered(product)
                else:
                    logger.debug(
                        "Product %r does not overlaps the search extent by the specified constraint. "
                        "Skipping it",
                        product,
                    )
        logger.info("Finished filtering products. %s resulting products", len(filtered))
        return filtered
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
33448,
11,
9429,
44441,
532,
4881,
11,
3740,
1378,
2503,
13,
6359,
8094,
13,
12496,
14,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
412,
3727,
4760,
1628,
19... | 2.105325 | 3,380 |
from eNMS.controller import controller
from eNMS.custom import CustomApp # noqa: F401
from eNMS.database import db
from eNMS.environment import env
from eNMS.forms import form_factory
from eNMS.server import server
from eNMS.variables import vs
initialize()
| [
6738,
304,
45,
5653,
13,
36500,
1330,
10444,
198,
6738,
304,
45,
5653,
13,
23144,
1330,
8562,
4677,
220,
1303,
645,
20402,
25,
376,
21844,
198,
6738,
304,
45,
5653,
13,
48806,
1330,
20613,
198,
6738,
304,
45,
5653,
13,
38986,
1330,
... | 3.234568 | 81 |
__version__ = '0.1dev'
from . import (
math,
arithmetic,
datastr,
functions,
numeric_operations
)
from .math import (
complex_math,
ntheory,
calculus,
geometry,
trigonometry,
linalg,
polynomial,
statistics,
quaternion,
octonion,
)
from .arithmetic import (
addition,
multiplication,
power,
Variable,
)
from .datastr import (
BinaryTree,
Stack,
Node,
Queue,
Tree,
)
from .functions import (
AbsoluteValue,
BinomialDistribution,
ComplexFunction,
Exponential,
Linear,
Log,
NormalDistribution,
NRoot,
Quadratic, quadratic_roots,
Rational,
)
from . import numeric_operations
from .numeric_operations import (
round_if_close,
is_scalar,
is_iterable,
is_integer,
kwargsParser,
trunc,
vectorize
)
from .science import c, H, ket, time_dilation
# TODO
# Add all objects to be imported when importing *
__all__ = [
'algebra', '_complex', 'math', 'arithmetic', 'functions', 'datastr', 'linalg', 'numeric_operations',
'polynomial', 'science', 'statistics', 'Vector', 'Infinite', 'Variable', 'AbsoluteValue',
'BinomialDistribution', 'ComplexFunction', 'Exponential', 'Linear', 'Log', 'NormalDistribution',
'NRoot', 'Quadratic', 'quadratic_roots', 'Rational'
]
| [
834,
9641,
834,
796,
705,
15,
13,
16,
7959,
6,
198,
198,
6738,
764,
1330,
357,
198,
220,
220,
220,
10688,
11,
198,
220,
220,
220,
34768,
11,
198,
220,
220,
220,
4818,
459,
81,
11,
198,
220,
220,
220,
5499,
11,
198,
220,
220,
2... | 2.446886 | 546 |
import torch
from torch import nn
from torch.distributions import Normal
from typing import Tuple
from torch.nn import functional
class GlimpseSensor(nn.Module):
"""The glimpse network.
Combines the "what" and the "where" into a glimpse
feature vector `g_t`.
- "what": glimpse extracted from the retina.
- "where": location tuple where glimpse was extracted.
Concretely, feeds the output of the retina `phi` to
a fc layer and the glimpse location vector `l_t_prev`
to a fc layer. Finally, these outputs are fed each
through a fc layer and their sum is rectified.
In other words:
`g_t = relu( fc( fc(l) ) + fc( fc(phi) ) )`
Args:
h_g: hidden layer size of the fc layer for `phi`.
h_l: hidden layer size of the fc layer for `l`.
g: size of the square patches in the glimpses extracted
by the retina.
k: number of patches to extract per glimpse.
s: scaling factor that controls the size of successive patches.
c: number of channels in each image.
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l_t_prev: a 2D tensor of shape (B, 2). Contains the glimpse
coordinates [x, y] for the previous timestep `t-1`.
Returns:
g_t: a 2D tensor of shape (B, hidden_size).
The glimpse representation returned by
the glimpse network for the current
timestep `t`."""
class Retina:
"""A visual retina.
Extracts a foveated glimpse `phi` around location `l`
from an image `x`.
Concretely, encodes the region around `l` at a
high-resolution but uses a progressively lower
resolution for pixels further from `l`, resulting
in a compressed representation of the original
image `x`.
Args:
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l: a 2D Tensor of shape (B, 2). Contains normalized
coordinates in the range [-1, 1].
size_first_patch: size of the first square patch.
num_patches_per_glimpse: number of patches to extract in the glimpse.
scale_factor_suc: scaling factor that controls the size of
successive patches.
Returns:
phi: a 5D tensor of shape (B, k, g, g, C). The
foveated glimpse of the image."""
def foveate(self, x: torch.Tensor, l: torch.Tensor) -> torch.Tensor:
"""Extract `k` square patches of size `g`, centered
at location `l`. The initial patch is a square of
size `g`, and each subsequent patch is a square
whose side is `s` times the size of the previous
patch.
The `k` patches are finally resized to (g, g) and
concatenated into a tensor of shape (B, k, g, g, C)."""
phi = []
size = self.g
# extract k patches of increasing size
for i in range(self.k):
phi.append(self.extract_patch(x, l, size))
size = int(self.s * size)
# resize the patches to squares of size g
for i in range(1, len(phi)):
k = phi[i].shape[-1] // self.g
phi[i] = functional.avg_pool2d(phi[i], k)
# concatenate into a single tensor and flatten
phi = torch.cat(phi, 1)
phi = phi.view(phi.shape[0], -1)
return phi
def extract_patch(self, x, l, size) -> torch.Tensor:
"""Extract a single patch for each image in `x`.
Args:
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l: a 2D Tensor of shape (B, 2).
size: a scalar defining the size of the extracted patch.
Returns:
patch: a 4D Tensor of shape (B, size, size, C)"""
B, C, H, W = x.shape
start = self.denormalize(H, l)
end = start + size
# pad with zeros
x = functional.pad(x, (size // 2, size // 2, size // 2, size // 2))
# loop through mini-batch and extract patches
patch = []
for i in range(B):
patch.append(x[i, :, start[i, 1]: end[i, 1], start[i, 0]: end[i, 0]])
return torch.stack(patch)
def denormalize(self, T, coords) -> torch.LongTensor:
"""Convert coordinates in the range [-1, 1] to
coordinates in the range [0, T] where `T` is
the size of the image."""
return (0.5 * ((coords + 1.0) * T)).long()
def exceeds(self, from_x, to_x, from_y, to_y, T) -> bool:
"""Check whether the extracted patch will exceed
the boundaries of the image of size `T`."""
if (from_x < 0) or (from_y < 0) or (to_x > T) or (to_y > T):
return True
return False
def forward(self, x: torch.Tensor, l_t_prev: torch.Tensor) -> torch.Tensor:
"""
:param x:
:type x:
:param l_t_prev:
:type l_t_prev:
:return:
:rtype:"""
return functional.relu(
self.fc3(
functional.relu(self.fc1(self.retina.foveate(x, l_t_prev)))
) # what # generate glimpse phi from image x
+ self.fc4(
functional.relu(self.fc2(l_t_prev.view(l_t_prev.size(0), -1)))
) # where
)
class CoreRNN(nn.Module):
"""The core network.
An RNN that maintains an internal state by integrating
information extracted from the history of past observations.
It encodes the agent's knowledge of the environment through
a state vector `h_t` that gets updated at every time step `t`.
Concretely, it takes the glimpse representation `g_t` as input,
and combines it with its internal state `h_t_prev` at the previous
time step, to produce the new internal state `h_t` at the current
time step.
In other words:
`h_t = relu( fc(h_t_prev) + fc(g_t) )`
Args:
input_size: input size of the rnn.
hidden_size: hidden size of the rnn.
g_t: a 2D tensor of shape (B, hidden_size). The glimpse
representation returned by the glimpse network for the
current timestep `t`.
h_t_prev: a 2D tensor of shape (B, hidden_size). The
hidden state vector for the previous timestep `t-1`.
Returns:
h_t: a 2D tensor of shape (B, hidden_size). The hidden
state vector for the current timestep `t`."""
def forward(self, g_t: torch.Tensor, h_t_prev: torch.Tensor) -> torch.Tensor:
"""
:param g_t:
:type g_t:
:param h_t_prev:
:type h_t_prev:
:return:
:rtype:"""
h1 = self.i2h(g_t)
h2 = self.h2h(h_t_prev)
h_t = functional.relu(h1 + h2)
return h_t
class Actor(nn.Module):
"""The action network.
Uses the internal state `h_t` of the core network to
produce the final output classification.
Concretely, feeds the hidden state `h_t` through a fc
layer followed by a softmax to create a vector of
output probabilities over the possible classes.
Hence, the environment action `a_t` is drawn from a
distribution conditioned on an affine transformation
of the hidden state vector `h_t`, or in other words,
the action network is simply a linear softmax classifier.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
h_t: the hidden state vector of the core network
for the current time step `t`.
Returns:
a_t: output probability vector over the classes."""
def forward(self, h_t: torch.Tensor) -> torch.Tensor:
"""
:param h_t:
:type h_t:
:return:
:rtype:"""
return functional.log_softmax(self.fc(h_t), dim=1)
class Locator(nn.Module):
"""The location network.
Uses the internal state `h_t` of the core network to
produce the location coordinates `l_t` for the next
time step.
Concretely, feeds the hidden state `h_t` through a fc
layer followed by a tanh to clamp the output beween
[-1, 1]. This produces a 2D vector of means used to
parametrize a two-component Gaussian with a fixed
variance from which the location coordinates `l_t`
for the next time step are sampled.
Hence, the location `l_t` is chosen stochastically
from a distribution conditioned on an affine
transformation of the hidden state vector `h_t`.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
std: standard deviation of the normal distribution.
h_t: the hidden state vector of the core network for
the current time step `t`.
Returns:
mu: a 2D vector of shape (B, 2).
l_t: a 2D vector of shape (B, 2)."""
def forward(self, h_t: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param h_t:
:type h_t:
:return:
:rtype:"""
# compute mean
mu = torch.tanh(self.fc_lt(functional.relu(self.fc(h_t.detach()))))
# reparametrization trick
l_t = torch.distributions.Normal(mu, self.std).rsample()
# we assume both dimensions are independent
# 1. pdf of the joint is the product of the pdfs
# 2. log of the product is the sum of the logs
return (
torch.sum(Normal(mu, self.std).log_prob(l_t.detach()), dim=1),
torch.clamp(l_t, -1, 1), # bound between [-1, 1]
)
class SignalBaseline(nn.Module):
"""The baseline network.
This network regresses the baseline in the
reward function to reduce the variance of
the gradient update.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
h_t: the hidden state vector of the core network
for the current time step `t`.
Returns:
b_t: a 2D vector of shape (B, 1). The baseline
for the current time step `t`."""
def forward(self, h_t: torch.Tensor) -> torch.Tensor:
"""
:param h_t:
:type h_t:
:return:
:rtype:"""
return self.fc(h_t.detach())
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
17080,
2455,
507,
1330,
14435,
198,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
28034,
13,
20471,
1330,
10345,
628,
198,
4871,
402,
2475,
7752,
47864,
7,
20471... | 2.303131 | 4,503 |
from cms.api import add_plugin
from cms.test_utils.testcases import CMSTestCase
from djangocms_frontend.contrib.alert.cms_plugins import AlertPlugin
from djangocms_frontend.contrib.alert.forms import AlertForm
from ..fixtures import TestFixture
| [
6738,
269,
907,
13,
15042,
1330,
751,
62,
33803,
198,
6738,
269,
907,
13,
9288,
62,
26791,
13,
9288,
33964,
1330,
16477,
2257,
395,
20448,
198,
198,
6738,
42625,
648,
420,
907,
62,
8534,
437,
13,
3642,
822,
13,
44598,
13,
46406,
62,... | 3.220779 | 77 |
"""
Coupling Knob
------------------
Creates a coupling knob from current optics.
Not implemented yet. TODO!
"""
from operator import itemgetter
from typing import Sequence
# from optics_functions.coupling import coupling_via_cmatrix
import pandas as pd
from cpymad.madx import Madx
import numpy as np
import logging
LOG = logging.getLogger(__name__)
COL_REFERENCE = "Reference"
def get_attribute_response(madx: Madx, sequence: str, variables: Sequence[str], attribute: str) -> pd.DataFrame:
""" Creates the linear response matrix of the given `variables` for the desired `attributed`. """
# find elements in sequence that have the attribute defined
valid_elements = {e.name: idx for idx, e in enumerate(madx.sequence[sequence].elements) if attribute in e}
if not len(valid_elements):
raise AttributeError(f"No elements found in sequence '{sequence}' with attribute '{attribute}'.")
get_valid_elements = itemgetter(*valid_elements.values())
# create DataFrame
df = pd.DataFrame(index=valid_elements.keys(), columns=variables)
# all-zero reference
for var in variables:
madx.globals[var] = 0
reference = get_attribute_values()
# responses
for var in variables:
madx.globals[var] = 1
df[var] = get_attribute_values() - reference
madx.globals[var] = 0
# drop all-zero rows
df = df.loc[(df!=0).any(axis=1)]
return df
| [
37811,
198,
34,
280,
11347,
6102,
672,
198,
1783,
438,
198,
198,
16719,
274,
257,
40204,
36842,
422,
1459,
36237,
13,
198,
3673,
9177,
1865,
13,
16926,
46,
0,
198,
37811,
198,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
6738,
19720,... | 2.869215 | 497 |
import torch.nn as nn
import torch.nn.functional as F
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Decoder(10, 10*3, 1).to(device)
x = torch.rand(4, 5, 10).to(device)
h = torch.rand(4, 5, 20).to(device)
trigger = torch.tensor([1, 2, 3, 4]).to(device)
mask = torch.ones(4, 5).to(device)
out = model(h, x, trigger, mask)
print(out.shape) | [
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
628,
198,
198,
25202,
796,
28034,
13,
25202,
7203,
66,
15339,
1,
611,
28034,
13,
66,
15339,
13,
271,
62,
15182,
3419,
2073,
... | 2.532895 | 152 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 3.4375 | 240 |
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import sys
from pxr import Tf
import unittest
Test = Tf.Tf_TestPyOptional
maxint = (2 ** 31) - 1
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
8416,
81,
79,
5272,
684,
549,
301,
198,
2,
198,
2,
15069,
1584,
46706,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
25189,
4891,
13789,
4943,
198,
2,
351,
262,
1708,
17613,
26,
... | 3.584795 | 342 |
"""
Desafio 1 | Faça um programa que peça para o usuário digitar o nome e a idade de um aluno e o número de provas que esse aluno fez. Depois, o programa deve pedir para o usuário digitar as notas de cada prova do aluno. Ao final o programa deve imprimir uma lista contendo:
a. Nome do aluno na posição 0
b. Idade do aluno na posição 1
c. Uma lista com todas as notas na posição 2
d. A média do aluno na posição 3
e. True ou False, caso a média seja maior que 5 ou não, na posição 4
"""
nota = []
lista = []
nome = str(input("NOME: "))
lista.append(nome)
idade = int(input("IDADE: "))
lista.append(idade)
num_provas = int(input("NÚMERO DE PROVAS: "))
for c in range(1, num_provas + 1):
nota.append(float(input(f"NOTA{c} ")))
lista.append(nota)
soma = media = 0
for num in nota:
soma = soma + num
media = soma/len(nota)
lista.append(media)
if media > 5:
lista.append(True)
else:
lista.append(False)
print(lista)
| [
37811,
198,
5960,
1878,
952,
352,
930,
18350,
50041,
23781,
1430,
64,
8358,
613,
50041,
31215,
267,
514,
84,
6557,
27250,
3100,
7940,
267,
299,
462,
304,
257,
4686,
671,
390,
23781,
435,
36909,
304,
267,
299,
21356,
647,
78,
390,
899,... | 2.285366 | 410 |
import cv2 as cv
import numpy as np
import time
import get_eviz_url
import upload_file
if __name__ == '__main__':
obj = DetectVideo()
# detect_img(src="person.png")
# 获取地址 (萤石云产品序列号)
src_rtmp = get_eviz_url.get_url(serial_str='F77671789')
# 检测视频流
obj.detect_video(src=src_rtmp)
| [
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
651,
62,
1990,
528,
62,
6371,
198,
11748,
9516,
62,
7753,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
19... | 1.8 | 170 |
"""
Initialization file for tweets library module.
These exist here in lib as some of them are useful as help functions of other
scripts (such as getting available campaigns). However, these could be moved to
utils/reporting/ as individual scripts. And they could be called directly or
with make, to avoid having multiple ways of calling something.
"""
| [
37811,
198,
24243,
1634,
2393,
329,
12665,
5888,
8265,
13,
198,
198,
4711,
2152,
994,
287,
9195,
355,
617,
286,
606,
389,
4465,
355,
1037,
5499,
286,
584,
198,
46521,
357,
10508,
355,
1972,
1695,
9964,
737,
2102,
11,
777,
714,
307,
... | 4.657895 | 76 |
# Data Parallel Control (dpctl)
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
| [
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
6060,
42945,
6779,
357,
26059,
34168,
8,
198,
2,
198,
2,
15069,
12131,
12,
1238,
2481,
8180,
10501,
198,
2,
198,
2,
4996... | 3.516129 | 186 |
# -*- coding: utf-8 -*-
import logging
from typing import List
from flask_appbuilder import ModelRestApi
logger = logging.getLogger(__name__)
class BaseRavenModelRestApi(ModelRestApi):
"""
Extends FAB's ModelResApi to implement specific raven generic functionality.
"""
csrf_exempt = False
add_columns: List[str]
edit_columns: List[str]
list_columns: List[str]
show_columns: List[str]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
42903,
62,
1324,
38272,
1330,
9104,
19452,
32,
14415,
628,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
... | 2.694268 | 157 |
# -*- coding: utf-8 -*-
"""App version module."""
from .. import __version__
def get_version():
"""Return app version."""
return __version__
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
4677,
2196,
8265,
526,
15931,
198,
6738,
11485,
1330,
11593,
9641,
834,
628,
198,
4299,
651,
62,
9641,
33529,
198,
220,
220,
220,
37227,
13615,
598,
2196,
526,
15... | 2.849057 | 53 |
import collections
print(Customer('Dave', '123 Main').greeting())
| [
11748,
17268,
198,
198,
4798,
7,
44939,
10786,
27984,
3256,
705,
10163,
8774,
27691,
70,
2871,
278,
28955,
198
] | 3.526316 | 19 |
#!/usr/bin/env python3
from cc_pathlib.path import Path
from cc_pathlib.case import CaseCollation
from cc_pathlib.uniw import UniversalWriter
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
201,
198,
6738,
36624,
62,
6978,
8019,
13,
6978,
1330,
10644,
201,
198,
6738,
36624,
62,
6978,
8019,
13,
7442,
1330,
8913,
22667,
341,
201,
198,
6738,
36624,
62,
6978,
8019,
... | 2.814815 | 54 |
# coding:utf8
from threading import Thread, Event
from fastweb.util.log import recorder
class FThread(Thread):
"""封装系统thread,方便线程停止"""
_fthreads = []
def __init__(self, name, task, period=0, frequency=-1):
"""初始化线程
:parameter:
- `name`: 线程名
- `task`: 任务函数,线程名会作为参数传递给task
- `period`: 执行时间间隔
- `frequency`: 执行次数,-1为永远执行,默认为永远执行
"""
self._event = Event()
self._period = period
self._task = task
self._frequency = frequency
self._fthreads.append(self)
super(FThread, self).__init__(name=name)
def run(self):
"""运行函数,可以通过start开始线程,该函数会被自动调用"""
while not self._event.isSet() and self._frequency:
self._event.wait(self._period)
self._task(self)
self._frequency -= 1
def join(self, timeout=0):
"""结束当前线程"""
self._event.set()
Thread.join(self, timeout)
@staticmethod
def stop(timeout=None):
"""等待所有线程执行完毕并结束线程"""
for thread in FThread._fthreads:
thread.join(timeout)
| [
2,
19617,
25,
40477,
23,
198,
198,
6738,
4704,
278,
1330,
14122,
11,
8558,
198,
198,
6738,
3049,
12384,
13,
22602,
13,
6404,
1330,
38156,
628,
198,
4871,
376,
16818,
7,
16818,
2599,
198,
220,
220,
220,
37227,
22887,
223,
35318,
163,
... | 1.563202 | 712 |
from django.test import TestCase
from django.utils import timezone
from .models import Game, Platform, Genre
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
764,
27530,
1330,
3776,
11,
19193,
11,
5215,
260,
198
] | 3.666667 | 30 |
# third party
import numpy as np
import pytest
# syft absolute
import syft as sy
from syft.core.adp.data_subject import DataSubject
from syft.core.tensor.autodp.phi_tensor import PhiTensor as PT
from syft.core.tensor.tensor import Tensor
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
def dims() -> int:
"""This generates a random integer for the number of dimensions in our testing tensors"""
dims = int(max(3, np.random.randint(10) + 3)) # Avoid size 0 and 1
# Failsafe
if dims < 2:
dims += 3
assert dims > 1, "Tensor not large enough for several tests."
return dims
@pytest.fixture
def reference_data(highest, dims) -> np.ndarray:
"""This generates random data to test the equality operators"""
reference_data = np.random.randint(
low=-highest, high=highest, size=(dims, dims), dtype=np.int32
)
assert dims > 1, "Tensor not large enough"
return reference_data
@pytest.fixture
def upper_bound(reference_data: np.ndarray, highest: int) -> np.ndarray:
"""This is used to specify the max_vals that is either binary or randomly generated b/w 0-1"""
max_values = np.ones_like(reference_data) * highest
return max_values
@pytest.fixture
def lower_bound(reference_data: np.ndarray, highest: int) -> np.ndarray:
"""This is used to specify the min_vals that is either binary or randomly generated b/w 0-1"""
min_values = np.ones_like(reference_data) * -highest
return min_values
@pytest.fixture
def reference_binary_data(dims: int) -> np.ndarray:
"""Generate binary data to test the equality operators with bools"""
binary_data = np.random.randint(2, size=(dims, dims))
return binary_data
def test_eq(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Test equality between two identical PhiTensors"""
reference_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
# Duplicate the tensor and check if equality holds
same_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert (
reference_tensor == same_tensor
).all(), "Equality between identical PTs fails"
def test_add_wrong_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Ensure that addition with incorrect types aren't supported"""
reference_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
with pytest.raises(NotImplementedError):
reference_tensor + "some string"
reference_tensor + dict()
# TODO: Double check how tuples behave during addition/subtraction with np.ndarrays
def test_add_tensor_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
highest: int,
dims: int,
) -> None:
"""Test addition of a PT with various other kinds of Tensors"""
# TODO: Add tests for GammaTensor, etc when those are built out.
reference_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
simple_tensor = Tensor(
child=np.random.randint(
low=-highest, high=highest, size=(dims + 10, dims + 10), dtype=np.int64
)
)
with pytest.raises(NotImplementedError):
result = reference_tensor + simple_tensor
assert isinstance(result, PT), "PT + Tensor != PT"
assert (
result.max_vals == reference_tensor.max_vals + simple_tensor.child.max()
), "PT + Tensor: incorrect max_val"
assert (
result.min_vals == reference_tensor.min_vals + simple_tensor.child.min()
), "PT + Tensor: incorrect min_val"
def test_add_single_data_subjects(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Test the addition of PhiTensors"""
tensor1 = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
tensor2 = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
result = tensor2 + tensor1
assert isinstance(result, PT), "Addition of two PTs is wrong type"
assert (
result.max_vals == 2 * upper_bound
).all(), "Addition of two PTs results in incorrect max_val"
assert (
result.min_vals == 2 * lower_bound
).all(), "Addition of two PTs results in incorrect min_val"
# Try with negative values
tensor3 = PT(
child=reference_data * -1.5,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
result = tensor3 + tensor1
assert isinstance(result, PT), "Addition of two PTs is wrong type"
assert (
result.max_vals == tensor3.max_vals + tensor1.max_vals
).all(), "PT + PT results in incorrect max_val"
assert (
result.min_vals == tensor3.min_vals + tensor1.min_vals
).all(), "PT + PT results in incorrect min_val"
def test_serde(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Test basic serde for PT"""
tensor1 = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
ser = sy.serialize(tensor1)
de = sy.deserialize(ser)
assert de == tensor1
assert (de.child == tensor1.child).all()
assert (de.min_vals == tensor1.min_vals).all()
assert (de.max_vals == tensor1.max_vals).all()
assert de.data_subjects == tensor1.data_subjects
assert np.shares_memory(tensor1.child.child, tensor1.child.child)
assert not np.shares_memory(de.child.child, tensor1.child.child)
def test_copy(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Test copy for PT"""
reference_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
# Copy the tensor and check if it works
copy_tensor = reference_tensor.copy()
assert (reference_tensor == copy_tensor).all(), "Copying of the PT fails"
def test_copy_with(
reference_data: np.ndarray,
reference_binary_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Test copy_with for PT"""
reference_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
reference_binary_tensor = PT(
child=reference_binary_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
encode_func = reference_tensor.child.encode
# Copy the tensor and check if it works
copy_with_tensor = reference_tensor.copy_with(encode_func(reference_data))
copy_with_binary_tensor = reference_tensor.copy_with(
encode_func(reference_binary_data)
)
assert (
reference_tensor == copy_with_tensor
).all(), "Copying of the PT with the given child fails"
assert (
reference_binary_tensor == copy_with_binary_tensor
).all(), "Copying of the PT with the given child fails"
def test_ne_vals(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Test inequality between two different PhiTensors"""
# TODO: Add tests for GammaTensor when having same values but different entites.
reference_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
comparison_tensor = PT(
child=reference_data + 1,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert (
reference_tensor != comparison_tensor
).all(), "Inequality between different PTs fails"
def test_neg(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: DataSubject,
) -> None:
"""Test neg for PT"""
reference_tensor = PT(
child=reference_data,
data_subjects=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
neg_tensor = reference_tensor.__neg__()
assert (neg_tensor.child == reference_tensor.child * -1).all()
assert (neg_tensor.min_vals == reference_tensor.max_vals * -1).all()
assert (neg_tensor.max_vals == reference_tensor.min_vals * -1).all()
assert neg_tensor.shape == reference_tensor.shape
| [
2,
2368,
2151,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
2,
827,
701,
4112,
198,
11748,
827,
701,
355,
827,
198,
6738,
827,
701,
13,
7295,
13,
324,
79,
13,
7890,
62,
32796,
1330,
6060,
19776,
198,
6738,
... | 2.418264 | 3,756 |
"""
configure.py
Takes user input file and returns settings namespace object
"""
import argparse
import pytraj # to support pytraj calls in input file
import mdtraj # to support mdtraj calls in the input file
import numpy # to support numpy calls in input file
import numpy as np # to support numpy calls even if called as np
import sys
import os
import shutil
import pickle
from jinja2 import Environment, FileSystemLoader
import typing
import pydantic
def configure(input_file, user_working_directory=''):
"""
Configure the settings namespace based on the config file.
Parameters
----------
input_file : str
Name of the configuration file to read
user_working_directory : str
User override for working directory (overrides value in input_file), ignored if set to ''
Returns
-------
settings : argparse.Namespace
Settings namespace object
"""
# Import config file line-by-line using exec()
try:
lines = open(input_file, 'r').readlines()
except FileNotFoundError:
try:
lines = open('atesa/' + input_file, 'r').readlines() # for testing
except:
lines = open(input_file, 'r').readlines() # to reproduce original error
line_index = 0
for line in lines: # each line in the input file is just python code setting a variable;
line_index += 1
try:
exec(line) # this means that comments are supported using '#' and whitespace is ignored.
except Exception as e:
raise ValueError('error raised while reading line ' + str(int(line_index)) + ' of configuration file '
+ input_file + ': ' + str(e))
# Define settings namespace to store all these variables
config_dict = {}
config_dict.update(locals())
settings = argparse.Namespace()
settings.__dict__.update(Settings(**config_dict))
# Override working directory if provided with user_working_directory
if user_working_directory:
settings.working_directory = user_working_directory
# Format directories properly (no trailing '/')
if settings.working_directory[-1] == '/':
settings.working_directory = settings.working_directory[:-1]
if settings.path_to_input_files[-1] == '/':
settings.path_to_input_files = settings.path_to_input_files[:-1]
if settings.path_to_templates[-1] == '/':
settings.path_to_templates = settings.path_to_templates[:-1]
if settings.storage_directory[-1] == '/':
settings.storage_directory = settings.storage_directory[:-1]
# Set Jinja2 environment
if os.path.exists(settings.path_to_templates):
settings.env = Environment(loader=FileSystemLoader(settings.path_to_templates))
else:
raise FileNotFoundError('could not locate templates folder: ' + settings.path_to_templates)
return settings
if __name__ == "__main__":
configure('','')
| [
37811,
198,
11250,
495,
13,
9078,
198,
51,
1124,
2836,
5128,
2393,
290,
5860,
6460,
25745,
2134,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
12972,
9535,
73,
220,
220,
220,
220,
220,
220,
1303,
284,
1104,
12972,
9535,
73,
38... | 2.814745 | 1,058 |
# Arvore Binaria de Busca em Python
# Kelvin Salton do Prado
# 2015
############# Metodos de Busca #############
############################################
############ Metodo de Insercao ############
############################################
########### Metodos de Impressao ###########
ImprimeArvore = ''
############################################
######### Acha a Altura da Arvore ##########
############################################
########### Metodos de Exclusao ############
############################################
arvore = Arvore(3); # Cria arvore (raiz)
# Insere varios valores na arvore
arvore = insere(arvore, 2);
arvore = insere(arvore, 1);
arvore = insere(arvore, 4);
arvore = insere(arvore, 6);
arvore = insere(arvore, 8);
arvore = insere(arvore, 5);
arvore = insere(arvore, 7);
arvore = insere(arvore, 0);
buscaRecursiva(arvore, 6) # Busca que imprime na propria funcao
if buscaLinear(arvore, 6) is not None: # Retorna o NO ou None se nao encontrou
print 'valor encontrado\n'
else:
print 'valor nao encontrado\n'
print 'Altura : %d' % altura(arvore) # Retorna a altura da arvore (int)
# Exclui varios valores
exclui(arvore, 7)
exclui(arvore, 5)
exclui(arvore, 8)
exclui(arvore, 3)
# Chama os metodos de impressao
ImprimeArvore = ""
preOrdem(arvore)
print "PreOrdem: " + ImprimeArvore + "\n"
ImprimeArvore = ""
emOrdem(arvore)
print "EmOrdem: " + ImprimeArvore + "\n"
ImprimeArvore = ""
posOrdem(arvore)
print "PosOrdem: " + ImprimeArvore + "\n"
# Mostra a altura da arvore apos remover os itens
print 'Altura : %d' % altura(arvore)
| [
201,
198,
2,
943,
85,
382,
20828,
10312,
390,
5869,
6888,
795,
11361,
201,
198,
2,
46577,
4849,
1122,
466,
1736,
4533,
201,
198,
2,
1853,
201,
198,
201,
198,
7804,
4242,
2,
3395,
375,
418,
390,
5869,
6888,
1303,
7804,
4242,
201,
1... | 2.437685 | 674 |
"""
Use the turtle module to draw a tree recursively.
"""
import turtle
from random import randrange
main()
| [
37811,
198,
11041,
262,
28699,
8265,
284,
3197,
257,
5509,
664,
1834,
2280,
13,
198,
37811,
628,
198,
11748,
28699,
198,
6738,
4738,
1330,
43720,
9521,
628,
628,
198,
12417,
3419,
198
] | 3.5625 | 32 |
#!/usr/bin/env python
"""An ivyprobe script for ivy-python
"""
import getopt
import os
import signal
import sys
import time
from ivy.std_api import *
try:
import readline
except ImportError:
pass
# The next line taken from https://pythonhosted.org/six/
# Copyright (c) 2010-2016 Benjamin Peterson
PY2 = sys.version_info[0] == 2
IVYAPPNAME = 'pyivyprobe'
on_die_accepted = False
if __name__ == '__main__':
from ivy.ivy import ivylogger
import logging
ivybus = ''
readymsg = '[%s is ready]' % IVYAPPNAME
verbose = 0
showbind = False
toggle_showbind = False
ivylogger.setLevel(logging.WARN)
try:
optlist, left_args = \
getopt.getopt(sys.argv[1:],
'hb:n:Vvs',
['help', 'ivybus=', 'name=', 'version', 'verbose',
'show-bindings'])
except getopt.GetoptError:
usage(sys.argv[0])
sys.exit(2)
for opt, arg in optlist:
if opt in ('-h', '--help'):
usage(sys.argv[0])
sys.exit()
elif opt in ('-b', '--ivybus'):
ivybus = arg
elif opt in ('-V', '--version'):
import ivy
info('ivyprobe supplied with ivy-python library version%s',
ivy.__version__)
sys.exit()
elif opt in ('-v', '--verbose'):
if not verbose:
ivylogger.setLevel(logging.INFO)
verbose += 1
elif verbose == 1:
ivylogger.setLevel(logging.DEBUG)
verbose += 1
else:
if hasattr(logging, 'TRACE'):
ivylogger.setLevel(logging.TRACE)
elif opt in ('-n', '--name'):
IVYAPPNAME = arg
elif opt in ('-s', '--showbind'):
showbind = True
info('Broadcasting on %s',
ivybus or os.environ.get('IVYBUS') or 'ivydefault')
# initialising the bus
IvyInit(IVYAPPNAME, # application name for Ivy
readymsg, # ready message
0, # parameter ignored
on_connection_change, # handler called on connection/disconnection
on_die) # handler called when a die msg is received
_set_showbind(showbind)
# starting the bus
IvyStart(ivybus)
# bind the supplied regexps
for regexp in left_args:
IvyBindMsg(on_msg, regexp)
# direct msg
IvyBindDirectMsg(on_direct_msg)
# pong
IvyBindPong(on_pong)
# Ok, time to go
time.sleep(0.5)
info('Go ahead! (type .help for help on commands)')
while 1:
if toggle_showbind:
toggle_showbind = False
showbind = not showbind
_set_showbind(showbind)
try:
if PY2:
msg = raw_input('')
else:
msg = input('')
except (EOFError, KeyboardInterrupt):
msg = '.quit'
if msg == '.help':
info("""Available commands:
.bind 'regexp' - add a msg to receive. The displayed index
can be supplied to .remove
.die appname - send die msg to appname
.dieall-yes-i-am-sure - send die msg to all applications
.direct appname id arg - send direct msg to appname
.help - print this message
.error appname id err_msg - send an error msg to an appname
.ping appname - send a ping to an appname
.quit - terminate this application
.remove idx - remove a binding (see .bind, .regexps)
.regexps - show current bindings
.regexps appname - show all bindings registered for appname
.showbind - show/hide bindings (toggle)
.where appname - print the host for appname
.who - display the names of all applications on
the bus
Everything that is not a (valid) command is interpreted as a message and sent to the appropriate applications on the bus.
""")
elif msg[:5] == '.bind':
regexp = msg[6:]
if not regexp:
print('Error: missing argument')
info('Bound regexp, id: %d', IvyBindMsg(on_msg, regexp))
elif msg == '.die-all-yes-i-am-sure':
app_names = IvyGetApplicationList()
if not app_names:
info('No application on the bus')
continue
for app_name in IvyGetApplicationList():
app = IvyGetApplication(app_name)
if not app:
info('No application %s' % app_name)
else:
IvySendDieMsg(app)
elif msg[:4] == '.die':
app_name = msg[5:]
app = IvyGetApplication(app_name)
if app is None:
info('No application named %s', app_name)
continue
IvySendDieMsg(app)
elif msg[:7] == '.direct':
try:
app_name, num = msg[8:].split()[:2]
arg = ' '.join(msg[8:].split()[2:])
if not arg:
raise ValueError
except ValueError:
print('Error: wrong number of parameters')
continue
app = IvyGetApplication(app_name)
if app is None:
info('No application named %s', app_name)
continue
IvySendDirectMsg(app, num, arg)
elif msg[:6] == '.error':
try:
app_name, num = msg[7:].split()[:2]
err_msg = ' '.join(msg[7:].split()[2:])
if not err_msg:
raise ValueError
except ValueError:
print('Error: wrong number of parameters')
continue
app = IvyGetApplication(app_name)
if app is None:
info('No application named %s', app_name)
continue
IvySendError(app, num, err_msg)
elif msg[:7] == '.remove':
try:
regexp_id = int(msg[8:])
info('Removed %d:%s', regexp_id, IvyUnbindMsg(regexp_id))
except KeyError:
info('No such binding')
except ValueError:
info('Error: expected an integer')
elif msg[:5] == '.ping':
app_name = msg[6:]
app = IvyGetApplication(app_name)
if app is None:
info("No application named '%s'", app_name)
continue
IvySendPing(app)
info('Sent PING')
elif msg == '.regexps':
from ivy import std_api
info('Our subscriptions: %s',
', '.join(["%s:'%s'" % (_id, regexp) for _id, regexp in std_api._IvyServer.get_subscriptions()]))
elif msg[:9] == '.regexps ':
app_name = msg[9:]
app = IvyGetApplication(app_name)
if app is None:
info("Error: no application found with name '%s'"%app_name)
else:
info('Subscriptions for %s: %s',
app_name, ', '.join(["%s:'%s'" % (_id, regexp) for _id, regexp in IvyGetApplicationMessages(app)]))
elif msg[:9] == '.showbind':
toggle_showbind = True
elif msg == '.quit':
# Do not IvyStop if we were already notified that the
# agent is about to die
if not on_die_accepted:
IvyStop()
break
elif msg[:6] == '.where':
app_name = msg[7:]
app = IvyGetApplication(app_name)
if app is None:
info('No application named %s', app)
continue
info('Application %s on %s:%s', app_name, app.ip, app.port)
elif msg == '.who':
print(IvyGetApplicationList())
else:
info('Sent to %s peers' % IvySendMsg(msg))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
2025,
21628,
88,
1676,
1350,
4226,
329,
21628,
88,
12,
29412,
198,
37811,
198,
11748,
651,
8738,
198,
11748,
28686,
198,
11748,
6737,
198,
11748,
25064,
198,
11748,
640,
198,
198,... | 1.8794 | 4,403 |
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: Nicolas P. Rougier
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
# Data processing
V, F = [], []
with open("bunny.obj") as f:
for line in f.readlines():
if line.startswith("#"):
continue
values = line.split()
if not values:
continue
if values[0] == "v":
V.append([float(x) for x in values[1:4]])
elif values[0] == "f":
F.append([int(x) for x in values[1:4]])
V, F = np.array(V), np.array(F) - 1
V = (V - (V.max(0) + V.min(0)) / 2) / max(V.max(0) - V.min(0))
model = xrotate(20) @ yrotate(45)
view = translate(0, 0, -3.5)
proj = perspective(25, 1, 1, 100)
MVP = proj @ view @ model
V = np.c_[V, np.ones(len(V))] @ MVP.T
V /= V[:, 3].reshape(-1, 1)
V = V[F]
T = V[:, :, :2]
Z = -V[:, :, 2].mean(axis=1)
zmin, zmax = Z.min(), Z.max()
Z = (Z - zmin) / (zmax - zmin)
C = plt.get_cmap("magma")(Z)
I = np.argsort(Z)
T, C = T[I, :], C[I, :]
# Rendering
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0, 0, 1, 1], xlim=[-1, +1], ylim=[-1, +1], aspect=1, frameon=False)
collection = PolyCollection(
T, closed=True, linewidth=0.1, facecolor=C, edgecolor="black"
)
ax.add_collection(collection)
plt.savefig("../../figures/threed/bunny-8.pdf")
plt.show()
| [
2,
16529,
10541,
198,
2,
11851,
25,
220,
220,
22060,
15612,
5612,
532,
11361,
1222,
6550,
29487,
8019,
198,
2,
6434,
25,
220,
29737,
350,
13,
13876,
70,
959,
198,
2,
13789,
25,
347,
10305,
198,
2,
16529,
10541,
198,
11748,
299,
3215... | 2.292467 | 677 |
#!/usr/bin/env python
# -*- encoding: utf-8
"""
On sites where I don't want to use my standard username (@alexwlchan) --
for example, if I'm starring content but not creating anything -- I create
alliterative usernames from the names provided by Docker.
e.g. on GitHub I might use "goofy_galileo"
This script generates an alliterative username for me.
Usage: pass a single letter as first argument, and it offers five suggestions:
$ ./usernames.py a
angry_almeida
admiring_ardinghelli
admiring_austin
amazing_aryabhata
admiring_albattani
"""
import os
import random
import sys
import tempfile
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
2202,
5043,
810,
314,
836,
470,
765,
284,
779,
616,
3210,
20579,
4275,
1000,
87,
40989,
3147,
8,
1377,
198,
1640,
1672,... | 3 | 256 |
#!/usr/bin/env python
domain_name = os.environ['DOMAIN_NAME']
admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS']
admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT']
admin_username = os.environ['ADMIN_USERNAME']
admin_password = os.environ['ADMIN_PASSWORD']
######################################################################
######################################################################
admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port
connect(admin_username, admin_password, admin_server_url)
edit()
startEdit()
set_domain_web_app(domain_name)
save()
activate()
exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
27830,
62,
3672,
796,
28686,
13,
268,
2268,
17816,
39170,
29833,
62,
20608,
20520,
198,
28482,
62,
15388,
62,
4868,
268,
62,
21975,
796,
28686,
13,
268,
2268,
17816,
2885,
23678,
... | 3.227053 | 207 |
import argparse
PSSTGUY_TEMPLATE = """
┻┳|
┻┳|
┳┻|
┳┻|
┻┳|
┳┻|
┻┳|
┳┻| _
┻┳|•.•) {SECRET_TXT}
┳┻|⊂ノ
┻┳|
"""
if __name__ == '__main__':
cli()
| [
11748,
1822,
29572,
628,
198,
3705,
2257,
38022,
56,
62,
51,
3620,
6489,
6158,
796,
37227,
198,
6552,
119,
6552,
111,
91,
220,
198,
6552,
119,
6552,
111,
91,
220,
198,
6552,
111,
6552,
119,
91,
220,
198,
6552,
111,
6552,
119,
91,
... | 1.205882 | 136 |
# hook gets lauches from an api
import os
import pathlib
import subprocess
import re
import time
import posixpath
import requests
import airflow
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
from airflow.utils import apply_defaults
from airflow.utils.log.logging_mixin import LoggingMixin
class LaunchHook(BaseHook, LoggingMixin):
"""
"""
template_fields = ('_query', '_destination')
@apply_defaults
| [
2,
8011,
3011,
300,
559,
2052,
422,
281,
40391,
198,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
850,
14681,
198,
11748,
302,
198,
11748,
640,
198,
198,
11748,
1426,
844,
6978,
198,
11748,
7007,
198,
11748,
45771,
198,
6738,
... | 3.263889 | 144 |
__author__ = 'Robert Meyer'
from pypet import Trajectory, pypetexceptions, PickleResult
from pypet.tests.testutils.data import TrajectoryComparator
from pypet.tests.testutils.ioutils import make_temp_dir, run_suite, parse_args
import pypet.pypetexceptions as pex
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args) | [
834,
9800,
834,
796,
705,
19156,
23975,
6,
198,
198,
6738,
279,
4464,
316,
1330,
4759,
752,
652,
11,
279,
4464,
316,
1069,
11755,
11,
12346,
293,
23004,
198,
6738,
279,
4464,
316,
13,
41989,
13,
9288,
26791,
13,
7890,
1330,
4759,
75... | 2.724409 | 127 |
import torch
import torch.nn as nn
import torch.nn.init as init
import numpy as np
import pandas as pd
import torchvision
import cv2
from torchvision import transforms, utils, models
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torchvision.models as models
from os import walk
import torch.optim as optim
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from torch.utils.tensorboard import SummaryWriter
import argparse
from datetime import datetime
from Tooth_Dataset import ToothDataset
from data_load import tooth_data_load, other_tooth_data_load
learning_rate = 0.001
num_epochs = 30
num_classes = 2
writer = SummaryWriter('log')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tooth NETWORK TRAINER AND TESTER')
parser.add_argument("--path", default="model4_multi.pth", type=str, help="Frozen model path")
parser.add_argument("--mode", default="train",type=str, help="eval or train")
parser.add_argument("--lr", default=0.001, type=float, help="Learning Rate")
parser.add_argument("--ne", default=100,type=int, help="Number of Epochs")
parser.add_argument("--ft", default="Fine", type=str, help="Fine Tuning")
args = parser.parse_args()
learning_rate = args.lr
num_epochs = args.ne
ft = args.ft
print("-- Sequence Started --")
train_data, val_data = tooth_data_load()
#feats = extract_features("Alexnet",train_data)
#, "alexnet", ,
nets = ["alexnet","resnet", "squeezenet", "densenet","mobilenet_v2", "shufflenet"]
for nt in nets:
if ft == "Fine":
model,lastlay = initialize_model(nt, num_classes, False, False)
model = model.cuda()
print("********"+ nt + " ***Training is started*********")
net = train(train_data, model,nt)
validation(net,val_data, nt)
feats, ids, tooths = extract_features(net,val_data,lastlay)
feats.to_csv(nt+"_"+"feats.csv")
ids.to_csv(nt+"_"+"ids.csv")
tooths.to_csv(nt+"_"+"tooths.csv")
else:
print("********* FineTune on proccess")
model,lastlay = initialize_model(nt, num_classes, False, True)
net = model.cuda()
feats, ids, tooths = extract_features(net,val_data,lastlay)
feats.to_csv(nt+"_"+"feats_withoudfine.csv")
ids.to_csv(nt+"_"+"ids_withoudfine.csv")
tooths.to_csv(nt+"_"+"tooths_withoudfine.csv")
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
15003,
355,
2315,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
10178,
198,
11748,
269,
85,
17,
... | 2.574642 | 978 |
import random as r
from pprint import pprint
N = r.randint(2,10)
taken = [False for i in range(N)]
areFriends = setFriends(N)
pprint(setFriends(5))
print(N)
print(countParing(taken)) | [
11748,
4738,
355,
374,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
45,
796,
374,
13,
25192,
600,
7,
17,
11,
940,
8,
198,
83,
1685,
796,
685,
25101,
329,
1312,
287,
2837,
7,
45,
15437,
198,
198,
533,
36705,
796,
900,
36705,
... | 2.569444 | 72 |
import os
import logging
from subprocess import call as system_call
if os.name == "nt":
else:
def find_sensitive_path(insensitive_path, dir=None):
"""
Borrowed from https://stackoverflow.com/a/37708342
Returns a case-sensitive filepath when given a case-insensitive path
"""
if dir is None:
dir = os.getcwd()
insensitive_path = insensitive_path.strip(os.path.sep)
parts = insensitive_path.split(os.path.sep)
next_name = parts[0]
for name in os.listdir(dir):
if next_name.lower() == name.lower():
improved_path = os.path.join(dir, name)
if len(parts) == 1:
return improved_path
else:
return find_sensitive_path(os.path.sep.join(parts[1:]), improved_path)
# os.path.exists returns False when given an empty string, so...
return ""
| [
11748,
28686,
198,
11748,
18931,
198,
6738,
850,
14681,
1330,
869,
355,
1080,
62,
13345,
628,
198,
198,
361,
28686,
13,
3672,
6624,
366,
429,
1298,
628,
198,
17772,
25,
628,
198,
4299,
1064,
62,
30176,
62,
6978,
7,
1040,
18464,
62,
... | 2.415042 | 359 |
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import pytest
#----------------------------------------------------------
# Simple test function
#----------------------------------------------------------
#----------------------------------------------------------
# A function to show assertion introspection
#----------------------------------------------------------
#----------------------------------------------------------
# A function that verifies an exception
#----------------------------------------------------------
| [
201,
198,
2,
43801,
438,
201,
198,
2,
1846,
3742,
201,
198,
2,
43801,
438,
201,
198,
201,
198,
11748,
12972,
9288,
201,
198,
201,
198,
2,
43801,
438,
201,
198,
2,
17427,
1332,
2163,
201,
198,
2,
43801,
438,
201,
198,
201,
198,
2... | 7.238636 | 88 |
from time import sleep
from pytest import fail
def poll_result(fn, result_test_fn, *, max_checks=5, interval=1):
"""Polling utility for cases where we need to wait for BigchainDB
processing. After 'max_checks' attempts, will fail the test with the
last result.
Args:
fn (func): polling function to invoke
result_test_fn (func): test function to validate the result of
the polling function; return true if the result is valid and
can be returned
max_checks (int): maximum poll attempts before failing test
interval (num): interval between each poll attempt
Returns:
(any): the result of 'fn' if it passed validation
"""
for _ in range(max_checks):
try:
result = fn()
except Exception:
# Just fail this polling instance and try again
pass
else:
if result_test_fn(result):
return result
sleep(interval)
fail("Polling result failed with result: '{}'".format(result))
| [
6738,
640,
1330,
3993,
198,
198,
6738,
12972,
9288,
1330,
2038,
628,
628,
198,
4299,
3278,
62,
20274,
7,
22184,
11,
1255,
62,
9288,
62,
22184,
11,
1635,
11,
3509,
62,
42116,
28,
20,
11,
16654,
28,
16,
2599,
198,
220,
220,
220,
372... | 2.605392 | 408 |
from requests import Session
import xml.etree.ElementTree as ET
import json
from ..tools import (xmltools, log)
from outpost24hiabclient.clients.hiabclient import HiabClient
from ..entities.scanner import Scanner
| [
6738,
7007,
1330,
23575,
201,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
201,
198,
11748,
33918,
201,
198,
6738,
11485,
31391,
1330,
357,
87,
76,
2528,
10141,
11,
2604,
8,
201,
198,
6738,
38327,
1731,
5303,
397,
16... | 3.054054 | 74 |
import unittest
from social_networks.twitter_controller.twitter_api import *
@unittest.skip
# @unittest.skip
# @unittest.skip
@unittest.skip
| [
11748,
555,
715,
395,
198,
6738,
1919,
62,
3262,
5225,
13,
6956,
62,
36500,
13,
6956,
62,
15042,
1330,
1635,
628,
198,
31,
403,
715,
395,
13,
48267,
628,
198,
198,
2,
2488,
403,
715,
395,
13,
48267,
198,
198,
2,
2488,
403,
715,
... | 2.660714 | 56 |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pyro.distributions as dist
from .reparam import Reparam
class LinearHMMReparam(Reparam):
"""
Auxiliary variable reparameterizer for
:class:`~pyro.distributions.LinearHMM` random variables.
This defers to component reparameterizers to create auxiliary random
variables conditioned on which the process becomes a
:class:`~pyro.distributions.GaussianHMM` . If the ``observation_dist`` is a
:class:`~pyro.distributions.TransformedDistribution` this reorders those
transforms so that the result is a
:class:`~pyro.distributions.TransformedDistribution` of
:class:`~pyro.distributions.GaussianHMM` .
This is useful for training the parameters of a
:class:`~pyro.distributions.LinearHMM` distribution, whose
:meth:`~pyro.distributions.LinearHMM.log_prob` method is undefined. To
perform inference in the presence of non-Gaussian factors such as
:meth:`~pyro.distributions.Stable`, :meth:`~pyro.distributions.StudentT` or
:meth:`~pyro.distributions.LogNormal` , configure with
:class:`~pyro.infer.reparam.studentt.StudentTReparam` ,
:class:`~pyro.infer.reparam.stable.StableReparam` ,
:class:`~pyro.infer.reparam.stable.SymmetricStableReparam` , etc. component
reparameterizers for ``init``, ``trans``, and ``scale``. For example::
hmm = LinearHMM(
init_dist=Stable(1,0,1,0).expand([2]).to_event(1),
trans_matrix=torch.eye(2),
trans_dist=MultivariateNormal(torch.zeros(2), torch.eye(2)),
obs_matrix=torch.eye(2),
obs_dist=TransformedDistribution(
Stable(1.5,-0.5,1.0).expand([2]).to_event(1),
ExpTransform()))
rep = LinearHMMReparam(init=SymmetricStableReparam(),
obs=StableReparam())
with poutine.reparam(config={"hmm": rep}):
pyro.sample("hmm", hmm, obs=data)
:param init: Optional reparameterizer for the initial distribution.
:type init: ~pyro.infer.reparam.reparam.Reparam
:param trans: Optional reparameterizer for the transition distribution.
:type trans: ~pyro.infer.reparam.reparam.Reparam
:param obs: Optional reparameterizer for the observation distribution.
:type obs: ~pyro.infer.reparam.reparam.Reparam
"""
| [
2,
15069,
25767,
669,
284,
262,
44954,
1628,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
12972,
305,
13,
17080,
2455,
507,
355,
1233,
198,
198,
6738,
764,
260,
17143,
1330,
1432,
... | 2.48488 | 959 |
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628,
220,
220,
220,
220,
198
] | 2.92 | 25 |
from setuptools import find_packages, setup
setup(
name="mapupdater",
packages=find_packages() + ["twisted.plugins"],
install_requires=[
"autobahn == 0.13.0",
"twisted >= 15.0.0",
"treq",
"bs4",
"html5lib",
"service_identity >= 14.0.0"
],
include_package_data=True
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
628,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
8899,
929,
67,
729,
1600,
198,
220,
220,
220,
10392,
28,
19796,
62,
43789,
3419,
1343,
14631,
4246,
6347,
13,
37390,
... | 2.060976 | 164 |
"""
:Created: 18 September 2016
:Author: Lucas Connors
"""
from django.apps import apps
from django.contrib.auth.models import User
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.test import TransactionTestCase
from django.utils.text import slugify
from pigeon.test import RenderTestCase
from basecategory.models import Platform
from games.models import Game, GameCategory, GameVideo
from productions.models import Production, ProductionCategory
from software.models import Software, SoftwareButton
class MigrationTestCase(TransactionTestCase):
"""
Ref: https://www.caktusgroup.com/blog/2016/02/02/writing-unit-tests-django-migrations/
"""
migrate_from = None
migrate_to = None
@property
| [
37811,
198,
25,
41972,
25,
1248,
2693,
1584,
198,
25,
13838,
25,
15257,
20776,
669,
198,
198,
37811,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
6725,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
... | 3.493274 | 223 |
# The power set of a set is the set of all its subsets.
# Write a function that, given a set, generates its power set.
# For example, given the set {1, 2, 3},
# it should return {{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}}.
import math
if __name__ == "__main__":
inp = [1, 2, 3]
print "Power set:", power_set(inp, len(inp))
| [
2,
383,
1176,
900,
286,
257,
900,
318,
262,
900,
286,
477,
663,
6352,
1039,
13,
198,
2,
19430,
257,
2163,
326,
11,
1813,
257,
900,
11,
18616,
663,
1176,
900,
13,
198,
2,
1114,
1672,
11,
1813,
262,
900,
1391,
16,
11,
362,
11,
5... | 2.471429 | 140 |
#!/usr/bin/env python3
# -*- coding: ascii -*-
from __future__ import division, with_statement
from setuptools import setup, find_packages
import sys
import io
version = '1.3.0'
author = "Omoto Kenji"
license = "MIT License"
author_email = 'doloopwhile@gmail.com'
with io.open('README.rst', encoding='ascii') as fp:
long_description = fp.read()
install_requires = ["six"]
if sys.version_info < (2, 7):
install_requires = "argparse ordereddict".split()
setup(
packages=find_packages(),
include_package_data=True,
name='PyExecJS',
version=version,
description='Run JavaScript code from Python',
long_description=long_description,
author=author,
author_email=author_email,
url='https://github.com/doloopwhile/PyExecJS',
license=license,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: JavaScript',
],
install_requires=install_requires,
test_suite="test_execjs",
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
355,
979,
72,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
351,
62,
26090,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,... | 2.716698 | 533 |
from __future__ import unicode_literals
from django.test import TestCase
from djangocms_baseplugins.baseplugin.tests.base import BasePluginTestCase
from djangocms_baseplugins.image.cms_plugins import ImagePlugin
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
42625,
648,
420,
907,
62,
8692,
37390,
13,
8692,
33803,
13,
41989,
13,
8692,
1330,
7308,
37233,
14402... | 3.52459 | 61 |
import argparse
import requests
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# Get command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--county", help="county name - default: Cork", default="Cork")
parser.add_argument("--days", help="number of days to display data for - default: 30", type=int, default="30")
args = parser.parse_args()
# Get latest county date in csv format
filename = 'covid-ie-counties.csv'
data_url = 'https://opendata-geohive.hub.arcgis.com/datasets/d9be85b30d7748b5b7c09450b8aede63_0.csv?outSR=%7B%22latestWkid%22%3A3857%2C%22wkid%22%3A102100%7D'
data_content = requests.get(data_url).content
csv_file = open(filename, 'wb')
csv_file.write(data_content)
csv_file.close()
# Filter data by county and number of days
df = pd.read_csv(filename)
df = df.loc[df['CountyName'] == args.county].tail(args.days)
# Converting time stamp to a to datetime e.g. 2020/03/22 00:00:0
df['TimeStamp'] = pd.to_datetime(df['TimeStamp'], format='%Y/%m/%d %H:%M:%S')
# Bar Chart - Cumulative Confirmed Cases By County
plt.bar(df["TimeStamp"], df["ConfirmedCovidCases"])
plt.xlabel('Date')
plt.ylabel('Total Cases')
plt.title('Cumulative Confirmed Cases for ' + args.county + ' for last ' + str(args.days) + ' days')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%b'))
plt.gcf().autofmt_xdate()
plt.show()
# Bar Chart - New Confirmed Cases By County
plt.bar(df["TimeStamp"], df["ConfirmedCovidCases"].diff())
plt.xlabel('Date')
plt.ylabel('Confirmed Cases')
plt.title('New Confirmed Cases for ' + args.county + ' for last ' + str(args.days) + ' days')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%b'))
plt.gcf().autofmt_xdate()
plt.show()
| [
11748,
1822,
29572,
198,
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
19581,
355,
285,
19581,
198,
198,
2,
3497,
3141,
1627,
7159,
... | 2.572271 | 678 |
import numpy as np
def compute_fans(shape):
"""
Taken from Keras compute_fans
"""
if len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
elif len(shape) in {3, 4, 5}:
receptive_field_size = np.prod(shape[:-2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
else:
# No specific assumptions.
fan_in = np.sqrt(np.prod(shape))
fan_out = np.sqrt(np.prod(shape))
return fan_in, fan_out
| [
11748,
299,
32152,
355,
45941,
198,
198,
4299,
24061,
62,
69,
504,
7,
43358,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
30222,
422,
17337,
292,
24061,
62,
69,
504,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
611,
18896,... | 2.060241 | 249 |
"""Discover plugins."""
from functools import partial
from backuppy.location import PathSource, PathTarget, SshTarget, FirstAvailableTarget
from backuppy.notifier import NotifySendNotifier, CommandNotifier, FileNotifier, StdioNotifier
def _new(available_plugin_types, configuration, plugin_type, plugin_configuration_data=None):
"""Create a new plugin instance.
:param available_plugin_types: Iterable
:param configuration: Configuration
:param plugin_type: str
:param plugin_configuration_data: Dict
:return: Any
:raise: ValueError
"""
if plugin_type not in available_plugin_types:
raise ValueError('`Type must be one of the following: %s, but `%s` was given.' % (
', '.join(available_plugin_types.keys()), plugin_type))
return available_plugin_types[plugin_type](configuration, plugin_configuration_data)
def _new_path_location_from_configuration_data(cls, configuration, configuration_data):
"""Parse configuration from raw, built-in types such as dictionaries, lists, and scalars.
:param configuration: Configuration
:param configuration_data: dict
:return: cls
:raise: ValueError
"""
if 'path' not in configuration_data:
raise ValueError('`path` is required.')
path_data = configuration_data['path']
if '/' != path_data[0]:
path_data = '%s/%s' % (configuration.working_directory, path_data)
path = path_data
return cls(configuration.logger, configuration.notifier, path)
def _discover_source_types():
"""Discover the available source types.
:return: Dict
"""
return {
'path': partial(_new_path_location_from_configuration_data, PathSource),
}
new_source = partial(_new, _discover_source_types())
def _new_ssh_target_from_configuration_data(configuration, configuration_data):
"""Parse configuration from raw, built-in types such as dictionaries, lists, and scalars.
:param notifier: Notifier
:param configuration_data: dict
:return: cls
:raise: ValueError
"""
kwargs = {}
required_string_names = ('user', 'host', 'path')
for required_string_name in required_string_names:
if required_string_name not in configuration_data:
raise ValueError('`%s` is required.' % required_string_name)
kwargs[required_string_name] = configuration_data[required_string_name]
if 'port' in configuration_data:
if configuration_data['port'] < 0 or configuration_data['port'] > 65535:
raise ValueError(
'`port` must be an integer ranging from 0 to 65535.')
kwargs['port'] = configuration_data['port']
return SshTarget(configuration.notifier, interactive=configuration.interactive, **kwargs)
def _new_first_available_target_from_configuration_data(configuration, configuration_data):
"""Parse configuration from raw, built-in types such as dictionaries, lists, and scalars.
:param configuration: Configuration
:param configuration_data: dict
:return: cls
:raise: ValueError
"""
targets = []
for target_configuration_data in configuration_data['targets']:
target_configuration_data.setdefault('configuration')
targets.append(
new_target(configuration, target_configuration_data['type'], target_configuration_data['configuration']))
return FirstAvailableTarget(targets)
def _discover_target_types():
"""Discover the available target types.
:return: Dict
"""
return {
'path': partial(_new_path_location_from_configuration_data, PathTarget),
'ssh': _new_ssh_target_from_configuration_data,
'first_available': _new_first_available_target_from_configuration_data,
}
new_target = partial(_new, _discover_target_types())
def _new_command_notifier_from_configuration_data(configuration, configuration_data):
"""Parse configuration from raw, built-in types such as dictionaries, lists, and scalars.
:param configuration: Configuration
:param configuration_data: dict
:return: CommandNotifier
:raise: ValueError
"""
state_args = configuration_data['state'] if 'state' in configuration_data else None
inform_args = configuration_data['inform'] if 'inform' in configuration_data else None
confirm_args = configuration_data['confirm'] if 'confirm' in configuration_data else None
alert_args = configuration_data['alert'] if 'alert' in configuration_data else None
fallback_args = configuration_data['fallback'] if 'fallback' in configuration_data else None
if None in [state_args, inform_args, confirm_args, alert_args] and fallback_args is None:
raise ValueError(
'`fallback` must be given if one or more of the other arguments are omitted.')
return CommandNotifier(state_args, inform_args, confirm_args, alert_args, fallback_args)
def _new_file_notifier_from_configuration_data(configuration, configuration_data):
"""Parse configuration from raw, built-in types such as dictionaries, lists, and scalars.
:param configuration: Configuration
:param configuration_data: dict
:return: CommandNotifier
:raise: ValueError
"""
state_file = open(
configuration_data['state'], mode='a+t') if 'state' in configuration_data else None
inform_file = open(
configuration_data['inform'], mode='a+t') if 'inform' in configuration_data else None
confirm_file = open(
configuration_data['confirm'], mode='a+t') if 'confirm' in configuration_data else None
alert_file = open(
configuration_data['alert'], mode='a+t') if 'alert' in configuration_data else None
fallback_file = open(
configuration_data['fallback'], mode='a+t') if 'fallback' in configuration_data else None
if None in [state_file, inform_file, confirm_file, alert_file] and fallback_file is None:
raise ValueError(
'`fallback` must be given if one or more of the other arguments are omitted.')
return FileNotifier(state_file, inform_file, confirm_file, alert_file, fallback_file)
def _discover_notifier_types():
"""Discover the available notifier types.
:return: Dict
"""
return {
'notify-send': lambda configuration, configuration_data: NotifySendNotifier(),
'command': _new_command_notifier_from_configuration_data,
'stdio': lambda configuration, configuration_data: StdioNotifier(),
'file': _new_file_notifier_from_configuration_data,
}
new_notifier = partial(_new, _discover_notifier_types())
| [
37811,
44596,
20652,
526,
15931,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
6738,
736,
7211,
88,
13,
24886,
1330,
10644,
7416,
11,
10644,
21745,
11,
311,
1477,
21745,
11,
3274,
10493,
21745,
198,
6738,
736,
7211,
88,
13,
1662,
... | 2.962077 | 2,215 |
# -*- coding: utf-8 -*-
"""
.. module:: card
:synopsis: Encapsulate card behavior
.. moduleauthor:: Zach Mitchell <zmitchell@fastmail.com>
"""
from enum import Enum
from functools import total_ordering
from uuid import uuid4
class Card(object):
""" Represents a card in the game
Parameters
----------
card_primitive : CardPrimitive
A CardPrimitive instance from the ORM to convert into a Card
uuid : UUID
A unique identifier to assign to the card
Attributes
----------
uuid : str
A unique identifier for this instance of this card
name : str
A display name for the card
faction : CardFaction
The faction to which the card belongs
base : bool
Is the card a base
outpost : bool
If the card is a base, is it also an outpost
defense : int
If the card is a base, the amount of damage required to destroy it
cost : int
The amount of trade needed to acquire the card
effects_basic : [CardEffect]
The effects activated when the card is played
effects_ally : [CardEffect]
The effects activated by other cards of the same faction
effects_scrap : [CardEffect]
The effects activated when the player chooses to scrap the card
Note
----
Different instances of a single card i.e. different Vipers will have
different UUIDs
"""
class CardFaction(Enum):
"""The set of allowed card factions
Warning
-------
Take care when dealing with ``CardFaction`` and ``FactionPrimitive`` instances.
The ``name`` attribute of ``FactionPrimitive`` is of the form "Star Empire",
whereas the ``name`` attribute of an Enum subclass refers to the name of the
Enum member i.e. "STAR" in the case of ``CardFaction.STAR``.
"""
ALL = 'All'
"""A card, such as Machine Base, that triggers ally abilities for all factions
"""
BLOB = 'Blob'
FEDERATION = 'Federation'
MACHINE = 'Machine Cult'
STAR = 'Star Empire'
UNALIGNED = 'Unaligned'
"""A card with no faction, such as Viper, Scout, or Explorer
"""
@classmethod
def from_primitive(cls, primitive) -> 'CardFaction':
"""Creates the corresponding ``CardFaction`` instance from a
``FactionPrimitive`` instance
"""
return next(f for f in cls if f.value == primitive.name)
def __str__(self) -> str:
"""
Display string representation of the faction
Returns
-------
str
The display-ready string representation of the faction
Examples
--------
>>> fac = CardFaction.STAR
>>> str(fac)
Star Empire
"""
return self.value
@total_ordering
class CardEffect(object):
"""A single effect provided by a card
Identifies an action, any values associated with the action,
and who the effect should be applied to.
Attributes
----------
target : CardTarget
The player who should receive the effect of the card
action : CardAction
The type of action to apply
value : int
The value associated with the action
"""
class CardTarget(Enum):
"""The receiver of a card's effect
"""
OPPONENT = 0
OWNER = 1
@classmethod
@total_ordering
class CardAction(Enum):
"""The type of action that an effect entails
"""
ATTACK = 0
"""Reduce the target's health"""
ACQUIRE = 1
"""Buy a card without paying its cost"""
DESTROY = 2
"""Destroy a target base without spending any attack power"""
DISCARD = 3
"""The target sends a card from his hand to his discard pile"""
DRAW = 4
"""Draw cards from the player's deck"""
HEAL = 5
"""Increase the player's health"""
MONEY = 6
"""Provides currency with which to buy cards"""
SCRAP = 7
"""Permanently discard a card"""
@classmethod
@total_ordering
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
492,
8265,
3712,
2657,
198,
220,
220,
220,
1058,
28869,
24608,
25,
14711,
1686,
5039,
2657,
4069,
198,
492,
8265,
9800,
3712,
18825,
14526,
1279,
89,
2781,
1... | 2.814631 | 1,408 |
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import SQLAlchemyError
db = SQLAlchemy()
class User(db.Model):
"""User object model."""
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(230), unique=True, nullable=False)
pwd_hash = db.Column(db.String(102), nullable=False)
created_on = db.Column(db.DateTime, default=datetime.utcnow)
last_seen = db.Column(db.DateTime)
@staticmethod
@staticmethod
@staticmethod
class Broadcast(db.Model):
"""Broadcast object model."""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
message = db.Column(db.Text(), nullable=False)
created_on = db.Column(db.DateTime, default=datetime.utcnow)
updated_on = db.Column(
db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
@staticmethod
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
7716,
62,
28712,
62,
17831,
11,
2198,
62,
28712,
62,
17831,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
44161,
282,... | 2.685185 | 378 |
### Author - Raghav Maheshwari ###
import notify2
#from wolframalpha import Client
from os import listdir
#app_id = "KHHH6P-HTUJE286QJ"
#client = Client(app_id)
LOGO_PATH = '/home/imnobody0396/Documents/Nancy-The-Virtual-Assistant/logo.png'
#LOGO_PATH = '/home/imnobody0396/Desktop/nancy.png'
HOME_DIR = '/home/imnobody0396/'
DRIVE_DIR = '/media/imnobody0396/'
LOG_DIR = '/home/imnobody0396/Documents/Nancy-The-Virtual-Assistant/'
LYRICS_DIR = '/media/imnobody0396/Green/Videos/Lyrics/'
MP3_DIR = '/media/imnobody0396/Green/Music/'
MP4_DIR = '/media/imnobody0396/Green/Videos/'
IGNORE_DIRS= ['/media/imnobody0396/Green/Matlab', '/media/imnobody0396/Blue/.Tempp']
DRIVES = [dir for dir in listdir(DRIVE_DIR)]
| [
21017,
6434,
532,
371,
10471,
615,
6669,
956,
36599,
2743,
44386,
198,
198,
11748,
19361,
17,
198,
2,
6738,
17481,
859,
26591,
1330,
20985,
198,
6738,
28686,
1330,
1351,
15908,
628,
198,
2,
1324,
62,
312,
796,
366,
42,
16768,
39,
21,
... | 2.357143 | 308 |
doc = {
'name': 'hash_md5',
'inputs' : ['whitelist_csv', 'vt_hash', 'et_hash_dns'],
'id': 'hash',
'cache': None,
}
| [
198,
15390,
796,
1391,
198,
220,
220,
220,
705,
3672,
10354,
705,
17831,
62,
9132,
20,
3256,
198,
220,
220,
220,
705,
15414,
82,
6,
1058,
37250,
1929,
270,
46331,
62,
40664,
3256,
705,
36540,
62,
17831,
3256,
705,
316,
62,
17831,
62... | 1.913043 | 69 |
# -*- coding: utf-8 -*-
from flask import (Flask,
request,
render_template,
redirect,
url_for,
jsonify)
import json
from ming import (create_datastore,
Session,
collection,
Field,
Document,
schema)
from bson.objectid import ObjectId
from models import (BookModel,
InproceedingsModel,
ArticleModel,
Book,
Inproceedings,
Article)
from database_operations import (Enc,
delete_book,
delete_inproceedings,
delete_article,
edit_book,
edit_inproceedings,
edit_article,
add_book,
add_inproceedings,
add_article,
get_book,
get_inproceedings,
get_article,
get_index_content,
list_books,
list_inproceedings,
list_articles)
app = Flask(__name__)
app.config['STATIC_FOLDER'] = 'static'
@app.route('/')
@app.route('/bibtex')
@app.route('/show_single_bibtex/<db_type>/<db_id>')
@app.route('/add_book', methods=['GET', 'POST'])
@app.route('/add_inproceedings', methods=['GET', 'POST'])
@app.route('/add_article', methods=['GET', 'POST'])
@app.route('/delete_book/<b_id>')
@app.route('/delete_inproceedings/<i_id>')
@app.route('/delete_article/<a_id>')
@app.route('/edit_book/<b_id>', methods=['GET', 'POST'])
@app.route('/edit_inproceedings/<i_id>', methods=['GET', 'POST'])
@app.route('/edit_article/<a_id>', methods=['GET', 'POST'])
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42903,
1330,
357,
7414,
2093,
11,
198,
220,
220,
220,
220,
220,
220,
220,
2581,
11,
198,
220,
220,
220,
220,
220,
220,
220,
8543,
62,
28243,
11,
198,
220,
220,... | 2.066929 | 762 |
import os
import logging
import sys
import requests
from collections import OrderedDict
import datetime
import cartosql
# Constants
LATEST_URL = 'https://api.acleddata.com/acled/read?page={page}'
MIN_PAGES = 10
MAX_PAGES = 200
CLEAR_TABLE_FIRST = False
CARTO_TABLE = 'soc_016_conflict_protest_events'
CARTO_SCHEMA = OrderedDict([
("the_geom", "geometry"),
("data_id", "int"),
("event_date", "timestamp"),
("year", "int"),
("time_precision", "int"),
("event_type", "text"),
("actor1", "text"),
("assoc_actor_1", "text"),
("inter1", "int"),
("actor2", "text"),
("assoc_actor_2", "text"),
("inter2", "int"),
("interaction", "int"),
("country", "text"),
("iso3", "text"),
("region", "text"),
("admin1", "text"),
("admin2", "text"),
("admin3", "text"),
("location", "text"),
("geo_precision", "int"),
("time_precision", "int"),
("source", "text"),
("source_scale", "text"),
("notes", "text"),
("fatalities", "int"),
])
UID_FIELD = 'data_id'
TIME_FIELD = 'event_date'
DATA_DIR = 'data'
LOG_LEVEL = logging.INFO
# Limit 1M rows, drop older than 10yrs
MAXROWS = 1000000
#MAXAGE = datetime.datetime.today() - datetime.timedelta(days=3650)
def genUID(obs):
'''Generate unique id'''
return str(obs[UID_FIELD])
def processNewData(exclude_ids):
'''
Iterively fetch parse and post new data
'''
page = 1
new_count = 1
new_ids = []
# get and parse each page; stop when no new results or 200 pages
while page <= MIN_PAGES or new_count and page < MAX_PAGES:
# 1. Fetch new data
logging.info("Fetching page {}".format(page))
r = requests.get(LATEST_URL.format(page=page))
page += 1
# 2. Parse data excluding existing observations
new_rows = []
for obs in r.json()['data']:
uid = genUID(obs)
if uid not in exclude_ids + new_ids:
new_ids.append(uid)
row = []
for field in CARTO_SCHEMA.keys():
if field == 'the_geom':
# construct geojson geometry
geom = {
"type": "Point",
"coordinates": [
obs['longitude'],
obs['latitude']
]
}
row.append(geom)
elif field == UID_FIELD:
row.append(uid)
else:
try:
row.append(obs[field])
except:
logging.debug('{} not available for this row'.format(field))
row.append('')
new_rows.append(row)
# 3. Insert new rows
new_count = len(new_rows)
if new_count:
logging.info('Pushing {} new rows'.format(new_count))
cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(),
CARTO_SCHEMA.values(), new_rows)
return new_ids
##############################################################
# General logic for Carto
# should be the same for most tabular datasets
##############################################################
def createTableWithIndex(table, schema, id_field, time_field=''):
'''Get existing ids or create table'''
cartosql.createTable(table, schema)
cartosql.createIndex(table, id_field, unique=True)
if time_field:
cartosql.createIndex(table, time_field)
def getIds(table, id_field):
'''get ids from table'''
r = cartosql.getFields(id_field, table, f='csv')
return r.text.split('\r\n')[1:-1]
def deleteExcessRows(table, max_rows, time_field, max_age=''):
'''Delete excess rows by age or count'''
num_dropped = 0
if isinstance(max_age, datetime.datetime):
max_age = max_age.isoformat()
# 1. delete by age
if max_age:
r = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age))
num_dropped = r.json()['total_rows']
# 2. get sorted ids (old->new)
r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field),
f='csv')
ids = r.text.split('\r\n')[1:-1]
# 3. delete excess
if len(ids) > max_rows:
r = cartosql.deleteRowsByIDs(table, ids[:-max_rows])
num_dropped += r.json()['total_rows']
if num_dropped:
logging.info('Dropped {} old rows from {}'.format(num_dropped, table))
| [
11748,
28686,
198,
11748,
18931,
198,
11748,
25064,
198,
11748,
7007,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
4818,
8079,
198,
11748,
6383,
418,
13976,
628,
198,
2,
4757,
1187,
198,
43,
1404,
6465,
62,
21886,
796,
705,... | 2.077893 | 2,221 |
from __future__ import absolute_import
from __future__ import unicode_literals
import csv342 as csv
import six
import sys
import time
from datetime import (
datetime,
date,
timedelta,
)
from io import open
from xml.etree import cElementTree as ElementTree
from django.core.management.base import BaseCommand
from corehq.apps.users.util import SYSTEM_USER_ID
from corehq.form_processor.backends.sql.dbaccessors import CaseReindexAccessor
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.util.log import with_progress_bar
from corehq.apps.locations.models import SQLLocation
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.backends.sql.dbaccessors import iter_all_rows
from dimagi.utils.chunked import chunked
from casexml.apps.case.mock import CaseBlock
from six.moves import range
DOMAIN = "icds-cas"
CASE_TYPE = "person"
DATE_OF_REGISTRATION_PROPERTY = "date_of_registration"
PHONE_NUMBER_PROPERTY = "contact_phone_number"
HAS_MOBILE_PROPERTY = "has_mobile"
HAS_MOBILE_PROPERTY_NO_VALUE = "no"
CASE_ITERATION_COUNT = 10000
MAX_RESCUE_EXCEPTIONS_ON_UPDATE = 5
CSV_HEADERS = ['Case Id']
TEST_STATES = [
'Test State',
'Test State 2',
'VL State',
'Trial State',
'Uttar Pradesh_GZB',
'AWW Test State',
]
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
269,
21370,
31575,
355,
269,
21370,
198,
11748,
2237,
198,
11748,
25064,
198,
11748,
640,
198,
198,
6738,
... | 2.753138 | 478 |
# -*- coding: utf-8 -*-
# @createTime : 2020/3/24 13:56
# @author : 王江桥
# @fileName: mq_rpc_server.py
# @email: jiangqiao.wang@mabotech.com
import json
import os
import sys
import pika
from mesService.modules.RabbitMQ import logger
from mesService.modules.ERPInterface.erp_to_mes.deviartion.receive_deviating import DeviationOrder
from mesService.modules.ERPInterface.erp_to_mes.bom.reveive_bom import BomOrder
from mesService.modules.ERPInterface.erp_to_mes.item.reveive_item import ItemOrder
from mesService.modules.ERPInterface.erp_to_mes.wip_order.reveive_wiporder import WipOrderInterface
from mesService.modules.ERPInterface.erp_to_mes.wip_sequence.reveive_sequence import SequenceInterface
from mesService.config import PRESENT_WORK_MODE
from mesService.config import CURRENT_ENV, config_dict
func_dict = {
"DeviationOrder": {"parse_xml": "parse_xml", "insertDatabase": "insertDatabase"},
"BomOrder": {"parse_xml": "parse_xml", "insertDatabase": "insertDatabase"},
"ItemOrder": {"parse_xml": "parse_xml", "insertDatabase": "insertDatabase"},
"WipOrderInterface": {"parse_xml": "analysisFromXML", "insertDatabase": "insertDatabase"},
"SequenceInterface": {"parse_xml": "analysisFromXML", "insertDatabase": "insertDatabase"},
}
# 回调函数
# 对RPC请求队列中的请求进行处理
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
17953,
7575,
220,
220,
220,
1058,
12131,
14,
18,
14,
1731,
1511,
25,
3980,
198,
2,
2488,
9800,
220,
1058,
13328,
23329,
162,
109,
253,
162,
94,
98,
198,
2,
... | 2.690283 | 494 |
import json
import datetime
import click
from tabulate import tabulate
| [
11748,
33918,
198,
11748,
4818,
8079,
198,
11748,
3904,
198,
6738,
7400,
5039,
1330,
7400,
5039,
628
] | 4.235294 | 17 |
from CRL import CRL, DEFAULTS_FILE
from console_utils import console
if __name__ == '__main__':
"""
Example of execution:
$ python crl_console.py --cart_ids 2 4 6 7 8 --energy=21500 --p0=6.52
"d","d_ideal","f","p0","p1","p1_ideal"
0.000372455276869,-0.0669574652539,1.04864377922,6.52,1.24962754472,1.31695746525
"""
console(CRL, DEFAULTS_FILE)
| [
6738,
327,
7836,
1330,
327,
7836,
11,
5550,
7708,
35342,
62,
25664,
198,
6738,
8624,
62,
26791,
1330,
8624,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
17934,
286,... | 2.16092 | 174 |
# built-in
from argparse import REMAINDER, ArgumentParser
# app
from ..actions import make_json
from ..config import builders
from ..repositories import get_repo
from .base import BaseCommand
class PackageSearchCommand(BaseCommand):
"""Search packages on PyPI.org or Anaconda Cloud.
"""
@staticmethod
| [
2,
3170,
12,
259,
198,
6738,
1822,
29572,
1330,
22657,
32,
12115,
1137,
11,
45751,
46677,
198,
198,
2,
598,
198,
6738,
11485,
4658,
1330,
787,
62,
17752,
198,
6738,
11485,
11250,
1330,
31606,
198,
6738,
11485,
260,
1930,
270,
1749,
13... | 3.361702 | 94 |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import pytest
import sys
import tempfile
from pathlib import Path
sys.path.append(Path(__file__).parent.parent.as_posix())
from get_weights import get_weights # noqa
@pytest.mark.parametrize('model_name', ['VGG16', 'VGG19', 'ResNet101'])
@pytest.mark.parametrize('model_name', ["MobileNet", "MobileNetV2", "NASNetMobile", "DenseNet121", "ResNet50",
"Xception", "InceptionV3", "GoogleNet", "InceptionV1"])
| [
2,
15069,
357,
66,
8,
13130,
29681,
7295,
12052,
13,
1439,
2489,
10395,
13,
198,
198,
11748,
12972,
9288,
198,
11748,
25064,
198,
11748,
20218,
7753,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
17597,
13,
6978,
13,
33295,
7,
15... | 2.463768 | 207 |
# Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import alf
from alf.examples import sac_conf
from alf.algorithms.one_step_qr_loss import OneStepQRLoss
from alf.algorithms.qrsac_algorithm import QRSacAlgorithm
from alf.networks import NormalProjectionNetwork, ActorDistributionNetwork, CriticNetwork
from alf.optimizers import AdamTF
from alf.utils.math_ops import clipped_exp
from alf.utils.losses import element_wise_squared_loss
from alf.tensor_specs import TensorSpec
# environment config
alf.config(
'create_environment', env_name="HalfCheetah-v2", num_parallel_environments=1)
# algorithm config
fc_layer_params = (256, 256)
actor_network_cls = partial(
ActorDistributionNetwork,
fc_layer_params=fc_layer_params,
continuous_projection_net_ctor=partial(
NormalProjectionNetwork,
state_dependent_std=True,
scale_distribution=True,
std_transform=partial(
clipped_exp, clip_value_min=-10, clip_value_max=2)))
num_quatiles = 50
critic_network_cls = partial(
CriticNetwork, joint_fc_layer_params=fc_layer_params, output_tensor_spec=TensorSpec((num_quatiles,)))
alf.config('calc_default_target_entropy', min_prob=0.184)
alf.config(
'QRSacAlgorithm',
critic_loss_ctor=OneStepQRLoss,
actor_network_cls=actor_network_cls,
critic_network_cls=critic_network_cls,
target_update_tau=0.005,
actor_optimizer=AdamTF(lr=3e-4),
critic_optimizer=AdamTF(lr=3e-4),
alpha_optimizer=AdamTF(lr=3e-4))
alf.config('QRSacAlgorithm._compute_critics', min_based_on_q_mean=True)
# training config
alf.config('Agent', rl_algorithm_cls=QRSacAlgorithm)
alf.config(
'TrainerConfig',
initial_collect_steps=10000,
mini_batch_length=2,
unroll_length=1,
mini_batch_size=256,
num_updates_per_train_iter=1,
num_iterations=2500000,
num_checkpoints=1,
evaluate=True,
eval_interval=1000,
num_eval_episodes=5,
debug_summaries=False,
random_seed=0,
summarize_grads_and_vars=False,
summary_interval=1000,
replay_buffer_length=1000000)
| [
2,
15069,
357,
66,
8,
33448,
22776,
47061,
290,
8355,
37,
25767,
669,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 2.708502 | 988 |
import unittest, os
import pytest
import numpy as np
import pyuvdata as uv
from .. import pspecbeam, conversions
from hera_pspec.data import DATA_PATH
| [
11748,
555,
715,
395,
11,
28686,
198,
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
14795,
7890,
355,
334,
85,
198,
6738,
11485,
1330,
279,
16684,
40045,
11,
32626,
198,
6738,
339,
430,
62,
862,
43106,
13,
... | 3.234043 | 47 |
# noinspection PyTypeChecker
import torch
import operator
from functools import reduce
from torch import Tensor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE: This is deprecated
# noinspection PyTypeChecker
def build_temporal_clique(node_index_aggr, node_types_aggr, order=1):
"""
Build the temporal fully-connected graph (clique)
:param node_index_aggr: aggregation node as indices, Shape (num_frames, num_aggr_nodes_per_frame)
:param node_types_aggr: aggregation node types as integers, Shape (num_frames, num_aggr_nodes_per_frame)
:param order: the highest connectivity
:return:
"""
assert order > 0, "Expected order > 0 but got {}".format(order)
temporal_edge_index = []
temporal_edge_types = []
# loop through each order level
for odr in range(1, order + 1):
# build clique at each level of order
for start in range(odr):
for frame in list(range(start, len(node_index_aggr), odr))[:-1]:
node_index_curr = node_index_aggr[frame] # node index in the current frame
node_index_next = node_index_aggr[frame + odr] # node index in the next frame
node_types_curr = node_types_aggr[frame] # node types in the current frame
node_types_next = node_types_aggr[frame + odr] # node types in the next frame
# mesh the grids
curr_inds, next_inds = torch.meshgrid(torch.arange(len(node_index_curr)),
torch.arange(len(node_index_next)))
curr_index, next_index = node_index_curr[curr_inds.reshape(1, -1)], \
node_index_next[next_inds.reshape(1, -1)]
source_index, target_index = torch.cat((curr_index, next_index), dim=1), \
torch.cat((next_index, curr_index), dim=1)
curr_type, next_type = node_types_curr[curr_inds.reshape(1, -1)], \
node_types_next[next_inds.reshape(1, -1)]
source_type, target_type = torch.cat((curr_type, next_type), dim=1), \
torch.cat((next_type, curr_type), dim=1)
# build the edge index
edge_index = torch.cat((source_index, target_index), dim=0)
temporal_edge_index.append(edge_index)
# build the edge types
edge_types = torch.cat((source_type, target_type), dim=0)
temporal_edge_types.append(edge_types)
temporal_edge_index = torch.cat(temporal_edge_index, dim=1)
temporal_edge_types = torch.cat(temporal_edge_types, dim=1)
return temporal_edge_index, temporal_edge_types
# noinspection PyTypeChecker
def build_edges(node_index_aggr, node_types_aggr, order=2, num_aggr_nodes_per_frame=None):
"""
Build the edge_index used for PyG GNN modules
:param node_index_aggr: aggregation node as indices, Shape (num_frames, num_aggr_nodes_per_frame)
:param node_types_aggr: aggregation node types as integers, Shape (num_frames, num_aggr_nodes_per_frame)
:param order: order of connectivity in the temporal direction
:param num_aggr_nodes_per_frame: number of aggregation nodes per frame, List
e.g. [num_objects_aggr, num_person_aggr, num_text_aggr, num_audio_aggr, num_scene_aggr]
:return:
edge_index_aggr: aggregation edge index needed by PyG, Shape 2xE
edge_types_aggr: aggregation edge types encoded as source_type -> target_type, Shape 2xE
"""
#########################################################################
# 1. build aggregation edges
#########################################################################
# 1.1. within frame edges
frame_edge_index = []
frame_edge_types = []
for frame in range(len(node_index_aggr)):
# 1.1.1. get the node index
node_index_frame = node_index_aggr[frame] # node index in the current frame
source_inds, target_inds = torch.meshgrid(torch.arange(len(node_index_frame)),
torch.arange(len(node_index_frame)))
source, target = node_index_frame[source_inds.reshape(1, -1)], node_index_frame[target_inds.reshape(1, -1)]
# store the edge_index
edge_index = torch.cat((source, target), dim=0)
frame_edge_index.append(edge_index)
# 1.1.2. get the node types
node_types_aggr_frame = node_types_aggr[frame] # node types in the current frame
source_types, target_types = node_types_aggr_frame[source_inds.reshape(1, -1)], \
node_types_aggr_frame[target_inds.reshape(1, -1)]
edge_types = torch.cat((source_types, target_types), dim=0)
frame_edge_types.append(edge_types)
frame_edge_index = torch.cat(frame_edge_index, dim=1)
frame_edge_types = torch.cat(frame_edge_types, dim=1)
# 1.2. temporal edges
temporal_edge_index, temporal_edge_types = build_temporal_clique(node_index_aggr, node_types_aggr, order=order)
# 1.3. merge them
edge_index_aggr = torch.cat((frame_edge_index, temporal_edge_index), dim=1)
edge_types_aggr = torch.cat((frame_edge_types, temporal_edge_types), dim=1)
return edge_index_aggr, edge_types_aggr
def build_node_index(num_aggr_nodes_per_frame, num_frames):
"""
Mark each node in the graph with an index
:param num_aggr_nodes_per_frame: number of aggregation nodes per frame, List
e.g. [num_objects_aggr, num_person_aggr, num_text_aggr, num_audio_aggr, num_scene_aggr]
:param num_frames: total number of frames
:return:
node_index_aggr: aggregation node as indices, Shape (num_frames, num_aggr_nodes_per_frame)
e.g. [[object1_aggr, object2_aggr, ..., objectN_aggr, person1_aggr, ...],
[...]]
node_types_aggr: aggregation node types as integers, Shape (num_frames, num_aggr_nodes_per_frame)
e.g. [[0, 0, ..., 0, 1, ...],
[...]]
"""
# build aggregation node types. TODO: now this is assuming graph structure is the same across time
node_types_aggr_frame = reduce(operator.add, [[i] * n for i, n in enumerate(num_aggr_nodes_per_frame)])
node_types_aggr = torch.tensor([node_types_aggr_frame] * num_frames).to(device)
# build aggregation nodes
total_aggr_nodes_per_frame = sum(num_aggr_nodes_per_frame)
node_index_aggr = torch.zeros(num_frames, total_aggr_nodes_per_frame)
for frame in range(num_frames):
# build the node index
offset = frame * total_aggr_nodes_per_frame
node_index_aggr_frame = (torch.arange(0, total_aggr_nodes_per_frame) + offset).reshape(1, -1)
node_index_aggr[frame] = node_index_aggr_frame
num_aggr_nodes = num_frames * total_aggr_nodes_per_frame
return node_index_aggr, node_types_aggr
| [
2,
645,
1040,
14978,
9485,
6030,
9787,
263,
198,
11748,
28034,
198,
11748,
10088,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
6738,
28034,
1330,
309,
22854,
198,
198,
25202,
796,
28034,
13,
25202,
7203,
66,
15339,
25,
15,
1,
61... | 2.30323 | 3,034 |
"""The type representing a cell magic"""
"""
A cell magic is a shortcut a user can write into a notebook cell.
The cell magic is written under the form '%%magic' and in principle
it operates over the entire code of the cell it is written in, e.g.
------cell
%%magic_python
from matplotlib import pyplot
x = [1,2,3]
print(x)
pyplot.plot(x)
------end of cell
"""
# Copyright (c) MariaDB Foundation.
# Distributed under the terms of the Modified BSD License.
from mariadb_kernel.maria_magics.maria_magic import MariaMagic
| [
37811,
464,
2099,
10200,
257,
2685,
5536,
37811,
198,
198,
37811,
198,
32,
2685,
5536,
318,
257,
29401,
257,
2836,
460,
3551,
656,
257,
20922,
2685,
13,
198,
464,
2685,
5536,
318,
3194,
739,
262,
1296,
705,
16626,
32707,
6,
290,
287,
... | 3.295597 | 159 |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pprint
import github
from mergify_engine import check_api
from mergify_engine import config
from mergify_engine import mergify_pull
from mergify_engine import utils
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.559242 | 211 |
import sys
'''
Plot the positions from a saved Sibernetic position_buffer.txt file
'''
if __name__ == '__main__':
if len(sys.argv) == 2:
pos_file_name = sys.argv[1]
else:
pos_file_name = '../buffers/position_buffer.txt'
#pos_file_name = '../buffers/position_buffer0.txt'
#pos_file_name = '../simulations/C1_Muscles_Mon_Jul_25_18.19.50_2016/position_buffer.txt'
pos_file_name = '../simulations/C1_Muscles_Mon_Aug__1_18.55.24_2016/position_buffer.txt'
pos_file_name = '../simulations/sine/position_buffer.txt'
plot_positions(pos_file_name) | [
11748,
25064,
220,
198,
198,
7061,
6,
198,
220,
220,
220,
28114,
262,
6116,
422,
257,
7448,
28394,
9833,
2292,
62,
22252,
13,
14116,
2393,
198,
7061,
6,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
2... | 2.237226 | 274 |
from .views import scrap_proj | [
6738,
764,
33571,
1330,
15881,
62,
1676,
73
] | 3.625 | 8 |
from .base import BaseModule
| [
6738,
764,
8692,
1330,
7308,
26796,
198
] | 4.142857 | 7 |
# Generated by Django 3.2.3 on 2021-05-24 03:26
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
18,
319,
33448,
12,
2713,
12,
1731,
7643,
25,
2075,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.886792 | 53 |
# -*- coding: utf-8 -*-
from os.path import join
from unicodecsv import writer
from progress.bar import Bar
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from core.models import Article
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
28000,
375,
721,
21370,
1330,
6260,
198,
6738,
4371,
13,
5657,
1330,
2409,
198,
6738,
2172,
29572,
1330,
787,
62,
18076,
19... | 3.493333 | 75 |
from osbot_aws.helpers.Test_Helper import Test_Helper
from osbot_browser.browser.Browser_Lamdba_Helper import Browser_Lamdba_Helper
from osbot_browser.view_helpers.Google_Charts_Js import Google_Charts_Js
| [
6738,
28686,
13645,
62,
8356,
13,
16794,
364,
13,
14402,
62,
47429,
1330,
6208,
62,
47429,
198,
6738,
28686,
13645,
62,
40259,
13,
40259,
13,
46532,
62,
43,
28745,
7012,
62,
47429,
1330,
34270,
62,
43,
28745,
7012,
62,
47429,
198,
673... | 3.121212 | 66 |
import importlib
from typing import TYPE_CHECKING
from cowbird.config import get_all_configs
from cowbird.utils import SingletonMeta, get_config_path, get_logger, get_settings
if TYPE_CHECKING:
from typing import List
from cowbird.services.service import Service
LOGGER = get_logger(__name__)
VALID_SERVICES = ["Catalog", "Geoserver", "Magpie", "Nginx", "Thredds",
"FileSystem"]
class ServiceFactory(metaclass=SingletonMeta):
    """
    Singleton factory that creates and caches :class:`Service` instances by name.
    """

    def get_service(self, name):
        # type: (ServiceFactory, str) -> Service
        """
        Return the cached `Service` implementation for ``name``, instantiating it
        on first request.

        A name that is unknown, unconfigured or inactive is cached as ``None`` so
        the lookup is not repeated.
        """
        if name in self.services:
            return self.services[name]
        svc = None
        is_configured = name in VALID_SERVICES and name in self.services_cfg
        if is_configured and self.services_cfg[name].get("active", False):
            # Service implementations live in modules named after the lowercase
            # service name, exposing a class with the exact service name.
            module_path = ".".join(["cowbird.services.impl", name.lower()])
            implementation = getattr(importlib.import_module(module_path), name)
            svc = implementation(settings=self.settings, name=name, **self.services_cfg[name])
        self.services[name] = svc
        return svc

    def get_active_services(self):
        # type: (ServiceFactory) -> List[Service]
        """
        Return every activated `Service` from the config, sorted by priority.
        """
        instances = (self.get_service(cfg_name) for cfg_name in self.services_cfg)
        active = [svc for svc in instances if svc]
        active.sort(key=lambda service: service.priority)
        return active
| [
11748,
1330,
8019,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
198,
198,
6738,
9875,
16944,
13,
11250,
1330,
651,
62,
439,
62,
11250,
82,
198,
6738,
9875,
16944,
13,
26791,
1330,
5573,
10565,
48526,
11,
651,
62,
11250,
62,
6978,
1... | 2.397399 | 692 |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A simple implementation of a Message Catalog.
"""
from functools import wraps
from gettext import GNUTranslations
from zope.i18n.interfaces import IGlobalMessageCatalog
from zope.interface import implementer
def plural_formatting(func):
    """This decorator interpolates the possible formatting marker.

    This interpolation marker is usually present for plurals.
    Example: `There are %d apples` or `They have %s pies.`

    Please note that the interpolation can be done, alternatively,
    using the mapping. This is only present as a convenience.
    """
    # BUG FIX: the inner wrapper function was missing entirely, so the
    # decorator returned an undefined name (`pformat`).  Reconstructed to
    # interpolate `n` into the translated string when a marker is present.
    @wraps(func)
    def pformat(catalog, singular, plural, n, *args, **kwargs):
        """Call *func* then interpolate `n` into the returned string."""
        translated = func(catalog, singular, plural, n, *args, **kwargs)
        try:
            # `%` raises TypeError when the string contains no marker
            # (or when the result is not a string at all, e.g. None).
            return translated % n
        except TypeError:
            return translated
    return pformat
@implementer(IGlobalMessageCatalog)
class GettextMessageCatalog(object):
    """A message catalog based on GNU gettext and Python's gettext module."""

    _catalog = None

    def __init__(self, language, domain, path_to_file):
        """Initialize the message catalog"""
        if isinstance(language, bytes):
            language = language.decode('utf-8')
        self.language = language
        if isinstance(domain, bytes):
            domain = domain.decode("utf-8")
        self.domain = domain
        self._path_to_file = path_to_file
        self.reload()
        catalog = self._catalog
        catalog.add_fallback(_KeyErrorRaisingFallback())
        # Python 2 (where str is bytes) exposes the unicode API as
        # ugettext/ungettext; on Python 3 gettext/ngettext are unicode-aware.
        if str is bytes:
            self._gettext = catalog.ugettext
            self._ngettext = catalog.ungettext
        else:
            self._gettext = catalog.gettext
            self._ngettext = catalog.ngettext

    def reload(self):
        'See IMessageCatalog'
        with open(self._path_to_file, 'rb') as stream:
            self._catalog = GNUTranslations(stream)

    def getMessage(self, id):
        'See IMessageCatalog'
        return self._gettext(id)

    @plural_formatting
    def getPluralMessage(self, singular, plural, n):
        'See IMessageCatalog'
        return self._ngettext(singular, plural, n)

    @plural_formatting
    def queryPluralMessage(self, singular, plural, n, dft1=None, dft2=None):
        'See IMessageCatalog'
        try:
            return self._ngettext(singular, plural, n)
        except KeyError:
            # Unknown message id: fall back on the caller-supplied default,
            # choosing singular/plural via the catalog's plural rule for `n`.
            return dft2 if self._catalog.plural(n) else dft1

    def queryMessage(self, id, default=None):
        'See IMessageCatalog'
        try:
            return self._gettext(id)
        except KeyError:
            return default

    def getIdentifier(self):
        'See IMessageCatalog'
        return self._path_to_file
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
5878,
11,
6244,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
... | 2.692308 | 1,196 |
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
import mmcv
import argparse
import cv2
import numpy as np
import os
import time
import threading
import tqdm
def read_image(file_name, format=None, package="PIL"):
    """
    Read an image into the given format.

    Will apply rotation and flipping if the image has such exif information.

    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR"
        package (str): decoding backend, "PIL" or "cv2"; the cv2 backend
            ignores ``format`` and returns BGR HWC as cv2.imread does.

    Returns:
        image (np.ndarray): an HWC image in the given format.
    """
    if package == "PIL":
        # BUG FIX: import Pillow only on the PIL path, so the cv2 backend
        # still works when Pillow is not installed.
        from PIL import Image, ImageOps

        with open(file_name, "rb") as f:
            image = Image.open(f)
            # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973
            try:
                image = ImageOps.exif_transpose(image)
            except Exception:
                pass

            if format is not None:
                # PIL only supports RGB, so convert to RGB and flip channels over below
                conversion_format = "RGB" if format == "BGR" else format
                image = image.convert(conversion_format)
            image = np.asarray(image)
            if format == "BGR":
                # flip channels if needed
                image = image[:, :, ::-1]
            # PIL squeezes out the channel dimension for "L", so make it HWC
            if format == "L":
                image = np.expand_dims(image, -1)
            return image
    else:
        assert package == "cv2", "package must be 'PIL' or 'cv2'"
        return cv2.imread(file_name)
if __name__ == "__main__":
    config_file = '../configs/DARDet/exp1.py'
    # checkpoint_file = '../latest.pth'
    args = get_parser().parse_args()
    if args.config:
        config_file = args.config
    checkpoint_file = args.checkpoint

    # build the model from a config file and a checkpoint file
    model = init_detector(config_file, checkpoint_file, device='cuda:0')

    # Collect inputs: a single image file, a folder of images, or nothing
    # (empty string) to run the evaluation step only.
    if os.path.isfile(args.im_or_folder):
        name_list = [args.im_or_folder]
    elif os.path.isdir(args.im_or_folder):
        name_list = sorted([os.path.join(args.im_or_folder, file_name)
                            for file_name in os.listdir(args.im_or_folder)
                            if file_name != "Thumbs.db"])
    else:
        # To perform evaluation only
        assert args.im_or_folder == ""
        name_list = []

    package = 'PIL'
    if args.num_loader > 0:
        # Use multiple threads to pre-load all images; thread i reads every
        # num_threads-th image into its slot of img_queue.
        start_time = time.time()
        num_threads = args.num_loader
        threads = []
        num_images = len(name_list)
        img_queue = [0 for i in range(num_images)]
        for i in range(num_threads):
            threads.append(threading.Thread(
                target=img_reader,
                args=(name_list, img_queue, range(i, num_images, num_threads), package))
            )
        for thread in threads:
            thread.start()
        print("All loader start!")
        for thread in threads:
            thread.join()
        print("Take {}s to load all images.".format(time.time() - start_time))

    assert args.output
    out_dirs = [args.output, os.path.join(args.output, "txt"), os.path.join(args.output, "image")]
    for out_dir in out_dirs:
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)

    count = 0
    for path in tqdm.tqdm(name_list, disable=False):
        print("Processing: {}".format(path))
        if args.num_loader > 0:
            img = img_queue[count]
        else:
            img = read_image(path, format="BGR", package=package)
        count += 1

        result = inference_detector(model, img)
        # Unwrap DataParallel-style wrappers to reach show_result.
        if hasattr(model, 'module'):
            model_ = model.module
        else:
            model_ = model

        imname = os.path.basename(path).split('.')[0]
        out_txt_name = os.path.join(args.output, "txt", imname + '.txt')
        out_image_name = os.path.join(args.output, "image", imname + '.jpg')
        model_.show_result(
            img,
            result,
            score_thr=0.5,
            show=True,
            wait_time=0,
            # BUG FIX: win_name must be the window-title string; the raw
            # `result` object was previously passed here.
            win_name=imname,
            bbox_color=(72, 101, 241),
            text_color=(72, 101, 241),
            out_file=out_image_name,
            thickness=8,
            font_scale=8)

        # Dump detections: 8 polygon coordinates followed by the score.
        bboxes = result[0]
        with open(out_txt_name, 'w') as f:
            boxes = bboxes[..., 10:]
            scores = bboxes[..., 4:5]
            for idx, box in enumerate(boxes):
                box = box.reshape(-1)
                score = scores[idx]
                coords = ",".join(str(int(box[k])) for k in range(8))
                f.write(coords + "," + str(score[0]) + "\n")

    # Sweep score thresholds and invoke the external evaluation tool.
    score_th_list = [str(th / 1000.0) for th in range(100, 1000, 25)]
    score_th_list = " ".join(score_th_list)
    cmd = "cd ./tools/msra_pod_measurement_tool_45/ \n"
    cmd += "python sortdetection.py {} {} \n".format(os.path.join(os.path.abspath(args.output), "txt"), score_th_list)
    cmd += "python test_ctw1500_eval.py {} \n".format(score_th_list)
    cmd += "cd ../../ \n"
    print(cmd)
    os.system(cmd)
| [
6738,
8085,
15255,
13,
499,
271,
1330,
2315,
62,
15255,
9250,
11,
32278,
62,
15255,
9250,
11,
905,
62,
20274,
62,
9078,
29487,
198,
11748,
8085,
33967,
198,
11748,
1822,
29572,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
459... | 2.020297 | 2,759 |
# -*- coding: utf-8 -*-
#
# Created by libxd on 17-2-5.
#
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15622,
416,
9195,
24954,
319,
1596,
12,
17,
12,
20,
13,
198,
2,
198
] | 1.870968 | 31 |
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render
from django.contrib.auth import get_user_model
from cvat.apps.authentication.decorators import login_required
from cvat.apps.stats.services import collect_annotators_stats, save_job_stats
User = get_user_model()
@login_required
@login_required
@login_required
| [
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
11,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
6738,
269,
... | 3.266055 | 109 |
import numpy as np
import os
import sys
import time
import copy
import scipy
import random
import logging
import pandas as pd
import qinfer as qi
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import seaborn as sns
import redis
import pickle
try:
from lfig import LatexFigure
except:
from qmla.shared_functionality.latex_figure import LatexFigure
import qmla.redis_settings
import qmla.logging
import qmla.get_exploration_strategy
import qmla.shared_functionality.prior_distributions
import qmla.model_building_utilities
import qmla.analysis
import qmla.utilities
pickle.HIGHEST_PROTOCOL = 4
__all__ = [
"ModelInstanceForLearning",
]
class ModelInstanceForLearning:
"""
Model used for parameter learning.
Models are specified by their name; they can be separated into
separate terms by splitting the name string by '+'.
Individual terms correspond to base matrices and are assigned parameters.
Each term is assigned a parameter probability distribution, or a prior distribution:
this will be iteratively changed according to evidence from experiments, and its mean
gives the estimate for that parameter. Prior distributions are used by the QInfer updater,
and can be specified by the :meth:`~qmla.exploration_strategies.ExplorationStrategy.get_prior` method.
The individual terms are parsed into matrices for calculations. This is achieved by
:func:`~qmla.process_basic_operator`: different string syntax enable different core oeprators.
Parameter estimation is done by :meth:`~qmla.ModelInstanceForLearning.update_model`.
The final parameter estimates are set as the mean of the
posterior distribution after n_experiments wherein n_particles
are sampled per experiment (these user definted
parameters are retrieved from `qmla_core_info_dict`).
:meth:`~qmla.ModelInstanceForLearning.learned_info_dict` returns the pertinent learned information.
:param int model_id: ID of the model to study
:param str model_name: name of the model to be learned
:param qid: ID of the QMLA instance
:param str exploration_rule: name of exploration_strategy
:param dict qmla_core_info_database: essential details about the QMLA
instance needed to learn/compare models.
If None, this is retrieved instead from the redis database.
:param str host_name: name of host server on which redis database exists.
:param int port_number: port number unique to this QMLA instance on redis database
:param str log_file: path of QMLA instance's log file.
"""
##########
# Section: Setup
##########
    def _initialise_model_for_learning(
        self, model_name, qmla_core_info_database, **kwargs
    ):
        r"""
        Preliminary set up necessary before parameter learning.

        Start instances of classes used throughout, generally by calling the exploration strategy's method,
        * qinfer inferface: :meth:`~qmla.exploration_strategies.ExplorationStrategy.qinfer_model`.
        * updater is default `QInfer.SMCUpdater <http://docs.qinfer.org/en/latest/guide/smc.html#using-smcupdater>`_.
        * parameter distribution prior: :meth:`~qmla.exploration_strategies.ExplorationStrategy.get_prior`.

        :param str model_name: name of the model to be learned
        :param dict qmla_core_info_database: essential details about the QMLA
            instance needed to learn/compare models.
            If None, this is retrieved instead from the redis database.
        """
        # Retrieve data held on redis databases.
        redis_databases = qmla.redis_settings.get_redis_databases_by_qmla_id(
            self.redis_host, self.redis_port_number, self.qmla_id
        )
        if qmla_core_info_database is None:
            # Nothing handed in: pull the pickled settings and probe sets
            # from the redis database for this QMLA instance.
            qmla_core_info_database = redis_databases["qmla_core_info_database"]
            qmla_core_info_dict = pickle.loads(
                qmla_core_info_database.get("qmla_settings")
            )
            self.probes_system = pickle.loads(qmla_core_info_database["probes_system"])
            self.probes_simulator = pickle.loads(
                qmla_core_info_database["probes_simulator"]
            )
        else:
            # Database passed directly: entries are already live objects.
            qmla_core_info_dict = qmla_core_info_database.get("qmla_settings")
            self.probes_system = qmla_core_info_database["probes_system"]
            self.probes_simulator = qmla_core_info_database["probes_simulator"]
        # Extract data from core database
        self.num_particles = qmla_core_info_dict["num_particles"]
        self.num_experiments = qmla_core_info_dict["num_experiments"]
        self.probe_number = qmla_core_info_dict["num_probes"]
        self.results_directory = qmla_core_info_dict["results_directory"]
        self.true_model_constituent_operators = qmla_core_info_dict["true_oplist"]
        self.true_model_params = qmla_core_info_dict["true_model_terms_params"]
        self.true_model_name = qmla_core_info_dict["true_name"]
        if self.model_name == self.true_model_name:
            self.is_true_model = True
            self.log_print(["This is the true model for learning."])
        else:
            self.is_true_model = False
        self.true_param_dict = qmla_core_info_dict["true_param_dict"]
        self.true_model_constructor = qmla_core_info_dict["true_model_constructor"]
        self.times_to_plot = qmla_core_info_dict["plot_times"]
        self.experimental_measurements = qmla_core_info_dict[
            "experimental_measurements"
        ]
        self.experimental_measurement_times = qmla_core_info_dict[
            "experimental_measurement_times"
        ]
        self.true_params_path = qmla_core_info_dict["run_info_file"]
        self.plot_probes = pickle.load(
            open(qmla_core_info_dict["probes_plot_file"], "rb")
        )
        self.plots_directory = qmla_core_info_dict["plots_directory"]
        self.debug_mode = qmla_core_info_dict["debug_mode"]
        self.plot_level = qmla_core_info_dict["plot_level"]
        self.figure_format = qmla_core_info_dict["figure_format"]
        # Instantiate exploration strategy
        self.exploration_class = qmla.get_exploration_strategy.get_exploration_class(
            exploration_rules=self.exploration_strategy_of_this_model,
            log_file=self.log_file,
            qmla_id=self.qmla_id,
        )
        # Get initial configuration for this model: term names/matrices and
        # their count come from the exploration strategy's model constructor.
        self.model_constructor = self.exploration_class.model_constructor(
            name=model_name
        )
        self.model_terms_names = self.model_constructor.terms_names
        # self.model_name_latex = self.exploration_class.latex_name(
        #     name=self.model_name
        # )
        self.model_name_latex = self.model_constructor.name_latex
        self.model_terms_matrices = np.asarray(self.model_constructor.terms_matrices)
        self.num_parameters = self.model_constructor.num_parameters
        self.model_dimension = self.model_constructor.num_qubits
        self.log_print(["Getting num qubits"])
        # Infer qubit count from the first term matrix, assumed (2^n x 2^n).
        self.model_num_qubits = int(np.log2(np.shape(self.model_terms_matrices[0])[0]))
        self.log_print(["model num qubits:", self.model_num_qubits])
        self.log_print(["model dimension:", self.model_dimension])
        # Potentially use different resources depending on relative model
        # complexity.
        self._consider_reallocate_resources()
        # Set up QInfer prior/model/updater and the experiment-design heuristic.
        self._setup_qinfer_infrastructure()
    def _setup_qinfer_infrastructure(self):
        r"""
        Set up prior, model and updater (via QInfer) which are used to run Bayesian inference.
        """
        # Prior parameter distribution via exploration strategy
        self.model_prior = self.exploration_class.get_prior(
            model_name=self.model_name,
            log_file=self.log_file,
            log_identifier=str("QHL {}".format(self.model_id)),
        )
        self.model_terms_parameters = self.model_prior.sample()
        self._store_prior()
        # Initialise model to interface with QInfer as specified in the
        # exploration strategy (ES).
        self.qinfer_model = self.exploration_class.get_qinfer_model(
            model_name=self.model_name,
            model_constructor=self.model_constructor,
            true_oplist=self.true_model_constituent_operators,
            true_model_constructor=self.true_model_constructor,
            num_probes=self.probe_number,
            probes_system=self.probes_system,
            probes_simulator=self.probes_simulator,
            exploration_rules=self.exploration_strategy_of_this_model,
            experimental_measurements=self.experimental_measurements,
            experimental_measurement_times=self.experimental_measurement_times,
            qmla_id=self.qmla_id,
            log_file=self.log_file,
            debug_mode=self.debug_mode,
        )
        # Updater to perform Bayesian inference with.
        # If the ES pins an effective sample size below the particle count,
        # derive the resampling threshold from it; otherwise use the ES default.
        if (
            self.exploration_class.hard_fix_resample_effective_sample_size is not None
            and self.exploration_class.hard_fix_resample_effective_sample_size
            < self.num_particles
        ):
            # get resampler threshold as a fraction of the particle count
            resampler_threshold = (
                self.exploration_class.hard_fix_resample_effective_sample_size
                / self.num_particles
            )
        else:
            resampler_threshold = self.exploration_class.qinfer_resampler_threshold
        self.qinfer_updater = qi.SMCUpdater(
            self.qinfer_model,
            self.num_particles,
            self.model_prior,
            resample_thresh=resampler_threshold,
            resampler=qi.LiuWestResampler(a=self.exploration_class.qinfer_resampler_a),
        )
        # Experiment design heuristic
        self.model_heuristic = self.exploration_class.get_heuristic(
            model_id=self.model_id,
            updater=self.qinfer_updater,
            oplist=self.model_terms_matrices,
            num_experiments=self.num_experiments,
            num_probes=self.probe_number,
            log_file=self.log_file,
            inv_field=[item[0] for item in self.qinfer_model.expparams_dtype[1:]],
            max_time_to_enforce=self.exploration_class.max_time_to_consider,
            figure_format=self.figure_format,
        )
        self.log_print(["Heuristic built"])
        self.model_heuristic_class = self.model_heuristic.__class__.__name__
        # Snapshot the per-parameter marginals before any updates, for plotting.
        self.prior_marginal = [
            self.qinfer_updater.posterior_marginal(idx_param=i)
            for i in range(self.model_constructor.num_terms)
        ]
    def _initialise_tracking_infrastructure(self):
        r"""
        Arrays, dictionaries etc for tracking learning across experiments.
        """
        # Unused (kept for backwards compatibility)
        self.timings = {"update_qinfer": 0}
        self.track_total_log_likelihood = np.array([])
        self.particles = np.array([])
        self.weights = np.array([])
        self.track_posterior_dist = []
        # Final results: one (estimate, uncertainty) row per parameter.
        self.final_learned_params = np.empty(
            # TODO remove final_learned_params and references to it,
            # use dictionaries defined here instead.
            [self.num_parameters, 2]
        )
        self.qhl_final_param_estimates = {}
        self.qhl_final_param_uncertainties = {}
        # Miscellaneous
        self.progress_tracker = pd.DataFrame()
        # Union of true-model terms and this model's terms, so quadratic loss
        # penalises both missing and spurious terms.
        self.all_params_for_q_loss = list(
            set(list(self.true_param_dict.keys())).union(self.model_terms_names)
        )
        self.param_indices = {
            op_name: self.model_terms_names.index(op_name)
            for op_name in self.model_terms_names
        }
        self.epochs_after_resampling = []
        # To track at every epoch
        self.track_experimental_times = []
        self.track_experiment_parameters = []
        self.volume_by_epoch = np.array([])
        self.track_param_means = []
        self.track_param_uncertainties = []
        self.track_norm_cov_matrices = []
        self.track_covariance_matrices = []
        self.quadratic_losses_record = []
        # Record the state of the prior as epoch 0.
        self._record_experiment_updates(update_step=0)
##########
# Section: Model learning
##########
def update_model(
self,
):
r"""
Run updates on model, corresponding to quantum Hamiltonian learning procedure.
This function is called on an instance of this model to run the entire QHL algorithm.
Get datum corresponding to true system, where true system is either experimental or simulated,
by calling `simulate_experiment <http://docs.qinfer.org/en/latest/guide/smc.html#using-smcupdater>`_
on the QInfer.SMCUpdater. This datum is taken as the true expected value for the system, which is used
in the likelihood calucation in the Bayesian inference step.
This is done by calling the `update` method on the `qinfer_updater
<http://docs.qinfer.org/en/latest/apiref/smc.html?highlight=smcupdater#smcupdater-smc-based-particle-updater>`_.
Effects of the update are then recorded by :meth:`~qmla.ModelInstanceForLearning._record_experiment_updates`,
and terminate either upon convergence or after a fixed `num_experiments`.
Final details are recorded by :meth:`~qmla.ModelInstanceForLearning._finalise_learning`.
"""
self.log_print(["Updating model."])
print_frequency = max(int(self.num_experiments / 5), 5)
for update_step in range(self.num_experiments):
if update_step % print_frequency == 0:
# Print so user can see how far along the learning is.
self.log_print(["Epoch", update_step])
# Design exeriment
new_experiment = self.model_heuristic(
num_params=self.model_constructor.num_parameters,
epoch_id=update_step,
current_params=self.track_param_means[-1],
current_volume=self.volume_by_epoch[-1],
)
self.track_experimental_times.append(new_experiment["t"])
self.track_experiment_parameters.append(new_experiment)
self.log_print_debug(["New experiment:", new_experiment])
# Run (or simulate) the experiment
datum_from_experiment = self.qinfer_model.simulate_experiment(
self.model_terms_parameters,
# this doesn't actually matter - likelihood overwrites this for
# true system
new_experiment,
repeat=1,
)
self.log_print_debug(["Datum:", datum_from_experiment])
self.log_print_debug(["Exp:", new_experiment])
# Call updater to update distribution based on datum
try:
update_start = time.time()
self.qinfer_updater.update(datum_from_experiment, new_experiment)
update_time = time.time() - update_start
except RuntimeError as e:
import sys
self.log_print(
[
"RuntimeError from updater on model {} - {}. Error: {}".format(
self.model_id, self.model_name, str(e)
)
]
)
print("\n\n[Model class] EXITING; Inspect log\n\n")
raise NameError("Qinfer update failure")
sys.exit()
except BaseException:
import sys
self.log_print(
[
"Failed to update model ({}) {} at update step {}".format(
self.model_id, self.model_name, update_step
)
]
)
raise ValueError("Failed to learn model")
sys.exit()
# Track learning
self._record_experiment_updates(
update_step=update_step,
new_experiment=new_experiment,
datum=datum_from_experiment,
update_time=update_time,
)
# Terminate
if (
self.exploration_class.terminate_learning_at_volume_convergence
and volume_by_epoch[-1]
< self.exploration_class.volume_convergence_threshold
): # can be reinstated to stop learning when volume converges
self._finalise_learning()
break
self._finalise_learning()
self.compute_likelihood_after_parameter_learning()
t1 = time.time()
self._model_plots()
self.log_print(
["Time to do plots: {} sec".format(np.round(time.time() - t1, 3))]
)
t2 = time.time()
self.model_heuristic.finalise_heuristic()
self.log_print(
["Time to finalise heuristic: {} sec".format(np.round(time.time() - t2, 3))]
)
def _record_experiment_updates(
self,
update_step,
new_experiment=None,
datum=None,
update_time=0,
):
r"""Update tracking infrastructure."""
cov_mt = self.qinfer_updater.est_covariance_mtx()
param_estimates = self.qinfer_updater.est_mean()
# Data used in plots
volume = np.abs(qi.utils.ellipsoid_volume(invA=cov_mt))
self.volume_by_epoch = np.append(self.volume_by_epoch, volume)
self.track_param_means.append(param_estimates)
self.track_param_uncertainties.append(np.sqrt(np.diag(cov_mt)))
self.track_norm_cov_matrices.append(np.linalg.norm(cov_mt))
if self.qinfer_updater.just_resampled:
self.epochs_after_resampling.append(update_step)
# Some optional tracking
if self.exploration_class.track_cov_mtx:
self.track_covariance_matrices.append(
self.qinfer_updater.est_covariance_mtx()
)
# compute quadratic loss
quadratic_loss = 0
for param in self.all_params_for_q_loss:
if param in self.model_terms_names:
learned_param = param_estimates[self.param_indices[param]]
else:
learned_param = 0
if param in list(self.true_param_dict.keys()):
true_param = self.true_param_dict[param]
else:
true_param = 0
quadratic_loss += (learned_param - true_param) ** 2
self.quadratic_losses_record.append(quadratic_loss)
if new_experiment is None:
exp_time = None
probe_id = None
total_likelihood = None
else:
exp_time = new_experiment["t"][0]
probe_id = new_experiment["probe_id"]
total_likelihood = self.qinfer_updater.normalization_record[-1][0]
try:
residual_median = self.qinfer_model.store_p0_diffs[-1][0]
residual_std = self.qinfer_model.store_p0_diffs[-1][1]
except:
residual_median = None
residual_std = None
if update_time == 0:
storage_time = 0
likelihood_time = 0
else:
try:
storage_time = self.qinfer_model.single_experiment_timings["simulator"][
"storage"
]
likelihood_time = self.qinfer_model.single_experiment_timings[
"simulator"
]["likelihood"]
except Exception as e:
raise
self.log_print(["Can't find storage/likelihood time. Exception : ", e])
experiment_summary = pd.Series(
{
"model_id": self.model_id,
"model_name": self.model_name_latex,
"num_qubits": self.model_num_qubits,
"experiment_id": update_step + 1, # update_step counts from 0
"parameters_true": self.true_model_params,
"parameters_estimates": param_estimates,
"parameters_uncertainties": self.track_param_uncertainties[-1],
"volume": volume,
"quadratic_loss": quadratic_loss,
"experiment_time": exp_time,
"probe_id": probe_id,
"residual_median": residual_median,
"residual_std_dev": residual_std,
"just_resampled": self.qinfer_updater.just_resampled,
"effective_sample_size": self.qinfer_updater.n_ess,
"datum": datum,
"total_likelihood": total_likelihood,
"update_time": update_time,
"storage_time": storage_time,
"likelihood_time": likelihood_time,
}
)
self.progress_tracker = self.progress_tracker.append(
experiment_summary, ignore_index=True
)
    def _finalise_learning(self):
        r"""Record and log final result."""
        self.log_print(
            [
                "Epoch {}".format(self.num_experiments),
                "\n QHL finished for ",
                self.model_name,
                "\n Final experiment time:",
                self.track_experimental_times[-1],
                "\n {} Resample epochs: \n{}".format(
                    len(self.epochs_after_resampling), self.epochs_after_resampling
                ),
                "\nTimings:\n",
                self.timings,
                "\nEffective sample size: {}".format(self.qinfer_updater.n_ess),
            ]
        )
        # Final results
        self.model_log_total_likelihood = self.qinfer_updater.log_total_likelihood
        self.posterior_marginal = [
            self.qinfer_updater.posterior_marginal(idx_param=i)
            for i in range(self.model_constructor.num_terms)
        ]
        # Convert per-epoch lists to arrays: rows are epochs, columns are terms.
        self.track_param_means = np.array(self.track_param_means)
        self.track_param_uncertainties = np.array(self.track_param_uncertainties)
        self.track_param_estimate_v_epoch = {}
        self.track_param_uncertainty_v_epoch = {}
        cov_mat = self.qinfer_updater.est_covariance_mtx()
        est_params = self.qinfer_updater.est_mean()
        self.log_print(["model_terms_names:", self.model_terms_names])
        for i in range(self.model_constructor.num_terms):
            # Store learned parameters
            # TODO get rid of uses of final_learned_params, use
            # qhl_final_param_estimates instead
            term = self.model_terms_names[i]
            self.final_learned_params[i] = [
                self.qinfer_updater.est_mean()[i],
                np.sqrt(cov_mat[i][i]),
            ]
            self.qhl_final_param_estimates[term] = est_params[i]
            # Uncertainty is the posterior standard deviation of this term.
            self.qhl_final_param_uncertainties[term] = np.sqrt(cov_mat[i][i])
            self.log_print(
                [
                    "Final parameters estimates and uncertainties (term {}): {} +/- {}".format(
                        term,
                        self.qhl_final_param_estimates[term],
                        self.qhl_final_param_uncertainties[term],
                    )
                ]
            )
            # Arrays of parameter estimates/uncertainties per epoch, keyed by term.
            self.track_param_estimate_v_epoch[term] = self.track_param_means[:, i]
            self.track_param_uncertainty_v_epoch[term] = self.track_param_uncertainties[
                :, i
            ]
        # Compute the Hamiltonian corresponding to the parameter
        # posterior distribution
        self.learned_hamiltonian = self.model_constructor.construct_matrix(
            parameters=est_params
        )
        # Record parameter estimates: join estimates and (prefixed) uncertainties
        # into one frame indexed by experiment_id.
        pe = pd.DataFrame(self.track_param_estimate_v_epoch)
        pu = pd.DataFrame(self.track_param_uncertainty_v_epoch)
        pu.index.rename("experiment_id", inplace=True)
        pe.index.rename("experiment_id", inplace=True)
        pu.rename(
            columns={d: "uncertainty_{}".format(d) for d in pu.keys()}, inplace=True
        )
        self.parameter_estimates = pu.join(pe, on="experiment_id")
        # Compute dynamics
        self._compute_expectation_values()
def _model_plots_old(self):
r"""
Generate plots specific to this model.
Which plots are drawn depends on the ``plot_level`` set in the launch script.
"""
if self.plot_level >= 4:
# Plots for this model, if plot level wants to include them
# TODO replace excepts prints with warnings
self._plot_preliminary_preparation()
try:
self._plot_learning_summary()
except BaseException:
self.log_print(["Failed to _plot_learning_summary"])
try:
self._plot_dynamics()
except:
self.log_print(["Failed to plot model dynamics."])
# raise
if self.plot_level >= 5:
try:
self._plot_distributions()
except BaseException:
raise
self.log_print(["Failed to plot posterior"])
try:
self.model_heuristic.plot_heuristic_attributes(
save_to_file=os.path.join(
self.model_learning_plots_directory,
"{}heuristic_attributes_{}.png".format(
self.plot_prefix, self.model_id
),
)
)
except BaseException:
self.log_print(["Failed to plot_heuristic_attributes"])
if self.plot_level >= 7:
# very heavy, not very informative
try:
self._plot_posterior_mesh_pairwise()
except BaseException:
self.log_print(["failed to _plot_poster_mesh_pairwise"])
    def learned_info_dict(self):
        """
        Place essential information after learning has occured into a dict.

        This is used to recreate the model for
        * comparisons: :class:`~qmla.ModelInstanceForComparison`
        * storage within the main QMLA environment :class:`~qmla.ModelInstanceForStorage>`.

        The keys below form an implicit contract with those classes; do not
        rename them without updating the consumers.
        """
        learned_info = {}
        # needed by storage class
        learned_info["num_particles"] = self.num_particles
        learned_info["num_experiments"] = self.num_experiments
        learned_info["times_learned_over"] = self.track_experimental_times
        learned_info["track_experiment_parameters"] = self.track_experiment_parameters
        learned_info["final_learned_params"] = self.final_learned_params
        learned_info[
            "model_normalization_record"
        ] = self.qinfer_updater.normalization_record
        learned_info["log_total_likelihood"] = self.qinfer_updater.log_total_likelihood
        learned_info["raw_volume_list"] = self.volume_by_epoch
        learned_info["track_param_means"] = self.track_param_means
        learned_info["track_covariance_matrices"] = self.track_covariance_matrices
        learned_info["track_norm_cov_matrices"] = self.track_norm_cov_matrices
        learned_info["track_param_uncertainties"] = self.track_param_uncertainties
        learned_info["track_param_estimate_v_epoch"] = self.track_param_estimate_v_epoch
        learned_info[
            "track_param_uncertainty_v_epoch"
        ] = self.track_param_uncertainty_v_epoch
        learned_info["epochs_after_resampling"] = self.epochs_after_resampling
        learned_info["quadratic_losses_record"] = self.quadratic_losses_record
        learned_info["qhl_final_param_estimates"] = self.qhl_final_param_estimates
        learned_info[
            "qhl_final_param_uncertainties"
        ] = self.qhl_final_param_uncertainties
        learned_info["covariance_mtx_final"] = self.qinfer_updater.est_covariance_mtx()
        learned_info["estimated_mean_params"] = self.qinfer_updater.est_mean()
        learned_info["learned_hamiltonian"] = self.learned_hamiltonian
        learned_info[
            "exploration_strategy_of_this_model"
        ] = self.exploration_strategy_of_this_model
        learned_info["model_heuristic_class"] = self.model_heuristic_class
        # Evaluation metrics computed by compute_likelihood_after_parameter_learning.
        learned_info["evaluation_log_likelihood"] = self.evaluation_log_likelihood
        learned_info[
            "evaluation_normalization_record"
        ] = self.evaluation_normalization_record
        learned_info["akaike_info_criterion"] = self.akaike_info_criterion
        learned_info["akaike_info_criterion_c"] = self.akaike_info_criterion_c
        learned_info["bayesian_info_criterion"] = self.bayesian_info_criterion
        learned_info["evaluation_median_likelihood"] = self.evaluation_median_likelihood
        learned_info["evaluation_pr0_diffs"] = self.evaluation_pr0_diffs
        learned_info["evaluation_mean_pr0_diff"] = np.mean(self.evaluation_pr0_diffs)
        learned_info["evaluation_median_pr0_diff"] = np.median(
            self.evaluation_pr0_diffs
        )
        learned_info["num_evaluation_points"] = self.num_evaluation_points
        learned_info["qinfer_model_likelihoods"] = self.qinfer_model.store_likelihoods
        learned_info["evaluation_likelihoods"] = self.evaluation_likelihoods
        learned_info["evaluation_residual_squares"] = self.evaluation_residual_squares
        learned_info[
            "evaluation_summarise_likelihoods"
        ] = self.evaluation_summarise_likelihoods
        learned_info["qinfer_pr0_diff_from_true"] = np.array(
            self.qinfer_model.store_p0_diffs
        )
        learned_info["expectation_values"] = self.expectation_values
        learned_info["progress_tracker"] = self.progress_tracker
        learned_info["parameter_estimates"] = self.parameter_estimates
        # additionally wanted by comparison class
        learned_info["name"] = self.model_name
        learned_info["model_id"] = self.model_id
        learned_info["final_prior"] = self.qinfer_updater.prior
        learned_info["posterior_marginal"] = self.posterior_marginal
        # TODO restore initial_prior as required for plots in
        # remote_bayes_factor
        # Heuristic attributes are optional; not every heuristic records them.
        try:
            learned_info["heuristic_data"] = self.model_heuristic.heuristic_data
        except BaseException:
            pass
        try:
            learned_info["heuristic_distances"] = self.model_heuristic.distances
        except BaseException:
            pass
        try:
            learned_info[
                "heuristic_assorted_times"
            ] = self.model_heuristic.designed_times
            learned_info["volume_derivatives"] = self.model_heuristic.derivatives
        except BaseException:
            pass
        return learned_info
##########
# Section: Evaluation
##########
    def compute_likelihood_after_parameter_learning(
        self,
    ):
        r"""
        Evaluate the learned model on independent evaluation data.

        Loads the evaluation probes and experiment list from disk, rebuilds a
        QInfer model/updater frozen at the learned parameter estimates
        (resampling disabled, so the learned model itself is evaluated), and
        replays every evaluation experiment through it. The resulting
        log-likelihood, AIC/AICc/BIC information criteria and residual
        summaries are stored on ``self`` for later model comparison.
        """
        self.log_print(["Evaluating learned model."])
        # Retrieve probes and experiment list used as evaluation data.
        # NOTE(review): the file handle opened here is never closed — consider
        # a `with open(...)` block.
        evaluation_data = pickle.load(
            open(os.path.join(self.results_directory, "evaluation_data.p"), "rb")
        )  # TODO get from command line argument instead of reconstructing path here
        # evaluation_times = evaluation_data['times']
        evaluation_probe_dict = evaluation_data["probes"]
        evaluation_experiments = evaluation_data["experiments"]
        self.num_evaluation_points = len(evaluation_experiments)
        if not self.exploration_class.force_evaluation and self.num_experiments < 20:
            # TODO make optional robustly in ES or pass dev arg to QMLA
            # instance.
            self.log_print(
                ["<20 experiments; presumed dev mode. Not evaluating all models"]
            )
            # Thin the evaluation set to every 10th experiment in dev mode.
            evaluation_experiments = evaluation_experiments[::10]
        if self.exploration_class.exclude_evaluation:
            evaluation_experiments = evaluation_experiments[::10]
        self.log_print(
            [
                "Evaluation experiments len {}. First 5 elements:\n{}".format(
                    len(evaluation_experiments), evaluation_experiments[:5]
                )
            ]
        )
        # Construct a fresh updater and model to evaluate on.
        # The prior is sharply peaked at the learned estimates: tiny diagonal
        # covariance so particles barely deviate from the learned parameters.
        estimated_params = self.qinfer_updater.est_mean()
        cov_mt_uncertainty = [1e-10] * np.shape(estimated_params)[0]
        cov_mt = np.diag(cov_mt_uncertainty)
        posterior_distribution = self.exploration_class.get_evaluation_prior(
            model_name=self.model_name,
            estimated_params=estimated_params,
            cov_mt=cov_mt,
        )
        evaluation_model_constructor = self.exploration_class.model_constructor(
            name=self.model_name, fixed_parameters=estimated_params
        )
        # TODO using precise mean of posterior to evaluate model
        # want to sample from it -- add flag to qinfer model
        evaluation_qinfer_model = self.exploration_class.get_qinfer_model(
            model_name=self.model_name,
            model_constructor=evaluation_model_constructor,
            true_model_constructor=self.true_model_constructor,
            num_probes=self.probe_number,
            probes_system=evaluation_probe_dict,
            probes_simulator=evaluation_probe_dict,
            exploration_rules=self.exploration_strategy_of_this_model,
            experimental_measurements=self.experimental_measurements,
            experimental_measurement_times=self.experimental_measurement_times,
            log_file=self.log_file,
            debug_mode=self.debug_mode,
            qmla_id=self.qmla_id,
            evaluation_model=True,
        )
        evaluation_updater = qi.SMCUpdater(
            model=evaluation_qinfer_model,
            n_particles=min(5, self.num_particles),
            prior=posterior_distribution,
            # turn off resampling - want to evaluate the learned model, not
            # improved version
            resample_thresh=0.0,
            resampler=qi.LiuWestResampler(a=self.exploration_class.qinfer_resampler_a),
        )
        evaluation_heuristic = self.exploration_class.get_heuristic(
            model_id=self.model_id,
            updater=evaluation_updater,
            oplist=self.model_terms_matrices,
            num_experiments=self.num_experiments,
            num_probes=self.probe_number,
            log_file=self.log_file,
            inv_field=[item[0] for item in self.qinfer_model.expparams_dtype[1:]],
            max_time_to_enforce=self.exploration_class.max_time_to_consider,
            figure_format=self.figure_format,
        )
        # Reset the private accumulators so the replay starts from scratch.
        evaluation_updater._log_total_likelihood = 0.0
        evaluation_updater._normalization_record = []
        eval_epoch = 0
        self.log_print(
            ["Evaluating on {} experiments".format(len(evaluation_experiments))]
        )
        for experiment in evaluation_experiments:
            # Replay the stored evaluation experiment: force its time and probe.
            t = experiment["t"].item()
            probe_id = experiment["probe_id"].item()
            exp = evaluation_heuristic(
                num_params=len(self.model_terms_matrices),
                epoch_id=eval_epoch,
                force_time_choice=t,
            )
            exp["probe_id"] = probe_id
            params_array = np.array([[self.true_model_params[:]]])
            datum = evaluation_updater.model.simulate_experiment(
                params_array,
                exp,
                # repeat=1000
                repeat=5,
            )
            self.log_print_debug(
                [
                    "(eval) Datum:",
                    datum,
                ]
            )
            evaluation_updater.update(datum, exp)
            eval_epoch += 1
        # Store evaluation
        self.evaluation_normalization_record = evaluation_updater.normalization_record
        if np.isnan(evaluation_updater.log_total_likelihood):
            # NOTE(review): in this branch akaike_info_criterion etc. are never
            # assigned, so the final log_print below will raise AttributeError
            # unless those attributes were set elsewhere — TODO confirm.
            self.evaluation_log_likelihood = None
            self.evaluation_median_likelihood = None
            self.log_print(["Evaluation ll is nan"])
        else:
            self.evaluation_log_likelihood = evaluation_updater.log_total_likelihood
            # self.evaluation_log_likelihood /= len(self.evaluation_normalization_record) # normalise
            self.evaluation_log_likelihood = qmla.utilities.round_nearest(
                self.evaluation_log_likelihood, 0.01
            )
            self.evaluation_median_likelihood = np.round(
                np.median(evaluation_updater.normalization_record), 2
            )
            # Per-experiment |Pr(0)_system - Pr(0)_particles| medians.
            self.evaluation_pr0_diffs = np.array(
                evaluation_qinfer_model.store_p0_diffs
            )[:, 0]
            n_terms = len(self.model_terms_names)
            n_samples = len(self.evaluation_normalization_record)
            # Information criteria: AIC = 2k - 2 ln(L).
            self.akaike_info_criterion = 2 * n_terms - 2 * self.evaluation_log_likelihood
            try:
                # Small-sample corrected AICc.
                self.akaike_info_criterion_c = self.akaike_info_criterion + 2 * (
                    n_terms ** 2 + n_terms
                ) / (n_samples - n_terms - 1)
            except:
                # when n_samples - n_terms - 1 == 0
                # TODO this is made up to avoid errors - find a better way
                # AICc should not be trusted in these cases b/c sample size is so small
                # NOTE(review): bare except also swallows unrelated errors;
                # prefer `except ZeroDivisionError`.
                self.akaike_info_criterion_c = self.akaike_info_criterion + 2 * (
                    n_terms ** 2 + n_terms
                ) / (n_samples - n_terms)
            # BIC = k ln(n) - 2 ln(L).
            self.bayesian_info_criterion = (
                self.num_parameters * np.log(self.num_evaluation_points)
                - 2 * self.evaluation_log_likelihood
            )
            self.evaluation_likelihoods = evaluation_qinfer_model.store_likelihoods
            self.evaluation_summarise_likelihoods = (
                evaluation_qinfer_model.summarise_likelihoods
            )
            # Residuals between the system's Pr(0) and the particles'
            # mean/median Pr(0), summarised four ways.
            self.evaluation_residual_squares = {
                "mean": np.mean(
                    np.abs(
                        (
                            np.array(self.evaluation_summarise_likelihoods["system"])
                            - np.array(
                                self.evaluation_summarise_likelihoods["particles_mean"]
                            )
                        )
                    )
                ),
                "median": np.median(
                    np.abs(
                        (
                            np.array(self.evaluation_summarise_likelihoods["system"])
                            - np.array(
                                self.evaluation_summarise_likelihoods["particles_median"]
                            )
                        )
                    )
                ),
                "mean_sq": np.mean(
                    (
                        np.array(self.evaluation_summarise_likelihoods["system"])
                        - np.array(self.evaluation_summarise_likelihoods["particles_mean"])
                    )
                    ** 2
                ),
                "median_sq": np.median(
                    (
                        np.array(self.evaluation_summarise_likelihoods["system"])
                        - np.array(
                            self.evaluation_summarise_likelihoods["particles_median"]
                        )
                    )
                    ** 2
                ),
            }
        self.log_print(
            [
                "Model {} evaluation ll:{} AIC:{}".format(
                    self.model_id,
                    self.evaluation_log_likelihood,
                    self.akaike_info_criterion,
                )
            ]
        )
##########
# Section: Evaluation
##########
def _plot_preliminary_preparation(self):
r"""
Prepare model for plots; make directory.
"""
self.model_learning_plots_directory = os.path.join(
self.plots_directory, "model_training"
)
self.plot_prefix = ""
if self.is_true_model:
self.plot_prefix = ""
# TODO turn back on when not in dev
# self.plot_prefix = 'true_'
if not os.path.exists(self.model_learning_plots_directory):
try:
os.makedirs(self.model_learning_plots_directory)
except BaseException:
pass # another instance made it at same time
    def _plot_distributions(self):
        r"""
        For each parameter, plot:
        * prior distribution
        * posterior distribution
        * prior distribution for comparison,
            i.e. posterior from learning recast as a unimodal normal
        * true parameters (if applicable)
        * learned parameter estimates
        * covariance matrix between parameters (separate plot)
        # TODO add plotting levels: run, instance, model
        """
        # Recast the learned posterior as a unimodal multivariate normal;
        # this is the prior that Bayes-factor comparisons will start from.
        bf_posterior = qi.MultivariateNormalDistribution(
            self.qinfer_updater.est_mean(), self.qinfer_updater.est_covariance_mtx()
        )
        bf_posterior_updater = qi.SMCUpdater(
            model=self.qinfer_model, n_particles=self.num_particles, prior=bf_posterior
        )
        bf_posterior_marginal = [
            bf_posterior_updater.posterior_marginal(idx_param=i)
            for i in range(self.model_constructor.num_terms)
        ]
        num_terms = self.model_constructor.num_terms
        # One subplot per model parameter.
        lf = LatexFigure(auto_gridspec=num_terms)
        for param_idx in range(num_terms):
            term = self.model_terms_names[param_idx]
            ax = lf.new_axis()
            # plot prior
            ax.plot(
                self.prior_marginal[param_idx][0],  # locations
                self.prior_marginal[param_idx][1],  # weights
                color="blue",
                ls="-",
                label="Prior",
            )
            # plot posterior
            ax.plot(
                self.posterior_marginal[param_idx][0],  # locations
                self.posterior_marginal[param_idx][1],  # weights
                color="black",
                ls="-",
                label="Posterior",
            )
            # plot the recast posterior used as prior for BF comparison
            ax.plot(
                bf_posterior_marginal[param_idx][0],  # locations
                bf_posterior_marginal[param_idx][1],  # weights
                color="green",
                ls=":",
                label="Prior for BF",
            )
            # True param (only available for terms present in the true model)
            if term in self.true_param_dict:
                ax.axvline(
                    self.true_param_dict[term], color="red", ls="-.", label="True"
                )
            # Learned param
            try:
                ax.axvline(
                    self.qhl_final_param_estimates[term],
                    color="black",
                    ls="--",
                    label="Learned",
                )
            except BaseException:
                self.log_print(
                    ["{} not in {}".format(term, self.qhl_final_param_estimates)]
                )
            # There is a bug when using log scale which causes overlap on the axis labels:
            # https://stackoverflow.com/questions/46498157/overlapping-axis-tick-labels-in-logarithmic-plots
            # ax.semilogx()
            # ax.semilogy()
            # ax.minorticks_off()
            # latex_name = self.exploration_class.latex_name(term)
            latex_name = self.model_constructor.latex_name_method(term)
            self.log_print(["Latex name:", latex_name])
            ax.set_title(r"{}".format(latex_name))
            # Only the top-right panel carries the legend.
            if ax.row == 0 and ax.col == lf.gridspec_layout[1] - 1:
                ax.legend()
        # Shared axis labels for the whole grid.
        lf.fig.text(0.5, 0.04, "Particle locations", ha="center")
        lf.fig.text(0.04, 0.5, "Weights", va="center", rotation="vertical")
        # save the plot
        lf.save(
            os.path.join(
                self.model_learning_plots_directory,
                "{}distributions_{}".format(self.plot_prefix, self.model_id),
            ),
            file_format=self.figure_format,
        )
        # Plot covariance matrix heatmap (separate figure)
        plt.clf()
        lf = LatexFigure()
        ax = lf.new_axis()
        sns.heatmap(self.qinfer_updater.est_covariance_mtx(), ax=ax)
        lf.save(
            os.path.join(
                self.model_learning_plots_directory,
                "{}cov_mtx_final_{}".format(self.plot_prefix, self.model_id),
            ),
            file_format=self.figure_format,
        )
    def _plot_learning_summary(self):
        r"""
        Plot summary of this model's learning:
        * parameter estimates and uncertainties
        * volume of parameter distribution
        * experimental times used
        * (resample points superposed on the above)
        * likelihoods of system/particles
        * difference between system/particles' likelihoods

        The grid has one panel per parameter plus one full-width panel per
        entry in ``extra_plots`` (currently only ``volume``).
        """
        terms = self.track_param_estimate_v_epoch.keys()
        num_terms = len(terms)
        # Only 'volume' is active; the other panels are currently disabled.
        # NOTE(review): the 'likelihoods' and 'residuals' branches below use
        # an undefined variable `row` and would raise NameError if re-enabled.
        extra_plots = [
            "volume",
            # 'quad_loss', 'residuals', 'likelihoods'
        ]
        resample_colour = "grey"
        # Roughly square grid of parameter panels.
        if num_terms <= 3:
            ncols = num_terms
        else:
            ncols = int(np.ceil(np.sqrt(num_terms)))
        nrows_for_params = int(np.ceil(num_terms / ncols))
        nrows = nrows_for_params + len(extra_plots)
        # Extra panels are taller than the per-parameter panels.
        height_ratios = [1] * nrows_for_params
        height_ratios.extend([ncols * 0.7] * len(extra_plots))
        plt.clf()
        lf = LatexFigure(
            use_gridspec=True,
            gridspec_layout=(nrows, ncols),
            gridspec_params={"height_ratios": height_ratios},
        )
        # Parameter estimates: estimate line with +/- uncertainty band.
        for term in terms:
            ax = lf.new_axis(
                # label_position=(-.3, 1.1)
            )
            estimates = self.track_param_estimate_v_epoch[term]
            uncertainty = self.track_param_uncertainty_v_epoch[term]
            lower_bound = estimates - uncertainty
            upper_bound = estimates + uncertainty
            epochs = range(len(estimates))
            ax.plot(epochs, estimates, label="Estimate")
            ax.fill_between(
                epochs, lower_bound, upper_bound, alpha=0.2, label="Uncertainty"
            )
            # if len(self.epochs_after_resampling) > 0:
            #     ax.axvline(
            #         self.epochs_after_resampling[0],
            #         ls='--',
            #         c=resample_colour, alpha=0.5, label='Resample'
            #     )
            #     for e in self.epochs_after_resampling[1:]:
            #         ax.axvline(
            #             e,
            #             ls='--',
            #             c=resample_colour, alpha=0.5,
            #         )
            if term in self.true_param_dict:
                true_param = self.true_param_dict[term]
                ax.axhline(true_param, color="red", ls="--", label="True")
            try:
                # term_latex = self.exploration_class.latex_name(term)
                term_latex = self.model_constructor.latex_name_method(term)
                ax.set_title(term_latex)
                # ax.set_ylabel(term_latex)
            except BaseException:
                self.log_print(["Failed to get latex name"])
                raise
            # ax.set_ylabel('Parameter')
            ax.set_xlabel("Epoch")
            # Legend only on the top-right panel; y-label only on the left
            # column; x-label only on the bottom row of parameter panels.
            if ax.row == 0 and ax.col == lf.gridspec_layout[1] - 1:
                ax.legend(bbox_to_anchor=(1.1, 1.1))
            if ax.col == 0:
                ax.set_ylabel("Parameter")
            if ax.row == nrows_for_params - 1:
                ax.set_xlabel("Epoch")
            else:
                ax.set_xlabel("")
        if "volume" in extra_plots:
            # Volume and experimental times on a shared full-width panel,
            # volume on the left axis and times on a twin right axis.
            ax = lf.new_axis(
                # label_position=(-0.1, 1.05),
                span=(1, "all")
            )
            ax.plot(
                range(len(self.volume_by_epoch)),
                self.volume_by_epoch,
                label=r"$V$",
                color="k",
            )
            # if len(self.epochs_after_resampling) > 0:
            #     ax.axvline(  # label first resample only
            #         self.epochs_after_resampling[0],
            #         ls='--',
            #         c=resample_colour,
            #         alpha=0.5,
            #         # label='Resample'
            #     )
            #     for e in self.epochs_after_resampling[1:]:
            #         ax.axvline(
            #             e,
            #             ls='--',
            #             c=resample_colour,
            #             alpha=0.5,
            #         )
            # ax.set_title('Volume and Experimental Times')
            ax.set_ylabel("Volume")
            ax.set_xlabel("Epoch")
            ax.set_yscale("log")
            time_ax = ax.twinx()
            # NOTE(review): `times` is computed but never used below — the
            # scatter plots self.track_experimental_times directly.
            times = qmla.utilities.flatten(self.track_experimental_times)
            if self.num_experiments > 100:
                s = 4  # size of time dots
            else:
                s = 7
            time_ax.scatter(
                range(len(self.track_experimental_times)),
                self.track_experimental_times,
                label=r"$t$",
                s=s,
            )
            time_ax.set_ylabel("Time")
            time_ax.semilogy()
            # time_ax.legend(
            #     bbox_to_anchor=(0.85, 1.25),
            #     # loc='lower center'
            # )
            # Merge the legends of both axes into a single legend.
            handles, labels = ax.get_legend_handles_labels()
            t_handles, t_labels = time_ax.get_legend_handles_labels()
            handles.extend(t_handles)
            labels.extend(t_labels)
            ax.legend(
                handles,
                labels,
                ncol=2,
                loc="upper center"
                # bbox_to_anchor=(0.4, 1.25)
            )
        if "quad_loss" in extra_plots:
            # Covariance mtx norm and quadratic loss (currently disabled)
            ax = lf.new_axis(span=(1, "all"))
            ax.plot(
                range(len(self.track_norm_cov_matrices)),
                self.track_norm_cov_matrices,
                label="Covariance norm",
                color="green",
                ls=":",
            )
            ax.semilogy()
            ax.set_ylabel("Q.L / Norm")
            ax.plot(
                range(len(self.quadratic_losses_record)),
                self.quadratic_losses_record,
                label="Quadratic loss",
                c="orange",
                ls="--",
            )
            ax.legend(
                loc="lower left"
                # bbox_to_anchor=(1.1, 1.1)
            )
        if "likelihoods" in extra_plots:
            # Likelihoods of system and particles (currently disabled;
            # see NOTE at top: `row` is undefined here)
            row += 1
            ax = lf.fig.add_subplot(lf.gs[row, :])
            particle_likelihoods = self.qinfer_model.summarise_likelihoods[
                "particles_median"
            ]
            # NOTE(review): particle_likelihoods_std is assigned but unused.
            particle_likelihoods_std = self.qinfer_model.summarise_likelihoods[
                "particles_std"
            ]
            system_likelihoods = self.qinfer_model.summarise_likelihoods["system"]
            ax.plot(
                range(len(system_likelihoods)),
                system_likelihoods,
                # s=3,
                color="red",
                ls="--",
                label="System",
            )
            ax.scatter(
                range(len(particle_likelihoods)),
                particle_likelihoods,
                s=3,
                color="Blue",
                label="Particles",
            )
            ax.fill_between(
                range(len(particle_likelihoods)),
                self.qinfer_model.summarise_likelihoods["particles_upper_quartile"],
                self.qinfer_model.summarise_likelihoods["particles_lower_quartile"],
                alpha=0.3,
                color="Blue",
                label="Particles IQR",
            )
            ax.set_ylabel("$ Pr(0) $")
            ax.set_xlabel("Epoch")
            ax.semilogy()
            ax.legend()
        if "residuals" in extra_plots:
            # Difference | system-pr0 - particles-pr0 | (currently disabled;
            # see NOTE at top: `row` is undefined here)
            row += 1
            ax = lf.fig.add_subplot(lf.gs[row, :])
            self.qinfer_pr0_diff_from_true = np.array(self.qinfer_model.store_p0_diffs)
            medians = self.qinfer_pr0_diff_from_true[:, 0]
            std = self.qinfer_pr0_diff_from_true[:, 1]
            ax.scatter(range(len(medians)), medians, s=3, color="Blue")
            ax.fill_between(
                range(len(medians)),
                medians + std,
                medians - std,
                alpha=0.3,
                color="Blue",
            )
            ax.set_ylabel(r"$ \|Pr(0)_{sys} - Pr(0)_{sim} \| $")
            ax.set_xlabel("Epoch")
            ax.semilogy()
            try:
                ax.axhline(0.5, label="0.5", ls="--", alpha=0.3, c="grey")
                ax.axhline(
                    0.1,
                    label="0.1",
                    ls=":",
                    alpha=0.3,
                    c="grey",
                )
            except BaseException:
                pass
            ax.legend()
        # Save figure
        lf.save(
            os.path.join(
                self.model_learning_plots_directory,
                "{}learning_summary_{}".format(self.plot_prefix, self.model_id),
            ),
            file_format=self.figure_format,
        )
def _plot_posterior_mesh_pairwise(self):
r"""
Plots the posterior mesh as contours for each pair of parameters.
Mesh from qinfer.SMCUpdater.posterior_mesh
"""
import itertools
fig, axes = plt.subplots(figsize=(18, 10), constrained_layout=True)
selected_cmap = plt.cm.Paired
n_param = self.model_constructor.num_terms
nrows = ncols = n_param
gs = GridSpec(
nrows + 1,
ncols,
)
include_param_self_correlation = True
if include_param_self_correlation:
pairs_of_params = list(
itertools.combinations_with_replacement(range(n_param), 2)
)
else:
pairs_of_params = list(
itertools.combinations(range(n_param), 2)
) # exlcude param with itself
vmin = 1e3
vmax = 0
posterior_meshes = {}
for i, j in pairs_of_params:
post_mesh = self.qinfer_updater.posterior_mesh(
idx_param1=j, idx_param2=i, res1=50, res2=50
)
# store the post mesh - don't want to compute twice
posterior_meshes[i, j] = post_mesh
# find global min/max contour value for consistency across plots
if np.min(post_mesh[2]) < vmin:
vmin = np.min(post_mesh[2])
if np.max(post_mesh[2]) > vmax:
vmax = np.max(post_mesh[2])
for i in range(n_param):
for j in range(n_param):
ax = fig.add_subplot(gs[i, j])
y_term = self.qinfer_model.modelparam_names[i]
x_term = self.qinfer_model.modelparam_names[j]
if ax.is_first_col():
ax.set_ylabel(
# self.exploration_class.latex_name(y_term),
self.model_constructor.latex_name_method(y_term),
rotation=0,
)
if ax.is_first_row():
ax.set_title(
# self.exploration_class.latex_name(x_term)
self.model_constructor.latex_name_method(x_term)
)
if (i, j) in pairs_of_params:
ax.contourf(
*posterior_meshes[i, j],
vmin=vmin,
vmax=vmax,
cmap=selected_cmap
)
if x_term in self.true_param_dict:
true_param = self.true_param_dict[x_term]
if ax.get_xlim()[0] < true_param < ax.get_xlim()[1]:
ax.axvline(true_param, c="black", ls="--", alpha=0.3)
if y_term in self.true_param_dict:
true_param = self.true_param_dict[y_term]
if ax.get_ylim()[0] < true_param < ax.get_ylim()[1]:
ax.axhline(true_param, c="black", ls="--", alpha=0.3)
else:
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
# Colour bar
ax = fig.add_subplot(gs[nrows, :])
m = plt.cm.ScalarMappable(cmap=selected_cmap)
m.set_array([])
m.set_clim(vmin, vmax)
fig.colorbar(m, cax=ax, orientation="horizontal", shrink=0.7)
# Save
fig.text(0.5, 0.04, "Posterior mesh", ha="center")
fig.savefig(
os.path.join(
self.model_learning_plots_directory,
"{}posterior_mesh_pairwise_{}.png".format(
self.plot_prefix, self.model_id
),
)
)
def _plot_dynamics(self):
"""
Plots the dynamics reproduced by this model against system data.
"""
# Plot dynamics of model vs system
lf = LatexFigure(auto_label=False, fraction=0.75)
ax = lf.new_axis()
# System
times = sorted(self.expectation_values.keys())
ax.scatter(
times,
[self.experimental_measurements[t] for t in times],
color="red",
label="System",
s=3,
)
# This model
ax.plot(
times,
[self.expectation_values[t] for t in times],
color="blue",
label="Model",
)
# label_fontsize = 15
ax.set_xlim(0, max(times))
ax.set_ylabel(
"Expectation value",
)
ax.set_xlabel(
"Time",
)
ax.set_title(
"Dynamics for {}".format(self.model_name_latex),
)
ax.legend(
# prop={'size' : label_fontsize}
)
lf.save(
os.path.join(
self.model_learning_plots_directory,
"{}dynamics_{}".format(self.plot_prefix, self.model_id),
),
file_format=self.figure_format,
)
##########
# Section: Utilities
##########
def log_print(self, to_print_list, log_identifier=None):
r"""Wrapper for :func:`~qmla.print_to_log`"""
if log_identifier is None:
log_identifier = "ModelForLearning {}".format(self.model_id)
qmla.logging.print_to_log(
to_print_list=to_print_list,
log_file=self.log_file,
log_identifier=log_identifier,
)
def log_print_debug(self, to_print_list):
r"""Log print if global debug_log_print set to True."""
if self.debug_mode:
self.log_print(
to_print_list=to_print_list,
log_identifier="Debug Model {}".format(self.model_id),
)
def _consider_reallocate_resources(self):
r"""Model might get less resources if it is deemed less complex than others"""
if self.exploration_class.reallocate_resources:
base_resources = qmla_core_info_dict["base_resources"]
this_model_num_qubits = self.model_dimension
this_model_num_terms = self.model_constructor.num_terms
max_num_params = self.exploration_class.max_num_parameter_estimate
new_resources = qmla.utilities.resource_allocation(
base_qubits=base_resources["num_qubits"],
base_terms=base_resources["num_terms"],
max_num_params=max_num_params,
this_model_qubits=this_model_num_qubits,
this_model_terms=this_model_num_terms,
num_experiments=self.num_experiments,
num_particles=self.num_particles,
)
self.num_experiments = new_resources["num_experiments"]
self.num_particles = new_resources["num_particles"]
self.log_print(
"After resource reallocation on {}: {} experiments and {} particles".format(
self.model_name, self.num_experiments, self.num_particles
)
)
def _store_prior(self):
r"""Save the prior raw and as plot."""
store_all_priors = False # optional
if not store_all_priors:
return
prior_dir = str(self.results_directory + "priors/QMLA_{}/".format(self.qmla_id))
if not os.path.exists(prior_dir):
try:
os.makedirs(prior_dir)
except BaseException:
# if already exists (ie created by another QMLA instance)
pass
prior_file = str(prior_dir + "prior_" + str(self.model_id) + ".png")
individual_terms_in_name = self.model_constructor.terms_names
latex_terms = []
for term in individual_terms_in_name:
# lt = self.exploration_class.latex_name(
# name=term
# )
lt = self.model_constructor.latex_name_method(term)
latex_terms.append(lt)
try:
qmla.shared_functionality.prior_distributions.plot_prior(
model_name=self.model_name_latex,
model_name_individual_terms=latex_terms,
prior=self.model_prior,
plot_file=prior_file,
)
except BaseException:
self.log_print(["Failed to plot prior"])
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,
198,
11748,
640,
201,
198,
11748,
4866,
201,
198,
11748,
629,
541,
88,
201,
198,
11748,
4738,
201,
198,
11748,
18931,
201,
198,
11748,
19798,
292,
355,
... | 1.907423 | 33,356 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_blend_func_extended'
GL_MAX_DUAL_SOURCE_DRAW_BUFFERS=_C('GL_MAX_DUAL_SOURCE_DRAW_BUFFERS',0x88FC)
GL_ONE_MINUS_SRC1_ALPHA=_C('GL_ONE_MINUS_SRC1_ALPHA',0x88FB)
GL_ONE_MINUS_SRC1_COLOR=_C('GL_ONE_MINUS_SRC1_COLOR',0x88FA)
GL_SRC1_ALPHA=_C('GL_SRC1_ALPHA',0x8589)
GL_SRC1_COLOR=_C('GL_SRC1_COLOR',0x88F9)
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint,arrays.GLcharArray)
@_f
@_p.types(_cs.GLint,_cs.GLuint,arrays.GLcharArray)
| [
7061,
6,
16541,
519,
877,
515,
416,
35555,
62,
8612,
378,
4226,
11,
466,
407,
4370,
0,
7061,
6,
201,
198,
6738,
30672,
1330,
3859,
355,
4808,
79,
11,
26515,
201,
198,
2,
6127,
5270,
3544,
428,
201,
198,
6738,
30672,
13,
1831,
13,
... | 2.140162 | 371 |
# wujian@2018
"""
SI-SNR(scale-invariant SNR/SDR) measure of speech separation
"""
import numpy as np
from itertools import permutations
import pdb
def si_snr(x, s, remove_dc=True):
    """
    Compute SI-SNR (scale-invariant SNR) of an estimate against a reference.

    Arguments:
        x: vector, enhanced/separated signal
        s: vector, reference signal (ground truth)
        remove_dc: subtract each signal's mean first (seems not to hurt results)
    """
    if remove_dc:
        x = x - np.mean(x)
        s = s - np.mean(s)
    # Project x onto the reference: the projection is the "target" component,
    # everything left over is treated as noise.
    target = np.inner(x, s) * s / vec_l2norm(s) ** 2
    noise = x - target
    # Small epsilons avoid division by zero / log of zero.
    return 20 * np.log10((vec_l2norm(target) / (vec_l2norm(noise) + 0.000001)) + 0.000001)
def permute_si_snr(xlist, slist):
    """
    Compute the best average SI-SNR over all pairings of N estimates to
    N references.

    Arguments:
        xlist: list[vector], enhanced/separated signals
        slist: list[vector], reference signals (ground truth)
    """
    N = len(xlist)
    if N != len(slist):
        raise RuntimeError(
            "size do not match between xlist and slist: {:d} vs {:d}".format(
                N, len(slist)))
    # Evaluate every reference ordering and keep the best average SI-SNR.
    return max(
        si_snr_avg(xlist, [slist[n] for n in order])
        for order in permutations(range(N))
    )
def permute_si_snr_mix_of_mix(xlist, slist):
    """
    Find the best ordering of N estimates against N references by SI-SNR.

    Arguments:
        xlist: list[vector], enhanced/separated signals
        slist: list[vector], reference signals (ground truth)
    Return:
        list[vector]: the estimates reordered into the best-scoring order
    """
    N = len(xlist)
    if N != len(slist):
        raise RuntimeError(
            "size do not match between xlist and slist: {:d} vs {:d}".format(
                N, len(slist)))
    # Pick the permutation maximising average SI-SNR against the references.
    # Ties resolve to the first-seen order, matching the original
    # strict-improvement comparison.
    best_order = max(
        permutations(range(N)),
        key=lambda order: si_snr_avg([xlist[n] for n in order], slist),
    )
    return [xlist[n] for n in best_order]
| [
2,
266,
23577,
666,
31,
7908,
198,
37811,
198,
11584,
12,
15571,
49,
7,
9888,
12,
16340,
2743,
415,
11346,
49,
14,
50,
7707,
8,
3953,
286,
4046,
14139,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
340,
861,
... | 2.105561 | 1,061 |
from .base import BaseResource
import requests | [
198,
6738,
764,
8692,
1330,
7308,
26198,
198,
11748,
7007
] | 4.7 | 10 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 07:53:36 2020
Illustrates linear least squares fitting of data: noisy samples of a known
linear function are generated, then the intercept and slope are recovered by
solving the normal equations with Gaussian elimination.
@author: zettergm
"""
# Imports
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../linear_algebra")
from elimtools import Gauss_elim,backsub
# Grid and indep. vars for problem, linear function y=a+b*x
n=40 #number of data points
a=2 #y-intercept, linear fn.
b=3 #slope, linear fn.
minx=-5
maxx=5
xdata=np.linspace(minx,maxx,n)
# Generation of Gaussian random numbers in Python
dev=5.0 #standard deviation of measurement noise
mean=0.5 #models calibration error in measurement, offset
noise=dev*np.random.randn(n)+mean
ytrue=a+b*xdata
ydata=ytrue+noise
# Plot of function and noisy data
plt.figure(1)
plt.plot(xdata,ytrue,"--")
plt.plot(xdata,ydata,"o",markersize=6)
plt.xlabel("x")
plt.ylabel("y")
# Solution using least squares: J is the n x 2 design matrix [1, x]; the
# normal equations M a = J^T y are solved by Gaussian elimination + back
# substitution, giving avec = [intercept, slope].
J=np.concatenate(( np.reshape(np.ones(n),(n,1)),np.reshape(xdata,(n,1)) ),axis=1)
M=J.transpose()@J
yprime=J.transpose()@np.reshape(ydata,(n,1))
[Mmod,order]=Gauss_elim(M,yprime,False)
avec=backsub(Mmod[order,:],False)
yfit=avec[0]+avec[1]*xdata
plt.plot(xdata,yfit,'-')
plt.legend(("original function","noisy data","fitted function"))
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2556,
1315,
8753,
25,
4310,
25,
2623,
12131,
198,
198,
21478,
436,
9700,
14174,
1551... | 2.340952 | 525 |