| content (string, lengths 1 to 1.04M) | input_ids (list, lengths 1 to 774k) | ratio_char_token (float64, 0.38 to 22.9) | token_count (int64, 1 to 774k) |
|---|---|---|---|
from django.db import IntegrityError, transaction
from curation.parsers.generic import GenericData
from curation.parsers.trait import TraitData
from catalog.models import Score
| [
6738,
42625,
14208,
13,
9945,
1330,
39348,
12331,
11,
8611,
198,
6738,
1090,
341,
13,
79,
945,
364,
13,
41357,
1330,
42044,
6601,
198,
6738,
1090,
341,
13,
79,
945,
364,
13,
9535,
270,
1330,
4759,
270,
6601,
198,
6738,
18388,
13,
27... | 3.847826 | 46 |
import unittest
import numpy as np
from spyne import Tensor, Constant
class TestTensor(unittest.TestCase):
""" A set of tests to validate tensor definitions, attributes, functionality etc..."""
def test_tensor_attributes(self):
""" Test tensor attributes """
# value attr should return the wrapped data
self.assertTrue(np.array_equal(self.tens.value, self.data))
# shape should return the shape of the wrapped data
self.assertEqual(self.tens.shape, (2, 2, 3))
# node_uid should be a random string generated uniquely for each tensor
self.assertTrue(type(self.tens.node_uid) == str)
self.assertTrue(len(self.tens.node_uid) > 0)
new_tens = Tensor(self.tens.value)
self.assertNotEqual(self.tens.node_uid, new_tens.node_uid)
def test_tensor_instantiation(self):
""" Test tensor instantiation approaches """
new_tensor = Tensor(self.data.tolist())
# tensors can accept lists on instantiation and will internally convert the data to an ndarray
self.assertTrue(np.array_equal(new_tensor.value, self.tens.value))
# for now tensors and constants share identical functionality. In the future they may not.
class TestConstant(unittest.TestCase):
""" A set of tests to validate Constant's definitions, attributes, functionality etc..."""
def test_constants_attributes(self):
""" Test tensor attributes """
# value attr should return the wrapped data
self.assertTrue(np.array_equal(self.const.value, self.data))
# shape should return the shape of the wrapped data
self.assertEqual(self.const.shape, (2, 2, 3))
# node_uid should be a random string generated uniquely for each tensor
self.assertTrue(type(self.const.node_uid) == str)
self.assertTrue(len(self.const.node_uid) > 0)
        new_const = Constant(self.const.value)
self.assertNotEqual(self.const.node_uid, new_const.node_uid)
def test_constants_instantiation(self):
""" Test tensor instantiation approaches """
new_const = Tensor(self.data.tolist())
# tensors can accept lists on instantiation and will internally convert the data to an ndarray
self.assertTrue(np.array_equal(new_const.value, self.const.value))
if __name__ == '__main__':
unittest.main() | [
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
13997,
710,
1330,
309,
22854,
11,
20217,
628,
198,
4871,
6208,
51,
22854,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
220,
37227,
317,
900,
286,
5254,
... | 2.730012 | 863 |
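The tests in the snippet above assume a spyne Tensor/Constant API (an ndarray wrapper exposing value, shape and a unique node_uid) that is not itself shown. A minimal sketch of what the tests imply, with the uuid-based id being an assumption, might look like this:

import uuid
import numpy as np

class Tensor:
    def __init__(self, data):
        # lists are converted to an ndarray internally
        self.value = np.asarray(data)
        self.shape = self.value.shape
        # a string generated uniquely for each tensor (uuid is an assumption here)
        self.node_uid = uuid.uuid4().hex

class Constant(Tensor):
    # for now constants share the Tensor behaviour, as the test comments note
    pass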
import gym
import option_keyboard.envs
import time
import numpy as np
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
KEY_MAP = ['UP', 'RIGHT', 'DOWN', 'LEFT']
if __name__ == '__main__':
main()
| [
11748,
11550,
198,
11748,
3038,
62,
2539,
3526,
13,
268,
14259,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
198,
8577,
796,
657,
198,
49,
9947,
796,
352,
198,
41925,
796,
362,
198,
2538,
9792,
796,
513,
198,
198,
20373,
... | 2.405063 | 79 |
# https://github.com/codeninja/CCXT-Historical-Data/blob/master/Binance%20Historical%20Data%20.ipynb
import ccxt
from datetime import datetime, timedelta, timezone
import math
import argparse
import pandas as pd
import csv
from pathlib import Path
import sys
scrape_candles_to_csv('btc_usdt_1m.csv', 'binance', 3, 'BTC/USDT', '1m',
                      '2020-09-01T00:00:00Z', 1000)
| [
2,
3740,
1378,
12567,
13,
785,
14,
19815,
268,
259,
6592,
14,
4093,
25010,
12,
13749,
12409,
12,
6601,
14,
2436,
672,
14,
9866,
14,
33,
14149,
4,
1238,
13749,
12409,
4,
1238,
6601,
4,
1238,
13,
541,
2047,
65,
198,
11748,
36624,
74... | 2.401235 | 162 |
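The snippet above calls scrape_candles_to_csv() without showing it; that function lives in the linked notebook. As a hedged illustration only (not the notebook's implementation), a minimal single-request version built on ccxt's public API could look like this:

import ccxt
import pandas as pd

def fetch_ohlcv_to_csv(filename, exchange_id, symbol, timeframe, since_iso, limit):
    # instantiate the exchange by name, e.g. 'binance', with rate limiting enabled
    exchange = getattr(ccxt, exchange_id)({'enableRateLimit': True})
    since = exchange.parse8601(since_iso)
    # fetch up to `limit` OHLCV candles starting at `since`
    candles = exchange.fetch_ohlcv(symbol, timeframe, since, limit)
    df = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
    df.to_csv(filename, index=False)

The real helper additionally paginates over multiple requests and retries on errors, which is omitted in this sketch.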
__author__ = 'jatwood'
import sys
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from graph_scnn import GraphSCNN
import data
import util
if __name__ == '__main__':
np.random.seed()
args = sys.argv[1:]
name_to_data = {
'nci1': lambda: data.parse_nci(graph_name='nci1.graph'),
'nci109': lambda: data.parse_nci(graph_name='nci109.graph'),
'mutag': lambda : data.parse_nci(graph_name='mutag.graph'),
'ptc': lambda : data.parse_nci(graph_name='ptc.graph'),
'enzymes': lambda : data.parse_nci(graph_name='enzymes.graph'),
'nci1struct': lambda: data.parse_nci(graph_name='nci1.graph', with_structural_features=True),
'nci109struct': lambda: data.parse_nci(graph_name='nci109.graph', with_structural_features=True),
}
transform_lookup = {
'id': None,
'rwl': util.rw_laplacian,
'l': util.laplacian,
}
name = args[0]
data_fn = name_to_data[name]
n_hops = int(args[1])
transform_name = args[2]
transform_fn = transform_lookup[transform_name]
scnn_graph_proportion_experiment(data_fn, name, n_hops, 0.1, 0.1, transform_fn=transform_fn, transform_name=transform_name)
| [
834,
9800,
834,
796,
705,
73,
265,
3822,
6,
198,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
277,
16,
62,
26675,
11,
9922,
62,
26675,
198,
198,
6738,
4823,
62,
1416,
20471,
1... | 2.306238 | 529 |
PC_RACE, NPC_RACE, SMALL_ANIMAL, LARGE_ANIMAL, MONSTER = range(5)
RACE_TYPE_CHOICES = (
(PC_RACE, "Allowed Player Character Race"),
(NPC_RACE, "NPC Only Race"),
(SMALL_ANIMAL, "Small Animal"),
(LARGE_ANIMAL, "Large Animal"),
(MONSTER, "Monster"),
)
CHEST_KEY, ROOM_KEY = range(2)
KEY_CHOICES = ((CHEST_KEY, "chest key"), (ROOM_KEY, "room key"))
SINGLE, MARRIED, WIDOWED, DIVORCED = "single", "married", "widowed", "divorced"
MARITAL_STATUS_CHOICES = (
(SINGLE, "Single"),
(MARRIED, "Married"),
(WIDOWED, "Widowed"),
(DIVORCED, "Divorced"),
)
| [
5662,
62,
49,
11598,
11,
15888,
62,
49,
11598,
11,
9447,
7036,
62,
1565,
3955,
1847,
11,
47211,
8264,
62,
1565,
3955,
1847,
11,
25000,
41809,
796,
2837,
7,
20,
8,
198,
198,
49,
11598,
62,
25216,
62,
44899,
34444,
796,
357,
198,
22... | 2.151292 | 271 |
#!/usr/bin/python
# coding=utf-8
import time
from collections import OrderedDict
from Key import Key
if __name__ == '__main__':
n = 100
print_dict('Key generation', test_generation_perf(n), n)
print_dict('Signing', test_signing_perf(n), n)
print_dict('Verifying', test_verification_perf(n), n)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
640,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
7383,
1330,
7383,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,... | 2.593496 | 123 |
from flask import Flask, render_template
import requests
import json
########################################################################################################################
app = Flask(__name__)
########################################################################################################################
# METHOD THAT ALLOWS MAKING THE POST TO THE SERVER VIA THE /START ROUTE
@app.route('/start')
# METHOD THAT ALLOWS VIEWING THE PROCESSING OF THE TXT FILE VIA THE /RESULTADO ROUTE.
@app.route('/resultado')
########################################################################################################################
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
11748,
7007,
198,
11748,
33918,
198,
198,
29113,
29113,
29113,
14468,
7804,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
628,
198,
29113,
29113,
29113,
14468,
7804,
198,
198,
2,
... | 4.201117 | 179 |
"""
views
"""
import logging
import sys, traceback
from django.shortcuts import render_to_response
from django.template import RequestContext
from core.common.utils import getPrefix, getContextVariables
from django.core.urlresolvers import reverse
#_logger = logging.getLogger(__name__)
_logger = logging.getLogger('todoview')
# Create your views here.
def todoTaskDescription(request, taskid="1"):
"""
placeholder for implementation of view with ID "TODO-task-description(taskid)":
"""
# _logger.debug('reverse:' + str(reverse('todoview:todoTaskDescription')))
_logger.debug('taskid:' + str(taskid))
try:
_logger.debug('reverse(ExtraTodoTaskDescription):' + str(reverse('ExtraTodoTaskDescription', args=(taskid,))))
except:
_logger.debug('reverse(ExtraTodoTaskDescription) failed:' + str(traceback.format_exc()))
try:
_logger.debug('reverse(todoview:todoTaskDescription):' + str(reverse('todoview:todoTaskDescription', args=(taskid,))))
except:
_logger.debug('reverse(todoview:todoTaskDescription) failed:' + str(traceback.format_exc()))
data = {
'prefix': getPrefix(request),
'taskid': taskid,
}
data.update(getContextVariables(request))
return render_to_response('todoview/todo-task-description.html', data, RequestContext(request))
| [
37811,
220,
198,
33571,
198,
198,
37811,
198,
198,
11748,
18931,
198,
11748,
25064,
11,
12854,
1891,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
198,
... | 2.709163 | 502 |
################################
# Splunk Connector for OpenCTI #
################################
import os
import yaml
import json
import requests
from pycti import OpenCTIConnectorHelper, get_config_variable
if __name__ == "__main__":
SplunkInstance = SplunkConnector()
SplunkInstance.start()
| [
29113,
198,
2,
13341,
2954,
8113,
273,
329,
4946,
4177,
40,
1303,
198,
29113,
198,
198,
11748,
28686,
198,
11748,
331,
43695,
198,
11748,
33918,
198,
11748,
7007,
198,
198,
6738,
12972,
310,
72,
1330,
4946,
4177,
2149,
261,
1606,
273,
... | 3.678571 | 84 |
from configurations import config_grabber as cg
from extendedminigrid import *
from perception import Perception
import gym
class SafetyEnvelope(gym.core.Wrapper):
"""
Safety envelope for safe exploration.
Uses monitors for avoiding unsafe actions and shaping rewards
""" | [
198,
6738,
25412,
1330,
4566,
62,
32393,
527,
355,
269,
70,
198,
198,
6738,
7083,
1084,
3692,
312,
1330,
1635,
198,
6738,
11202,
1330,
35802,
198,
198,
11748,
11550,
628,
198,
198,
4871,
11233,
4834,
1091,
68,
7,
1360,
76,
13,
7295,
... | 3.708861 | 79 |
# MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Test CANU validate network config commands."""
from os import mkdir, urandom
from unittest.mock import patch
from click import testing
from netmiko import ssh_exception
from canu.cli import cli
from .test_validate_switch_config import switch_config
username = "admin"
password = "admin"
ips = "192.168.1.1"
credentials = {"username": username, "password": password}
cache_minutes = 0
running_config_file = "running_switch.cfg"
csm = "1.0"
runner = testing.CliRunner()
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command runs."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.return_value = "sw-spine-001"
netmiko_command.return_value = switch_config
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001 (192.168.1.1)\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
def test_validate_network_config_running_file():
"""Test that the `canu validate network config` command runs."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
mkdir("running")
mkdir("generated")
with open("running/running_switch.cfg", "w") as f:
f.writelines(switch_config)
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--running",
"running/",
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_file(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command runs from a file."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.return_value = "sw-spine-001"
netmiko_command.return_value = switch_config
mkdir("generated")
with open("test.txt", "w") as f:
f.write("192.168.1.1")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips-file",
"test.txt",
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001 (192.168.1.1)\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_password_prompt(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command runs and prompts for password."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.return_value = "sw-spine-001"
netmiko_command.return_value = switch_config
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--generated",
"generated/",
"--csm",
csm,
],
input=password,
)
assert result.exit_code == 0
assert (
"Switch: sw-spine-001 (192.168.1.1)\n"
+ "Differences\n"
+ "-------------------------------------------------------------------------\n"
+ "In Generated Not In Running (+) | In Running Not In Generated (-) \n"
+ "-------------------------------------------------------------------------\n"
+ "Total Additions: 1 | Total Deletions: 1\n"
+ " | Script: 1\n"
+ "Router: 1 | \n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_timeout(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command errors on timeout."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.side_effect = ssh_exception.NetmikoTimeoutException
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Errors\n"
+ "----------------------------------------------------------------------------------------------------\n"
+ "192.168.1.1 - Timeout error. Check the IP address and try again.\n"
) in str(result.output)
@patch("canu.validate.switch.config.config.switch_vendor")
@patch("canu.validate.switch.config.config.netmiko_command")
def test_validate_network_config_authentication(netmiko_command, switch_vendor):
"""Test that the `canu validate network config` command errors on authentication."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
netmiko_command.side_effect = ssh_exception.NetmikoAuthenticationException
mkdir("generated")
with open("generated/sw-spine-001.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--ips",
ips,
"--username",
username,
"--password",
password,
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"Errors\n"
+ "----------------------------------------------------------------------------------------------------\n"
+ "192.168.1.1 - Authentication error. Check the credentials or IP address and try again"
) in str(result.output)
def test_validate_network_config_bad_config_file():
"""Test that the `canu validate network config` command fails on bad file."""
switch_config_edit = switch_config[:-15] + "router add\n"
with runner.isolated_filesystem():
mkdir("running")
mkdir("generated")
# Generate random binary file
with open("running/bad.file", "wb") as f:
f.write(urandom(128))
with open("running/bad_config.cfg", "w") as f:
f.write("bad")
with open("running/switch.cfg", "w") as f:
f.writelines(switch_config_edit)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"network",
"config",
"--running",
"running/",
"--generated",
"generated/",
"--csm",
csm,
],
)
assert result.exit_code == 0
assert (
"running/bad_config.cfg - The file running/bad_config.cfg is not a valid config file."
) in str(result.output)
assert (
"sw-spine-001 - Could not find generated config file generated/sw-spine-001.cfg"
) in str(result.output)
assert (
"running/bad.file - The file running/bad.file is not a valid config file."
) in str(result.output)
| [
2,
17168,
13789,
198,
2,
198,
2,
357,
34,
8,
15069,
685,
1238,
1828,
60,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
4866,... | 2.013086 | 6,572 |
"""
Class file for the rest_entiries_by_entities management command
"""
import tqdm
from django.core.management.base import BaseCommand
from django.db import transaction
from sunless_web.models import Entry, Entity, Translation, Discussion
class Command(BaseCommand):
""" V1 > V2 데이터 마이그레이션용 임시 커맨드 """
help = 'Delete all translations of entries and get from entities'
@transaction.atomic
def insert_as_checker(user, entry, translate, final):
""" insert old tranlations to entry as user """
if translate:
trans = Translation(entry=entry, text=translate, user=user)
trans.save()
Discussion(msg='기존 유저 번역 등록', translate=trans).save()
if final:
trans = Translation(entry=entry, text=final, user=user)
trans.save()
Discussion(msg='기존 유저 번역 등록', translate=trans).save()
| [
37811,
198,
2118,
62,
298,
18561,
62,
1525,
62,
298,
871,
23821,
119,
97,
167,
100,
101,
167,
241,
250,
168,
248,
102,
220,
169,
223,
112,
167,
252,
246,
168,
232,
97,
220,
169,
234,
234,
35975,
120,
198,
37811,
198,
11748,
256,
... | 2.166227 | 379 |
"""
A link is based on an GMNS link (https://github.com/zephyr-data-specs/GMNS/blob/master/Specification/link.schema.json). However, our links are only one way: all two way links are broken into two one-way links. This means there is only one direction to consider.
Links are made up of one, two, three, or four of the following Segments:
1. Lanes. Lanes are wide enough for a motor vehicle. Bicycles and pedestrians may also use segments. Lanes have direction from their parent link.
2. Cycleways. Cycleways may be found between segments and sidewalks. They are wide enough for a bicycle. Pedestrians may also use segments. Motor vehicles may unfortunately end up in cycleways.
3. Sidewalks. Sidewalks are on the side of a link. Bicycles and pedestrians may use sidewalks. Motor vehicles may also end up on sidewalks.
4. Parking. A link must have at least one lane to have parking. Parking goes in between the segments and the cycleway in the case of a protected cycleway and in between the cycleway and the sidewalk in the case of an unprotected cycleway.
Things to fix: What about a (right-hand drive) cycleway on the left side of a one-way street?
"""
import requests
from .lane import Lane
from .cycleway import Cycleway
from .sidewalk import Sidewalk
from .parking import Parking
class Link:
"""
Note: the output_intersection of a link means that link is an input_link of that intersection. And the input_intersection of a link means that link is an output_link of that intersection
"""
| [
37811,
198,
32,
2792,
318,
1912,
319,
281,
6951,
8035,
2792,
357,
5450,
1378,
12567,
13,
785,
14,
89,
27446,
2417,
12,
7890,
12,
4125,
6359,
14,
15548,
8035,
14,
2436,
672,
14,
9866,
14,
22882,
2649,
14,
8726,
13,
15952,
2611,
13,
... | 3.779703 | 404 |
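The docstring above describes a Link as a one-way bundle of up to four segment types plus its two intersections. A minimal sketch of that composition (not the real implementation; the constructors of Lane, Cycleway, Sidewalk and Parking are not shown in the snippet, so plain lists are used here) could be:

class Link:
    def __init__(self, lanes=None, cycleways=None, sidewalks=None, parking=None,
                 input_intersection=None, output_intersection=None):
        # every two-way GMNS link is represented as two one-way Link objects
        self.lanes = lanes or []
        self.cycleways = cycleways or []
        self.sidewalks = sidewalks or []
        self.parking = parking or []
        # this link is an output_link of its input_intersection and an
        # input_link of its output_intersection
        self.input_intersection = input_intersection
        self.output_intersection = output_intersection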
#!/usr/bin/env python3
#
# formatted with black
#
import json
import requests
import sys
from os import getenv
if __name__ == "__main__":
# nest_access_token = get_nest_access_token(
# getenv("NEST_CLIENT_ID"),
# getenv("NEST_CLIENT_SECRET"),
# getenv("NEST_AUTHORIZATION_CODE"),
# )
nest_access_token = {}
nest_access_token["access_token"] = getenv("NEST_ACCESS_TOKEN")
if nest_access_token["access_token"] is None:
print("Please set the NEST_ACCESS_TOKEN environment variable")
sys.exit(1)
results = get_nest_temperatures(nest_access_token["access_token"])
if "status_code" in results:
print(json.dumps(results, indent=4))
else:
print_results_stdout(results)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
39559,
351,
2042,
198,
2,
198,
198,
11748,
33918,
198,
11748,
7007,
198,
11748,
25064,
198,
6738,
28686,
1330,
651,
24330,
628,
628,
198,
198,
361,
11593,
3672,
834,
6... | 2.380503 | 318 |
# imports
import time
from actors import Wizard, Creature
import random
if __name__ == '__main__':
main()
| [
2,
17944,
198,
11748,
640,
198,
198,
6738,
10544,
1330,
16884,
11,
33248,
198,
11748,
4738,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 3.194444 | 36 |
"""
Trains a simple language model based on the one found at
https://github.com/facebookresearch/adaptive-softmax and generates comparative
results for full softmax, differentiated softmax, and adaptive softmax. This
benchmark uses the text8 (http://mattmahoney.net/dc/textdata.html) dataset. This
dataset isn't the best demonstration of adaptive softmax's strengths, but it is
of a convenient size for downloading and training in a reasonable amount of
time.
You can run the benchmark by executing the following at the project root:
PYTHONPATH="$PYTHONPATH:." python examples/text8_benchmark.py --graph
You can see all of the other options by using the `--help` option:
usage: text8_benchmark.py [-h] [-b {adaptive,full,differentiated}]
[--no-resume] [--output-directory OUTPUT_DIRECTORY]
[--graph]
optional arguments:
-h, --help show this help message and exit
-b {adaptive,full,differentiated}, --benchmarks {adaptive,full,differentiated}
run benchmark for different variations of softmax
--no-resume prevents resuming a previously interrupted benchmark
--output-directory OUTPUT_DIRECTORY
where to store output of benchmark
--graph dump a graph of perplexity over time for bencmarks
By default, the benchmark runs for every variation of softmax. This can take a
long time to train on the CPU (over a day) so use of a GPU is recommended.
"""
from keras.utils.data_utils import get_file
from keras.preprocessing import text
from keras.preprocessing import sequence
from keras import initializers
from keras.models import Model
from keras.layers import (Dense,
Dropout,
Input,
LSTM,
Embedding,
Activation)
from keras.optimizers import Adagrad
from trimble.keras.adaptive import (DifferentiatedSoftmaxProduceLogits,
AdaptiveSoftmaxProduceLogits,
AdaptiveLogProb)
from zipfile import ZipFile
import numpy as np
import tensorflow as tf
import math
import io
import time
import os
import json
TEXT8_DATA_URL='http://mattmahoney.net/dc/text8.zip'
def load_data(vocab_size=45000, batch_size=128, sequence_length=20, output_directory='./benchmark_out'):
"""
Loads Text8 dataset. (http://mattmahoney.net/dc/textdata.html)
# Arguments
vocab_size: maximum number of words to use.
batch_size: the batch size that will be used when this data is passed to
`Model.fit(..)` or similar function.
sequence_length: the number of time steps for each batch.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
raw_data = _load_raw_text8_data(output_directory=output_directory)
train_text, dev_text = _split_text8(raw_data)
tokenizer = _build_tokenizer(train_text, vocab_size=vocab_size)
raw_data = None # allow gc
eos_idx = tokenizer.word_index['</s>']
results = []
data_sequence = tokenizer.texts_to_sequences([train_text])[0] + [eos_idx]
results.append(_segment_sequence_into_batches(data_sequence, eos_idx, batch_size, sequence_length))
data_sequence = tokenizer.texts_to_sequences([dev_text])[0] + [eos_idx]
results.append(_segment_sequence_into_batches(data_sequence, eos_idx, batch_size, sequence_length))
return tuple(results)
if __name__ == '__main__':
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--benchmarks',
choices=['adaptive', 'full', 'differentiated'],
action='append',
help="run benchmark for different variations of softmax")
parser.add_argument('--iterations',
type=int,
default=10,
help="number of training iterations")
parser.add_argument('--no-resume',
dest='resume',
action='store_false',
help="prevents resuming a previously interrupted benchmark")
parser.add_argument('--output-directory',
dest="output_directory",
default='benchmark_out',
help="where to store output of benchmark")
parser.add_argument('--graph',
action='store_true',
help="dump a graph of perplexity over time for bencmarks")
options = parser.parse_args()
options.benchmarks = options.benchmarks or ['adaptive', 'full', 'differentiated']
if not os.path.exists(options.output_directory):
os.mkdir(options.output_directory)
result = run_benchmarks(options.iterations, benchmarks=options.benchmarks, output_directory=options.output_directory, resume=options.resume)
print_summary(result)
if options.graph:
dump_graph(result, os.path.join(options.output_directory, 'text8_performance_comparison.png'))
| [
37811,
198,
2898,
1299,
257,
2829,
3303,
2746,
1912,
319,
262,
530,
1043,
379,
198,
5450,
1378,
12567,
13,
785,
14,
19024,
34033,
14,
42552,
425,
12,
4215,
9806,
290,
18616,
29270,
198,
43420,
329,
1336,
2705,
9806,
11,
47543,
2705,
9... | 2.420609 | 2,135 |
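load_data() above relies on a helper _segment_sequence_into_batches that is not included in the snippet. A sketch of what such a helper typically does for language modelling (an assumption, not the project's actual code): lay the token stream out as batch_size contiguous rows, then cut the rows into windows of sequence_length inputs paired with next-token targets.

import numpy as np

def _segment_sequence_into_batches(sequence, eos_idx, batch_size, sequence_length):
    # eos_idx is accepted only to mirror the call sites above; this sketch does not need it
    data = np.array(sequence)
    # keep only as many tokens as fill whole (batch_size x sequence_length) windows, plus one for targets
    n = (len(data) - 1) // (batch_size * sequence_length)
    data = data[:n * batch_size * sequence_length + 1]
    x = data[:-1].reshape(batch_size, -1)   # inputs
    y = data[1:].reshape(batch_size, -1)    # next-token targets
    x_batches = np.split(x, n, axis=1)
    y_batches = np.split(y, n, axis=1)
    return list(zip(x_batches, y_batches))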
from action import ActionType
from action import Action
from action import Dir
import copy
from merger import merge
from state import StateMA
from typing import List
import sys
| [
6738,
2223,
1330,
7561,
6030,
198,
6738,
2223,
1330,
7561,
198,
6738,
2223,
1330,
36202,
198,
11748,
4866,
198,
6738,
24589,
1330,
20121,
198,
6738,
1181,
1330,
1812,
5673,
198,
6738,
19720,
1330,
7343,
198,
11748,
25064,
220,
220,
220,
... | 4.181818 | 44 |
from collections import defaultdict
obj = solution()
s = ["eat","tea","tan","ate","nat","bat"]
s2 = [""]
s3 = ["a"]
res = obj.groupAnagrams(s)
print(res)
| [
6738,
17268,
1330,
4277,
11600,
198,
198,
26801,
796,
4610,
3419,
198,
82,
796,
14631,
4098,
2430,
660,
64,
2430,
38006,
2430,
378,
2430,
32353,
2430,
8664,
8973,
198,
82,
17,
796,
14631,
8973,
198,
82,
18,
796,
14631,
64,
8973,
198,
... | 2.627119 | 59 |
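The snippet above instantiates a solution class that is not shown. A minimal sketch of the groupAnagrams method it calls (an assumption, though it is the standard defaultdict approach the import suggests):

from collections import defaultdict

class solution:
    def groupAnagrams(self, strs):
        # words that are anagrams of each other share the same sorted letter tuple
        groups = defaultdict(list)
        for word in strs:
            groups[tuple(sorted(word))].append(word)
        return list(groups.values())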
# Copyright (c) 2016-2017 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides a simple GUI viewer for Cozmo's camera.
It uses Tkinter, the standard Python GUI toolkit which is optionally available
on most platforms, and also depends on the Pillow and numpy libraries for
image processing.
See the online SDK documentation for details on how to install these extra
packages on your platform.
The easiest way to make use of this viewer is to call
:func:`cozmo.run.connect_with_tkviewer`.
Warning:
This package requires Python to have Tkinter installed to display the GUI.
'''
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['TkImageViewer']
import cozmo
import collections
import functools
import queue
import platform
import time
from PIL import Image, ImageDraw, ImageTk
import tkinter
from . import world
class TkThreadable:
'''A mixin for adding threadsafe calls to tkinter methods.'''
#pylint: disable=no-member
# no-member errors are raised in pylint regarding members/methods called but not defined in our mixin.
class TkImageViewer(tkinter.Frame, TkThreadable):
'''Simple Tkinter camera viewer.'''
# TODO: rewrite this whole thing. Make a generic camera widget
# that can be used in other Tk applications. Also handle resizing
# the window properly.
# The base class configure doesn't take an event
#pylint: disable=arguments-differ
| [
2,
15069,
357,
66,
8,
1584,
12,
5539,
1052,
4106,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
2... | 3.545293 | 563 |
import numpy as np
def noj(rel_pos, c_t, D, k):
"""N.O. Jensen single wake deficit model
This function checks if r is greater than the wake radius!
Parameters
-----------
rel_pos: ndarray [n,3]
x,y,z relative position compared to the upstream turbine
c_t: float | ndarray [n]
upstream wind turbine thrust coefficient
D: float | ndarray [n]
upstream wind turbine rotor diameter
k: float | ndarray [n]
wake expansion parameter
Returns
-------
du: float | ndarray [n]
The wind speed deficit at the specified positions
"""
x = rel_pos[:, 0]
r = np.sqrt(rel_pos[:, 1] ** 2.0 + rel_pos[:, 2] ** 2.0)
# Radius
R = D / 2.0
# NOJ Specific
Rw = R + k * x # upstream turbine wake radius
DU = - (1.0 - np.sqrt(1.0 - c_t)) / (1.0 + (k * x) / R) ** 2.0
# Upstream cases
DU[x < 0.0] = 0.0
DU[abs(r) > Rw] = 0.0
return DU
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
645,
73,
7,
2411,
62,
1930,
11,
269,
62,
83,
11,
360,
11,
479,
2599,
198,
220,
220,
220,
37227,
45,
13,
46,
13,
32623,
2060,
7765,
11807,
2746,
198,
220,
220,
220,
770,
2163,
8794,
... | 2.155508 | 463 |
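A short usage example for the noj() function defined above (values chosen only for illustration): with D = 80 m and k = 0.05, the wake radius 500 m downstream is R + k*x = 65 m, so a point 80 m off the axis sees no deficit.

import numpy as np

# assumes noj() from the snippet above is in scope
rel_pos = np.array([[500.0,  0.0, 0.0],
                    [500.0, 40.0, 0.0],
                    [500.0, 80.0, 0.0]])
du = noj(rel_pos, c_t=0.8, D=80.0, k=0.05)
print(du)  # first two entries are negative deficits, the last is 0.0 (outside the wake)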
class WebsiteNotFound(Exception):
"""
*WebsiteNotFound raise*
When requests can't reach a website,
this error should pop up.
(This should make it more clear)
"""
pass
class KillSwitch(Exception):
"""
*KillSwitch raise*
Once a client sends a KillSwitch (your-ip/closeconnection)
the handler automatically will raise the KillSwitch Exception.
"""
pass
| [
4871,
15887,
3673,
21077,
7,
16922,
2599,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
220,
220,
220,
220,
1635,
33420,
3673,
21077,
5298,
9,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
201,
198,
220,
220,
220,
2... | 2.426316 | 190 |
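The WebsiteNotFound docstring above says the error should pop up when requests cannot reach a website. A hypothetical usage sketch (the fetch() helper and its URL handling are assumptions, not part of the original module):

import requests

def fetch(url):
    try:
        return requests.get(url, timeout=5)
    except requests.exceptions.ConnectionError as exc:
        # surface the failure with the project's own exception type
        raise WebsiteNotFound(f"could not reach {url}") from exc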
import os
import time
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import warnings
warnings.filterwarnings('ignore')
#LOCAL_PATH = 'E:\DATASET\tiny-imagenet-200'
@pipeline_def
if __name__ == '__main__':
# iteration of PyTorch dataloader
transform_train = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.08, 1.25)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
train_dst = datasets.ImageFolder(IMG_DIR, transform_train)
train_loader = torch.utils.data.DataLoader(train_dst, batch_size=2048, shuffle=True, pin_memory=True, num_workers=8)
transform_val = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
val_dst = datasets.ImageFolder(IMG_DIR, transform_val)
val_loader = torch.utils.data.DataLoader(val_dst, batch_size=2000, shuffle=False, pin_memory=True, num_workers=8)
print('[PyTorch] start iterate test dataloader')
start = time.time()
for i, (x,y) in enumerate(train_loader):
if i%5==0:
print(i,end='-')
images = x.cuda(non_blocking=True)
labels = y.cuda(non_blocking=True)
end = time.time()
test_time = end-start
print('[PyTorch] end test dataloader iteration')
# print('[PyTorch] iteration time: %fs [train], %fs [test]' % (train_time, test_time))
print('[PyTorch] iteration time: %fs [test]' % (test_time))
pipe = create_dali_pipeline(batch_size=2048, num_threads=8, device_id=0, seed=12, data_dir=IMG_DIR,
crop=224, size=256, dali_cpu=False, shard_id=0, num_shards=1, is_training=True)
pipe.build()
train_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
pipe = create_dali_pipeline(batch_size=2000, num_threads=8, device_id=0, seed=12, data_dir=IMG_DIR,
crop=256, size=256, dali_cpu=True, shard_id=0, num_shards=1, is_training=False)
pipe.build()
val_loader = DALIClassificationIterator(pipe, reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL)
print('[DALI-GPU] start iterate train dataloader')
start = time.time()
for i, data in enumerate(train_loader):
if i%5==0:
print(i,end='-')
images = data[0]['data'].cuda()
labels = data[0]['label'].cuda()
end = time.time()
test_time = end-start
print('[DALI-GPU] iteration time: %fs [test]' % (test_time))
print('[DALI-cpu] start iterate val dataloader')
start = time.time()
for i, data in enumerate(val_loader):
if i%5==0:
print(i,end='-')
images = data[0]['data'].cuda()
labels = data[0]['label'].cuda()
end = time.time()
test_time = end-start
print('[DALI-cpu] iteration time: %fs [test]' % (test_time))
| [
11748,
28686,
198,
11748,
640,
198,
11748,
10688,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
1845,
29363,
198,
11748,
28034,
13,
1891,
2412,
13,
66,
463,
20471,
355,
269,
463,
20471,... | 2.391275 | 1,490 |
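In the snippet above the @pipeline_def decorator has lost the function it decorated, even though create_dali_pipeline() is called later. The following is only a sketch modeled on NVIDIA's reference ImageNet pipeline, with operator names and arguments assumed for a recent DALI release; it is not the original code:

from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types

@pipeline_def
def create_dali_pipeline(data_dir, crop, size, shard_id, num_shards,
                         dali_cpu=False, is_training=True):
    # read and shard the ImageFolder-style dataset; the name must match the
    # reader_name passed to DALIClassificationIterator
    images, labels = fn.readers.file(file_root=data_dir, shard_id=shard_id,
                                     num_shards=num_shards,
                                     random_shuffle=is_training, name="Reader")
    decoder_device = "cpu" if dali_cpu else "mixed"
    images = fn.decoders.image(images, device=decoder_device, output_type=types.RGB)
    if is_training:
        images = fn.random_resized_crop(images, size=crop)
        mirror = fn.random.coin_flip(probability=0.5)
    else:
        images = fn.resize(images, resize_shorter=size)
        mirror = False
    images = fn.crop_mirror_normalize(images, dtype=types.FLOAT,
                                      output_layout="CHW", crop=(crop, crop),
                                      mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                      std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
                                      mirror=mirror)
    return images, labels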
# -*- coding: utf-8 -*-
import os
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup (
name = 'optidash',
version = '1.0.1',
description = 'Official Python integration for Optidash API',
long_description = 'Optidash: AI-powered image optimization and processing API. We will drastically speed-up your websites and save you money on bandwidth and storage.',
url = 'https://github.com/optidash-ai/optidash-python',
download_url = 'https://github.com/optidash-ai/optidash-python/archive/1.0.0.tar.gz',
author = 'Optidash UG',
author_email = 'support@optidash.ai',
license = 'MIT',
keywords = 'optidash image optimization processing resizing resizer cropping scaling masking watermarking filtering thumbnails pic picture photo face face detection visual watermark filter crop mask resize resizer thumbs thumbnail thumbnails jpg jpeg png gif svg bmp psd tiff heic',
packages = [
'optidash'
],
install_requires = [
'requests'
],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
]
) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
302,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
... | 2.885434 | 611 |
import scipy as sp
import scipy.linalg as la
import pdb
from limix.core.covar import FreeFormCov
from limix.core.mean import MeanKronSum
from limix.core.gp import GP2KronSum
from limix.core.gp import GP
from limix.utils.preprocess import covar_rescale
import time
import copy
if __name__=='__main__':
# define phenotype
N = 1000
P = 4
Y = sp.randn(N,P)
# define fixed effects
F = []; A = []
F.append(1.*(sp.rand(N,2)<0.5))
A.append(sp.eye(P))
    # define row covariance
f = 10
X = 1.*(sp.rand(N, f)<0.2)
R = covar_rescale(sp.dot(X,X.T))
R+= 1e-4 * sp.eye(N)
S_R, U_R = la.eigh(R)
# define col covariances
Cg = FreeFormCov(P)
Cn = FreeFormCov(P)
Cg.setRandomParams()
Cn.setRandomParams()
# define gp and optimize
gp = GP2KronSum(Y=Y, F=F, A=A, Cg=Cg, Cn=Cn, S_R=S_R, U_R=U_R)
gp.optimize()
| [
11748,
629,
541,
88,
355,
599,
198,
11748,
629,
541,
88,
13,
75,
1292,
70,
355,
8591,
198,
11748,
279,
9945,
198,
6738,
1761,
844,
13,
7295,
13,
66,
709,
283,
1330,
3232,
8479,
34,
709,
198,
6738,
1761,
844,
13,
7295,
13,
32604,
... | 2.056075 | 428 |
from typing import Set, List, Tuple, NamedTuple, Iterator, Optional
from collections import deque
import heapq
RAW = """#######
#.G...#
#...EG#
#.#.#G#
#..G#E#
#.....#
#######"""
reached = set()
successful_paths = []
# best-first search: each frontier entry is (estimated total cost, path so far)
frontier = [(min(fighter.pos.manhattan(target) for target in targets),
             [fighter.pos])]
best_score = float('inf')
while frontier:
    score, path = heapq.heappop(frontier)
    if score > best_score:
        # The best remaining candidate is worse than
        # what we've already found, so break
        break
    pos = path[-1]
    if pos in targets:
        reached.add(pos)
        best_score = score
        successful_paths.append(path)
        continue
    for next_pos in pos.neighbors():
        if next_pos in off_limits:
            continue
        if next_pos in path:
            continue
        new_path = path + [next_pos]
        new_score = len(new_path) + min(next_pos.manhattan(target) for target in targets)
        heapq.heappush(frontier, (new_score, new_path))
# at this point, successful_paths holds all the shortest paths to a target
# need to sort by (1) reading order of destination (2) reading order of first step
successful_paths.sort(key=lambda path: (path[-1].i, path[-1].j, path[1].i, path[1].j))
if successful_paths:
return successful_paths[0]
else:
#print("nowhere good to go")
return None
def round(self) -> bool:
"""Return true if the game is not over"""
occupied = {f.pos: f.elf for f in self.fighters if not f.dead}
movement_last_round = occupied != self.last_occupied[0]
self.fighters.sort(key=lambda f: (f.pos.i, f.pos.j))
game_over = False
for fighter in self.fighters:
if fighter.dead:
continue
found_enemies = fighter.take_turn(self, movement_last_round)
if not found_enemies:
game_over = True
self.last_occupied[0] = occupied
return game_over
def total_hit_points(self) -> int:
return sum(f.hp for f in self.fighters if not f.dead)
def __repr__(self) -> str:
outputs = {**{pos: '#' for pos in self.walls},
**{f.pos: 'E' if f.elf else 'G' for f in self.fighters if not f.dead}}
max_i = max(pos.i for pos in outputs)
max_j = max(pos.j for pos in outputs)
return "\n".join("".join(outputs.get(Pos(i, j), ".") for j in range(max_j + 1))
for i in range(max_i + 1))
def parse(raw: str) -> Cave:
walls = set()
fighters = []
for i, row in enumerate(raw.split("\n")):
for j, c in enumerate(row.strip()):
if c == '#':
walls.add(Pos(i, j))
elif c == 'E':
fighters.append(Fighter(elf=True, pos=Pos(i, j)))
elif c == 'G':
fighters.append(Fighter(elf=False, pos=Pos(i, j)))
return Cave(walls, fighters)
def run_game(cave: Cave) -> int:
num_rounds = 0
while True:
print("round", num_rounds)
print(cave)
game_over = cave.round()
if game_over:
break
num_rounds += 1
return num_rounds * cave.total_hit_points()
CAVE = parse(RAW)
assert run_game(CAVE) == 27730
CAVE2 = parse("""#######
#G..#E#
#E#E.E#
#G.##.#
#...#E#
#...E.#
#######""")
#assert run_game(CAVE2) == 36334
CAVE3 = parse("""#########
#G......#
#.E.#...#
#..##..G#
#...##..#
#...#...#
#.G...G.#
#.....G.#
#########""")
assert run_game(CAVE3) == 18740
with open('data/day15.txt') as f:
raw = f.read()
cave = parse(raw)
print(run_game(cave))
| [
6738,
19720,
1330,
5345,
11,
7343,
11,
309,
29291,
11,
34441,
51,
29291,
11,
40806,
1352,
11,
32233,
198,
198,
6738,
17268,
1330,
390,
4188,
198,
11748,
24575,
80,
198,
198,
20530,
796,
37227,
4242,
21017,
198,
2,
13,
38,
986,
2,
19... | 2.032735 | 1,894 |
#!/usr/bin/env python
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import logging
import subprocess
import traceback
def run_compilation_commands(cmds, clean_cmd):
"""runs compilation commands, and suggests a project cleaning command
in case there is nothing to compile.
"""
from inferlib import utils
# TODO call it in parallel
if cmds is None or len(cmds) == 0:
utils.stderr('Nothing to compile. Try running `{}` first.'
.format(clean_cmd))
return os.EX_NOINPUT
for cmd in cmds:
if cmd.start() != os.EX_OK:
return os.EX_SOFTWARE
return os.EX_OK
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
66,
8,
1853,
532,
1944,
3203,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
3918,
5964,
1043,
... | 2.977778 | 360 |
import corner
import emcee
import numpy as np
class MixedModel(object):
"""Combine multiple FFDs and fit
their parameters simultaneously with
shared alpha.
"""
def __init__(self, BFA=[], loglikelihood=None, alpha_prior=None):
'''Constructor for a Mixed Model Bayesian analysis suite.
Attributes:
-----------
BFA : list of BayesianFlaringAnalysis objects
loglikelihood : func
loglikelihood function
alpha_prior : float
shared prior for alpha
'''
self.BFA = BFA
self.loglikelihood = loglikelihood
self.alpha_prior = alpha_prior
def sample_posterior_with_mcmc(self, nwalkers=300, cutoff=100, steps=500):
'''Sample from the posterior using MCMC.
Parameters:
-------------
inits : list
initial variable values in the correct order
for lnprob
lnprob : func
posterior distribution that takes
inits as first argument and *args as second
to last arguments
nwalkers : int
number of walkers to run around the parameter space
cutoff : int
You do not want to use values in the beginning
of the chain, so cut them off.
steps : int
How long to run the walk.
Return:
--------
Sampling results as ndarray with dimensions like
len(init) x ((steps - cutoff) * nwalkers)
'''
args, inits = [], []
for bfa in self.BFA:
args.append([bfa.mined, bfa.Tprime, bfa.Mprime,
bfa.deltaT, bfa.threshed, bfa.M,
bfa.events])
inits.append(bfa.eps_prior)
inits.append(self.alpha_prior)
args = [i for i in args if i is not None]
inits = [i for i in inits if i]
ndim = len(inits)
pos = [inits + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, self.loglikelihood, args=args)
sampler.run_mcmc(pos, steps)
self.samples = sampler.chain[:, cutoff:, :].reshape((-1, ndim))
def show_corner_plot(self, save=False, path=''):
'''Show (and save) a corner plot. NOT TESTED.
'''
truths = [bfa.eps_prior for bfa in self.BFA]
truths.append(2.)
ndim = len(self.BFA)
labels = [r'$\epsilon_{}$'.format(i) for i in range(ndim)] + [r'$\alpha$']
fig = corner.corner(self.samples,
labels=labels,
quantiles=[0.16, 0.5, 0.84],
show_titles=True,
title_kwargs={"fontsize": 12},
truths=truths,)
if save==True:
fig.savefig(path, dpi=300)
def calculate_percentiles(self, percentiles=[16, 50, 84]):
'''Calculate best fit value and its uncertainties.
Parameters:
-----------
percentiles : n-list
percentiles to compute
Return:
--------
a tuple of n-tuples with
(median, upper_uncert, lower_uncert)
each.
'''
map_of_results = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
zip(*np.percentile(self.samples, percentiles, axis=0)))
p = list(map_of_results)
self.percentiles = p
return p
| [
11748,
5228,
198,
11748,
795,
344,
68,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4871,
35250,
17633,
7,
15252,
2599,
198,
220,
220,
220,
37227,
20575,
500,
3294,
18402,
30832,
290,
4197,
220,
198,
220,
220,
220,
511,
10007,
11640,
... | 1.9503 | 1,831 |
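A hypothetical usage sketch for the MixedModel class above (the analyses list and shared_loglike function are assumptions: each analysis is a configured BayesianFlaringAnalysis, and shared_loglike(theta, *args) expects one epsilon per FFD plus one shared alpha at the end of theta):

mm = MixedModel(BFA=analyses, loglikelihood=shared_loglike, alpha_prior=2.0)
mm.sample_posterior_with_mcmc(nwalkers=300, cutoff=100, steps=500)
mm.calculate_percentiles()              # (median, +err, -err) for each parameter
mm.show_corner_plot(save=True, path='corner.png')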
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628
] | 2 | 24 |
import argparse
# borrowed usage from @netspooky/inhale
from cmd.bytecode_format_command import format_to_bytecode
from cmd.instruction_command import get_instr
from cmd.opcode_command import get_op
parser = argparse.ArgumentParser(description="mdis.py")
args = [
('-b', "INT_TO_BC", "shift into bytecode format", format_to_bytecode, 1),
('-f', "FILE", "get instructions of a given file", get_instr, 1),
('-op', "INT_TO_OP", "get opcode of a given integer", get_op, 1),
('-fr', "FROM", "from address", None, 1),
('-t', "TO", "to address", None, 1)
]
def set_up_arguments():
"""
Set up the arguments
:return:
"""
for arg in args:
add_argument(arg[0], arg[1], arg[2], arg[4])
def add_argument(flag: str, dest, _help: str, nargs: int):
"""
Add an argument
:param nargs: number of arguments
:param flag: argument flag
:param dest: destination
:param _help: help message
:return:
"""
parser.add_argument(flag, dest=dest, nargs=nargs, help=_help)
| [
11748,
1822,
29572,
198,
198,
2,
22546,
8748,
422,
2488,
45938,
79,
29655,
14,
259,
71,
1000,
198,
6738,
23991,
13,
26327,
8189,
62,
18982,
62,
21812,
1330,
5794,
62,
1462,
62,
26327,
8189,
198,
6738,
23991,
13,
8625,
2762,
62,
21812,... | 2.586466 | 399 |
import requests
import json
import time
import sys
import os
| [
11748,
7007,
201,
198,
11748,
33918,
201,
198,
11748,
640,
201,
198,
11748,
25064,
201,
198,
11748,
28686,
201,
198,
201,
198
] | 3.090909 | 22 |
import numpy as np
import json
from datetime import datetime
from dateutil.parser import parse
from balloon.settings import GRIB_PATH
from core.models import Column, Cell
from forecast.models import GribModel, grib_models
from forecast.preprocess import SHORT_NAMES
EPSILON = 1e-5 # EPSILON° < 1m
| [
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136,
198,
198,
6738,
21190,
13,
33692,
1330,
402,
7112,
33,
62,
34219,
198,
6738,
4755,
13,
27530,
133... | 3.318681 | 91 |
import sys
import os
from DockerBuildManagement import ChangelogSelections, BuildSelections, PublishSelections, RunSelections, SwarmSelections, TestSelections, BuildTools, PromoteSelections
from SwarmManagement import SwarmTools
if __name__ == "__main__":
arguments = sys.argv[1:]
HandleManagement(arguments)
| [
11748,
25064,
198,
11748,
28686,
198,
6738,
25716,
15580,
48032,
1330,
609,
8368,
519,
17563,
507,
11,
10934,
17563,
507,
11,
8525,
1836,
17563,
507,
11,
5660,
17563,
507,
11,
38293,
17563,
507,
11,
6208,
17563,
507,
11,
10934,
33637,
1... | 3.468085 | 94 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from multilayernn import * #Import from own library
# # MNIST
# ## 1. Data Processing and One Hot Encoding
# In[2]:
train = pd.read_csv("datasets/mnist_train.csv") #read data from file
#separating labels and pixels
train_labels=np.array(train.loc[:,'label'])
train_data=np.array(train.loc[:,train.columns!='label'])
#The characteristics of MNIST data pixels = 784 samples = 42000 classes = 10
# In[3]:
#Convert to onehot encoding
pixels = 784
samples = len(train_labels)
classes = 10
train_data = train_data.T #Transpose the matrix: where each column is a sample
train_label=np.zeros((classes, samples))
for col in range (samples):
train_label[train_labels[col],col]=1
#Scaling Down of dataset
train_data = train_data/255
# ## 2. Training of Model
# Hyperparameters:
# 1. Tune the right weights as improper weights will cause exploding outputs
# 2. Tune the learning rate and gamma
# 3. Tune the number of epoch to be trained
# In[4]:
#Create Multi Layer Network
nodes_per_layer = [784, 500, 200, 80, 10] #nodes in each layer of neural network
mnist_nn = deepNN(nodes_per_layer, learning_rate = 0.3, gamma = 0.7, epoch=2000)
# In[5]:
#Train the network
mnist_nn.train_model(train_data, train_label, train_labels, verbose = True, filename="accuracy/mnist/mnistdata")
# ## 3. Testing of Model
# In[6]:
#data preprocessing
test = pd.read_csv("datasets/mnist_test.csv") #read data from file
#separating labels and pixels
test_labels=np.array(test.loc[:,'label'])
test_data=np.array(test.loc[:,test.columns!='label'])
#The characteristics of MNIST data pixels = 784 samples = 42000 classes = 10
# In[7]:
#Convert to onehot encoding
pixels = 784
samples = len(test_labels)
classes = 10
test_data = test_data.T #Transpose the matrix: where each column is a sample
test_label=np.zeros((classes, samples))
for col in range (samples):
test_label[test_labels[col],col]=1
#Scaling Down of dataset
test_data = test_data/255
# In[8]:
test_error, test_accuracy = mnist_nn.test_model( test_data, test_label, test_labels, filename="accuracy/mnist/mnistdata")
# ## Conclusion:
# Check accuracy folder for all the error and accuracy data.
# <hr>
# # Kaggle: Test and Compute Accuracy for Submission
# For submission to the Kaggle the kaggle test data needs to be passed through the model.
# The following code will generate the "sample_submission.csv" for the Kaggle MNIST.
#
# **Uncomment the Following for Kaggle**
# In[9]:
# test_data= pd.read_csv("datasets/kaggle/mnist_test.csv") #This generated cvs file which can be submitted to the Kaggle
# test_data=np.array(test_data) #separating labels and pixels
# #Preprocess data for the model
# test_data = test_data.T #Transpose the matrix: where each column is a sample
# test_data = test_data/255 #scale the data to range 1
# #Test the data for the model
# Y_hat, cache = mnist_nn.forward_propagation(test_data)
# Y_predicted = np.argmax(Y_hat, axis=0)
# #Create submission ready data
# df = pd.DataFrame(Y_predicted, columns = ["Label"])
# df.index.name = 'ImageId'
# df.index += 1
# df.to_csv('kaggle_submission/sample_submission.csv', index = True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
16,
5974,
628,
198,
6738,
1963,
346,
323,
1142,
77,
1330,
1635,
1303,
20939,
422,
898,
5888,
628,
198,
2,
1303,
29060,
8808,
... | 2.932675 | 1,114 |
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
'''
Contains smoke tests for all workchains of aiida-fleur,
checks if the builders from aiida-core get the correct class.
'''
import pytest
@pytest.mark.usefixtures('aiida_profile', 'clear_database')
class TestFleurWorkchainInterfaces:
"""
Test all aiida-fleur workflow interfaces
"""
# TODO
# prepare some nodes:
# structure, option, fleurinp, wfparameters
# add to builder and see if he takes it
# if possible, run only the initial step so that the input is checked...
# In general the interfaces should be fixed and not changed. this is what
# these tests are for, to test be aware of interface breaks
def test_fleur_scf_wc_init(self):
"""
Test the interface of the scf workchain
"""
from aiida_fleur.workflows.scf import FleurScfWorkChain
builder = FleurScfWorkChain.get_builder()
def test_fleur_eos_wc_init(self):
"""
Test the interface of the eos workchain
"""
from aiida_fleur.workflows.eos import FleurEosWorkChain
builder = FleurEosWorkChain.get_builder()
def test_fleur_dos_wc_init(self):
"""
Test the interface of the dos workchain
"""
from aiida_fleur.workflows.dos import fleur_dos_wc
builder = fleur_dos_wc.get_builder()
def test_fleur_corehole_wc_init(self):
"""
Test the interface of the corehole workchain
"""
from aiida_fleur.workflows.corehole import FleurCoreholeWorkChain
builder = FleurCoreholeWorkChain.get_builder()
def test_fleur_initial_cls_wc_init(self):
"""
Test the interface of the scf workchain
"""
from aiida_fleur.workflows.initial_cls import FleurInitialCLSWorkChain
builder = FleurInitialCLSWorkChain.get_builder()
def test_fleur_relax_wc_init(self):
"""
Test the interface of the relax workchain
"""
from aiida_fleur.workflows.relax import FleurRelaxWorkChain
builder = FleurRelaxWorkChain.get_builder()
def test_fleur_optimize_para_wc_init(self):
"""
Test the interface of the optimize_para_ workchain
"""
from aiida_fleur.workflows.optimize_para import fleur_optimize_parameters_wc
builder = fleur_optimize_parameters_wc.get_builder()
def test_fleur_mae_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.mae import FleurMaeWorkChain
builder = FleurMaeWorkChain.get_builder()
def test_fleur_mae_conv_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.mae_conv import FleurMaeConvWorkChain
builder = FleurMaeConvWorkChain.get_builder()
def test_fleur_ssdisp_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.ssdisp import FleurSSDispWorkChain
builder = FleurSSDispWorkChain.get_builder()
def test_fleur_ssdisp_conv_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.ssdisp_conv import FleurSSDispConvWorkChain
builder = FleurSSDispConvWorkChain.get_builder()
def test_fleur_dmi_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.dmi import FleurDMIWorkChain
builder = FleurDMIWorkChain.get_builder()
def test_fleur_base_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.base_fleur import FleurBaseWorkChain
builder = FleurBaseWorkChain.get_builder()
def test_fleur_base_relax_wc_init(self):
"""
Test the interface of the dmi workchain
"""
from aiida_fleur.workflows.base_relax import FleurBaseRelaxWorkChain
builder = FleurBaseRelaxWorkChain.get_builder()
def test_fleur_create_magnetic_wc_init(self):
"""
        Test the interface of the create_magnetic workchain
"""
from aiida_fleur.workflows.create_magnetic_film import FleurCreateMagneticWorkChain
builder = FleurCreateMagneticWorkChain.get_builder()
def test_fleur_strain_wc_init(self):
"""
        Test the interface of the strain workchain
"""
from aiida_fleur.workflows.strain import FleurStrainWorkChain
builder = FleurStrainWorkChain.get_builder()
def test_fleur_orbcontrol_wc_init(self):
"""
Test the interface of the orbcontrol workchain
"""
from aiida_fleur.workflows.orbcontrol import FleurOrbControlWorkChain
builder = FleurOrbControlWorkChain.get_builder()
def test_fleur_cfcoeff_wc_init(self):
"""
Test the interface of the cfcoeff workchain
"""
from aiida_fleur.workflows.cfcoeff import FleurCFCoeffWorkChain
builder = FleurCFCoeffWorkChain.get_builder()
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
15069,
357,
66,
828,
27325,
354,
2150,
82,
89,
298,
6582,
449,
9116,
33467,
402,
2022,
39,
11,
314,
1921,
12,
16,
14,
6968,
40,
12,
16,
11,
4486,
13,
220,
220,
220,
220,
220,
220,
220,
22... | 2.304623 | 2,531 |
#!/usr/bin/env python
"""
Twisted-Friendly Orb Reap Threads
---------------------------------
"""
from twisted.internet.threads import deferToThread
import antelope.brttpkt
class OrbreapThr(antelope.brttpkt.OrbreapThr):
"""Twisted-compatible subclass of ``antelope.brttpkt.OrbreapThr``."""
def get(self):
"""Defer ``get`` to a thread.
:rtype: ``Deferred``
"""
d = deferToThread(
super(OrbreapThr, self).get)
return d
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
5080,
6347,
12,
23331,
306,
15839,
797,
499,
14122,
82,
198,
3880,
12,
198,
37811,
198,
198,
6738,
19074,
13,
37675,
13,
16663,
82,
1330,
29135,
2514,
16818,
198,
198,
1174... | 2.410891 | 202 |
# 2b. Implement a client for the deployed server at 2a: a script that receives
# from the command line an addr string, a port integer, and a string msg,
# and sends a UDP packet to the addr address, the port port, and the msg content.
import socket
import sys
if len(sys.argv) < 4:
    print('Please enter <address> <port> <message>')
else:
ADDRESS = sys.argv[1]
PORT = int(sys.argv[2])
MESSAGE = sys.argv[3:]
content = ''
for word in MESSAGE:
content += ' ' + word
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.connect((ADDRESS, PORT))
client_socket.send(content.encode())
client_socket.close()
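
# For reference, a minimal sketch of the matching UDP server from part 2a
# (hypothetical; not part of this snippet): it binds to a port and prints
# each datagram it receives.
#
#     import socket
#     server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     server.bind(('0.0.0.0', 9999))
#     while True:
#         data, addr = server.recvfrom(1024)
#         print(f'{addr}: {data.decode()}')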
| [
2,
362,
65,
13,
48282,
257,
5456,
329,
262,
12380,
4382,
379,
362,
64,
25,
257,
4226,
326,
11583,
198,
2,
422,
262,
3141,
1627,
281,
37817,
4731,
11,
257,
2493,
18253,
11,
290,
257,
4731,
31456,
11,
198,
2,
290,
12800,
257,
36428,... | 2.708 | 250 |
# GHC_Codepath SE101
# Sandbox - 3
# 1. SE101:String to Integer (ATOI)
# Implement atoi which converts a string to an integer.
# The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character,
# takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
# The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
# If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains
# only whitespace characters, no conversion is performed. If no valid conversion could be performed, a zero value is returned.
# Note:
# • Only the space character ' ' is considered a whitespace character.
# • Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [-2^31, 2^31 - 1]. If the numerical value is out of the range of representable values, INT_MAX (2^31 - 1) or INT_MIN (-2^31) is returned.
#!/bin/python3
import math
import os
import random
import re
import sys
# The function is expected to return an INTEGER.
# The function accepts STRING a as parameter.
# The function will convert the string parameter
# into an integer, and return the result.
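
# A minimal sketch of atoi as described above (the original snippet calls it
# but never defines it): skip leading spaces, read an optional sign, consume
# digits, and clamp the result to the 32-bit signed integer range.
def atoi(a):
    INT_MAX, INT_MIN = 2 ** 31 - 1, -2 ** 31
    i, n = 0, len(a)
    # discard leading whitespace (only the space character counts)
    while i < n and a[i] == ' ':
        i += 1
    if i == n:
        return 0
    # optional plus or minus sign
    sign = -1 if a[i] == '-' else 1
    if a[i] in '+-':
        i += 1
    # consume as many numerical digits as possible
    result = 0
    while i < n and a[i].isdigit():
        result = result * 10 + int(a[i])
        i += 1
    result *= sign
    # clamp out-of-range values to INT_MIN / INT_MAX
    return max(INT_MIN, min(INT_MAX, result))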
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
a = input()
result = atoi(a)
fptr.write(str(result) + '\n')
fptr.close()
| [
2,
46615,
62,
43806,
538,
776,
7946,
8784,
198,
2,
3837,
3524,
532,
513,
198,
2,
352,
13,
7946,
8784,
25,
10100,
284,
34142,
357,
1404,
46,
40,
8,
220,
198,
198,
2,
48282,
379,
23013,
543,
26161,
257,
4731,
284,
281,
18253,
13,
... | 3.668874 | 453 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Register your models here.
from django.utils.translation import gettext_lazy as _
from .models import Funuser
@admin.register(Funuser)
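# Minimal registration target sketch (the original snippet ends at the bare
# decorator above); reusing Django's stock UserAdmin behaviour is an assumption.
class FunuserAdmin(UserAdmin):
    pass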
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
28482,
1330,
11787,
46787,
198,
2,
17296,
534,
4981,
994,
13,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
7... | 3.453125 | 64 |
'''
Created on Oct 4, 2017
@author: jschmid3@stevens.edu
Pledge: I pledge my honor that I have abided by the Stevens Honor System -Joshua Schmidt
CS115 - hw4
'''
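

# Hedged reconstruction of the triangle() helper used below (its definition is
# not part of this snippet): builds the first n rows of Pascal's triangle as a
# list of lists.
def triangle(n, rows):
    '''returns the first n rows of Pascal's triangle, accumulated in rows'''
    if len(rows) == n:
        return rows
    if rows == []:
        return triangle(n, [[1]])
    prev = rows[-1]
    next_row = [1] + [prev[i] + prev[i + 1] for i in range(len(prev) - 1)] + [1]
    return triangle(n, rows + [next_row])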
def pascal_row(n):
"""returns the pascal triangle row of the given integer n"""
return triangle(n + 1, [])[n]
def pascal_triangle(n):
"""returns the pascal triangle from 0 to n"""
return triangle(n + 1, [])
#TESTING
#print(pascal_row(0))
#print(pascal_triangle(3)) | [
7061,
6,
198,
41972,
319,
2556,
604,
11,
2177,
198,
198,
31,
9800,
25,
474,
20601,
13602,
18,
31,
4169,
574,
82,
13,
15532,
198,
47,
2965,
25,
220,
220,
220,
314,
13995,
616,
7522,
326,
314,
423,
450,
1384,
416,
262,
20019,
21071,... | 2.631579 | 171 |
from setuptools import find_packages, setup
setup(
name='osl_api',
version='0.0.1',
packages=find_packages(),
author="Anthony Dugarte",
author_email="toonny1998@gmai.com",
description="Simple Python OSL Exchange API client which handles authorization for you and exposes a requests-like interface",
url="https://github.com/AnthonyDugarte/osl_api",
python_requires='>=3.5',
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
418,
75,
62,
15042,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
3256,
198,
220,
220,
220,
10392,
28,
197... | 2.921429 | 140 |
from .cmath import add # noqa
| [
6738,
764,
66,
11018,
1330,
751,
220,
1303,
645,
20402,
198
] | 2.818182 | 11 |
import sqlalchemy as sa
from airflow.operators.python_operator import PythonOperator
from dataflow import config
from dataflow.dags import _PipelineDAG
from dataflow.operators.common import fetch_from_hawk_api
from dataflow.utils import TableConfig
| [
11748,
44161,
282,
26599,
355,
473,
198,
6738,
45771,
13,
3575,
2024,
13,
29412,
62,
46616,
1330,
11361,
18843,
1352,
198,
198,
6738,
1366,
11125,
1330,
4566,
198,
6738,
1366,
11125,
13,
67,
3775,
1330,
4808,
47,
541,
4470,
35,
4760,
... | 3.652174 | 69 |
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
from enum import Enum
import torch
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, BertTokenizer
# from transformers.configuration_bert import BertTokenizer, BertTokenizerFast
from transformers.tokenization_utils_base import (BatchEncoding,
PreTrainedTokenizerBase)
from .base_data_module import BaseDataModule
from .processor import KGProcessor, get_dataset
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
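    # Reconstructed sketch: the stray @classmethod above needs a method body;
    # a _missing_ hook with a clearer error message is assumed here.
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of "
            f"{list(cls._value2member_map_.keys())}"
        )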
class PaddingStrategy(ExplicitEnum):
"""
Possible values for the ``padding`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for tab-completion
in an IDE.
"""
LONGEST = "longest"
MAX_LENGTH = "max_length"
DO_NOT_PAD = "do_not_pad"
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
model (:class:`~transformers.PreTrainedModel`):
The model that is being trained. If set and has the `prepare_decoder_input_ids_from_labels`, use it to
prepare the `decoder_input_ids`
This is useful when using `label_smoothing` to avoid calculating loss twice.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
class Pipeline():
""" Pre-process Pipeline Class : callable """
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
713,
11,
7343,
11,
968,
6030,
11,
32233,
11,
309,
29291,
11,
4479,
198,
6738,
33829,
1330,
2039,
388,
198,
11748,
28034,
198,
198,
... | 2.720854 | 1,218 |
import logging
import sys
import tetueSrc
from irc.bot import SingleServerIRCBot
# config
HOST = 'irc.twitch.tv'
PORT = 6667
read_successful, cfg = tetueSrc.get_configuration("bot")
CLIENT_ID = cfg["client_id"]
owner = cfg["owner"]
USERNAME = cfg["name"].lower()
TOKEN = cfg["token"]
PASSWORD = f"oauth:{TOKEN}"
CHANNEL = f"#{owner}"
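

# Hypothetical reconstructions: the original snippet calls _get_logger() and
# main() without defining them, so these minimal stand-ins are assumptions.
def _get_logger():
    log = logging.getLogger(USERNAME)
    log.setLevel(logging.INFO)
    if not log.handlers:
        log.addHandler(logging.StreamHandler(sys.stdout))
    return log


def main():
    # connect a bare bot to Twitch chat; the real event handlers are omitted
    bot = SingleServerIRCBot([(HOST, PORT, PASSWORD)], USERNAME, USERNAME)
    bot.start()
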
logger = _get_logger()
if __name__ == '__main__':
main() | [
11748,
18931,
198,
11748,
25064,
198,
11748,
28408,
518,
50,
6015,
198,
198,
6738,
220,
1980,
13,
13645,
1330,
14206,
10697,
49060,
20630,
198,
198,
2,
4566,
198,
39,
10892,
796,
705,
1980,
13,
31844,
13,
14981,
6,
198,
15490,
796,
71... | 2.522013 | 159 |
from dvc.dependency.base import BaseDependency
from dvc.output.ssh import SSHOutput
| [
6738,
288,
28435,
13,
45841,
1387,
13,
8692,
1330,
7308,
35,
2690,
1387,
198,
6738,
288,
28435,
13,
22915,
13,
45824,
1330,
33825,
26410,
628
] | 3.4 | 25 |
import math
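

# Hypothetical helper: isPrime() is used below but never defined in this
# snippet; a plain trial-division primality test is assumed.
def isPrime(n):
    if n < 2:
        return False
    for d in range(2, int(math.sqrt(n)) + 1):
        if n % d == 0:
            return False
    return True
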
while True:
try:
(PM, PN) = (int(i) for i in raw_input().split())
find_num_of_prime = 0
i = 0
while True:
if isPrime(i):
find_num_of_prime += 1
if find_num_of_prime == PM:
break
i += 1
x = []
find_num_of_prime -= 1
while True:
if isPrime(i):
x.append(i)
find_num_of_prime += 1
if find_num_of_prime == PN:
break
i += 1
for j in range(len(x)):
print x[j],
if (j+1) % 10 == 0:
print
print
except EOFError:
break
| [
11748,
10688,
628,
198,
4514,
6407,
25,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
357,
5868,
11,
350,
45,
8,
796,
357,
600,
7,
72,
8,
329,
1312,
287,
8246,
62,
15414,
22446,
35312,
28955,
628,
220,
22... | 1.59276 | 442 |
"""Tests relating to the Material class."""
import pytest
import pygaps
import pygaps.utilities.exceptions as pgEx
@pytest.mark.core
class TestMaterial():
"""Test the material class."""
def test_material_basic(self):
"""Basic creation tests."""
mat = pygaps.Material('material1')
assert mat == 'material1'
assert mat != 'Material1'
mat2 = pygaps.Material('material1')
assert mat == mat2
def test_material_create(self, material_data, basic_material):
"""Check material can be created from test data."""
assert material_data == basic_material.to_dict()
def test_material_retrieved_list(self, material_data, basic_material):
"""Check material can be retrieved from master list."""
pygaps.MATERIAL_LIST.append(basic_material)
uploaded_material = pygaps.Material.find(material_data.get('name'))
assert material_data == uploaded_material.to_dict()
with pytest.raises(pgEx.ParameterError):
pygaps.Material.find('noname')
pygaps.MATERIAL_LIST.remove(basic_material)
def test_material_get_properties(self, material_data, basic_material):
"""Check if properties of a material can be located."""
        assert basic_material.get_prop('density') == material_data.get('density')
density = basic_material.properties.pop('density')
with pytest.raises(pgEx.ParameterError):
basic_material.get_prop('density')
basic_material.properties['density'] = density
def test_material_print(self, basic_material):
"""Checks the printing can be done."""
print(basic_material)
| [
37811,
51,
3558,
11270,
284,
262,
14633,
1398,
526,
15931,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
12972,
70,
1686,
198,
11748,
12972,
70,
1686,
13,
315,
2410,
13,
1069,
11755,
355,
23241,
3109,
628,
198,
31,
9078,
9288,
13,
41... | 2.601218 | 657 |
from pyper import R
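
# The original snippet never creates the R session or initialises `a`;
# a minimal setup (assumption) so the rbind() calls below work:
r = R()
r("a <- NULL")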
for i in range(20):
r("a <- rbind(a, seq(1000000) * 1.0 * %d)" % i)
print r("sum(a)") | [
6738,
12972,
525,
1330,
371,
198,
1640,
1312,
287,
2837,
7,
1238,
2599,
198,
220,
220,
220,
374,
7203,
64,
24293,
374,
21653,
7,
64,
11,
33756,
7,
16,
10535,
8,
1635,
352,
13,
15,
1635,
4064,
67,
16725,
4064,
1312,
8,
198,
4798,
... | 2.137255 | 51 |
import time
import traceback
import telegram
from telegram.ext.dispatcher import run_async
from mayday import LogConfig
from mayday.constants import conversations, stages
from mayday.constants.replykeyboards import ReplyKeyboards
from mayday.controllers.redis import RedisHelper
from mayday.features import (platform_stats, post_ticket, quick_search, search,
support, update_ticket)
from mayday.utils import log_util
from mayday.validators import authenticator
flogger = LogConfig.flogger
KEYBOARDS = ReplyKeyboards()
REDIS = RedisHelper()
@run_async
@run_async
@run_async
@run_async
@run_async
@run_async
| [
11748,
640,
198,
11748,
12854,
1891,
198,
198,
11748,
573,
30536,
198,
6738,
573,
30536,
13,
2302,
13,
6381,
8071,
2044,
1330,
1057,
62,
292,
13361,
198,
198,
6738,
743,
820,
1330,
5972,
16934,
198,
6738,
743,
820,
13,
9979,
1187,
133... | 2.914798 | 223 |
'''hello world'''
def hello():
'''Hello world'''
print('hello world')
| [
7061,
6,
31373,
995,
7061,
6,
198,
198,
4299,
23748,
33529,
198,
220,
220,
220,
705,
7061,
15496,
995,
7061,
6,
198,
220,
220,
220,
3601,
10786,
31373,
995,
11537,
198
] | 2.548387 | 31 |
from datetime import datetime, timedelta | [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514
] | 4.444444 | 9 |
import dgl
import numpy as np
import random
import torch
from dgllife.utils.featurizers import one_hot_encoding
from dgllife.utils.mol_to_graph import smiles_to_bigraph
from dgllife.utils.splitters import RandomSplitter
def set_random_seed(seed=0):
"""Set random seed.
Parameters
----------
seed : int
Random seed to use
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def load_dataset_for_classification(args):
"""Load dataset for classification tasks.
Parameters
----------
args : dict
Configurations.
Returns
-------
dataset
The whole dataset.
train_set
Subset for training.
val_set
Subset for validation.
test_set
Subset for test.
"""
assert args['dataset'] in ['Tox21']
if args['dataset'] == 'Tox21':
from dgllife.data import Tox21
dataset = Tox21(smiles_to_bigraph, args['atom_featurizer'])
train_set, val_set, test_set = RandomSplitter.train_val_test_split(
dataset, frac_train=args['frac_train'], frac_val=args['frac_val'],
frac_test=args['frac_test'], random_state=args['random_seed'])
return dataset, train_set, val_set, test_set
def load_dataset_for_regression(args):
"""Load dataset for regression tasks.
Parameters
----------
args : dict
Configurations.
Returns
-------
train_set
Subset for training.
val_set
Subset for validation.
test_set
Subset for test.
"""
assert args['dataset'] in ['Alchemy', 'Aromaticity']
if args['dataset'] == 'Alchemy':
from dgllife.data import TencentAlchemyDataset
train_set = TencentAlchemyDataset(mode='dev')
val_set = TencentAlchemyDataset(mode='valid')
test_set = None
if args['dataset'] == 'Aromaticity':
from dgllife.data import PubChemBioAssayAromaticity
dataset = PubChemBioAssayAromaticity(smiles_to_bigraph,
args['atom_featurizer'],
args['bond_featurizer'])
train_set, val_set, test_set = RandomSplitter.train_val_test_split(
dataset, frac_train=args['frac_train'], frac_val=args['frac_val'],
frac_test=args['frac_test'], random_state=args['random_seed'])
return train_set, val_set, test_set
def collate_molgraphs(data):
"""Batching a list of datapoints for dataloader.
Parameters
----------
data : list of 3-tuples or 4-tuples.
Each tuple is for a single datapoint, consisting of
a SMILES, a DGLGraph, all-task labels and optionally
a binary mask indicating the existence of labels.
Returns
-------
smiles : list
List of smiles
bg : DGLGraph
The batched DGLGraph.
labels : Tensor of dtype float32 and shape (B, T)
Batched datapoint labels. B is len(data) and
T is the number of total tasks.
masks : Tensor of dtype float32 and shape (B, T)
Batched datapoint binary mask, indicating the
existence of labels. If binary masks are not
provided, return a tensor with ones.
"""
assert len(data[0]) in [3, 4], \
'Expect the tuple to be of length 3 or 4, got {:d}'.format(len(data[0]))
if len(data[0]) == 3:
smiles, graphs, labels = map(list, zip(*data))
masks = None
else:
smiles, graphs, labels, masks = map(list, zip(*data))
bg = dgl.batch(graphs)
bg.set_n_initializer(dgl.init.zero_initializer)
bg.set_e_initializer(dgl.init.zero_initializer)
labels = torch.stack(labels, dim=0)
if masks is None:
masks = torch.ones(labels.shape)
else:
masks = torch.stack(masks, dim=0)
return smiles, bg, labels, masks
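
# Typical usage sketch (illustrative, not from the original file): pass the
# collate function to a PyTorch DataLoader built on one of the subsets above.
# loader = DataLoader(train_set, batch_size=32, shuffle=True,
#                     collate_fn=collate_molgraphs)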
| [
11748,
288,
4743,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
28034,
198,
198,
6738,
288,
70,
297,
901,
13,
26791,
13,
5036,
2541,
11341,
1330,
530,
62,
8940,
62,
12685,
7656,
198,
6738,
288,
70,
297,
901,
13,
... | 2.305605 | 1,695 |
import os
import sys
import argparse
import csv
import itertools
import pew.pew as p
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PowerCouples Serial native version')
parser.add_argument('-i','--input', dest="input_csv", help="input file in csv format", required=True, type=argparse.FileType('r'))
parser.add_argument('-o','--output', dest="output_csv", help="output file in csv format", default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args()
out = csv.writer(args.output_csv)
for row in csv.reader(args.input_csv):
name = row[0]
numbers = [int(i) for i in row[1:] ]
pc = find_powerCouple(numbers)
out.writerow( (name, pc[0], pc[1]) )
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
340,
861,
10141,
198,
11748,
279,
413,
13,
79,
413,
355,
279,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
220,
220,
2... | 2.575342 | 292 |
# importing the module
import tweepy
import os
# personal details
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
# authentication of consumer key and secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# authentication of access token and secret
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# time stamp images before posting to twitter
# post image function
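# Hypothetical sketch of the helper the comments above describe (the original
# body is omitted); assumes the tweepy 3.x update_with_media endpoint.
def post_image(filename, status=""):
    api.update_with_media(filename, status=status)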
| [
2,
33332,
262,
8265,
198,
11748,
4184,
538,
88,
198,
11748,
28686,
198,
198,
2,
2614,
3307,
198,
198,
49827,
62,
2539,
796,
13538,
198,
49827,
62,
21078,
796,
13538,
198,
15526,
62,
30001,
796,
13538,
198,
15526,
62,
30001,
62,
21078,... | 3.406015 | 133 |
# Author: Leland McInnes <leland.mcinnes@gmail.com>
#
# License: BSD 3 clause
import time
import numpy as np
import numba
import scipy.sparse
@numba.njit(parallel=True)
def fast_knn_indices(X, n_neighbors):
"""A fast computation of knn indices.
Parameters
----------
X: array of shape (n_samples, n_features)
The input data to compute the k-neighbor indices of.
n_neighbors: int
The number of nearest neighbors to compute for each sample in ``X``.
Returns
-------
knn_indices: array of shape (n_samples, n_neighbors)
The indices on the ``n_neighbors`` closest points in the dataset.
"""
knn_indices = np.empty((X.shape[0], n_neighbors), dtype=np.int32)
for row in numba.prange(X.shape[0]):
# v = np.argsort(X[row]) # Need to call argsort this way for numba
v = X[row].argsort(kind="quicksort")
v = v[:n_neighbors]
knn_indices[row] = v
return knn_indices
@numba.njit("i4(i8[:])")
def tau_rand_int(state):
"""A fast (pseudo)-random number generator.
Parameters
----------
state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
A (pseudo)-random int32 value
"""
state[0] = (((state[0] & 4294967294) << 12) & 0xFFFFFFFF) ^ (
(((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19
)
state[1] = (((state[1] & 4294967288) << 4) & 0xFFFFFFFF) ^ (
(((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25
)
state[2] = (((state[2] & 4294967280) << 17) & 0xFFFFFFFF) ^ (
(((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11
)
return state[0] ^ state[1] ^ state[2]
@numba.njit("f4(i8[:])")
def tau_rand(state):
"""A fast (pseudo)-random number generator for floats in the range [0,1]
Parameters
----------
state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
A (pseudo)-random float32 in the interval [0, 1]
"""
integer = tau_rand_int(state)
return abs(float(integer) / 0x7FFFFFFF)
@numba.njit()
def norm(vec):
"""Compute the (standard l2) norm of a vector.
Parameters
----------
vec: array of shape (dim,)
Returns
-------
The l2 norm of vec.
"""
result = 0.0
for i in range(vec.shape[0]):
result += vec[i] ** 2
return np.sqrt(result)
@numba.njit()
def rejection_sample(n_samples, pool_size, rng_state):
"""Generate n_samples many integers from 0 to pool_size such that no
integer is selected twice. The duplication constraint is achieved via
rejection sampling.
Parameters
----------
n_samples: int
The number of random samples to select from the pool
pool_size: int
The size of the total pool of candidates to sample from
rng_state: array of int64, shape (3,)
Internal state of the random number generator
Returns
-------
sample: array of shape(n_samples,)
The ``n_samples`` randomly selected elements from the pool.
"""
result = np.empty(n_samples, dtype=np.int64)
for i in range(n_samples):
reject_sample = True
j = 0
while reject_sample:
j = tau_rand_int(rng_state) % pool_size
for k in range(i):
if j == result[k]:
break
else:
reject_sample = False
result[i] = j
return result
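
# Illustrative use (not part of the original module): draw 3 distinct indices
# from a pool of 10 with a seeded, non-zero int64 rng state.
# rng_state = np.array([42, 43, 44], dtype=np.int64)
# rejection_sample(3, 10, rng_state)  # e.g. array([7, 2, 5])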
@numba.njit()
def make_heap(n_points, size):
"""Constructor for the numba enabled heap objects. The heaps are used
for approximate nearest neighbor search, maintaining a list of potential
neighbors sorted by their distance. We also flag if potential neighbors
are newly added to the list or not. Internally this is stored as
a single ndarray; the first axis determines whether we are looking at the
array of candidate indices, the array of distances, or the flag array for
whether elements are new or not. Each of these arrays are of shape
(``n_points``, ``size``)
Parameters
----------
n_points: int
The number of data points to track in the heap.
size: int
The number of items to keep on the heap for each data point.
Returns
-------
heap: An ndarray suitable for passing to other numba enabled heap functions.
"""
result = np.zeros(
(np.int64(3), np.int64(n_points), np.int64(size)), dtype=np.float64
)
result[0] = -1
result[1] = np.infty
result[2] = 0
return result
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def heap_push(heap, row, weight, index, flag):
"""Push a new element onto the heap. The heap stores potential neighbors
for each data point. The ``row`` parameter determines which data point we
are addressing, the ``weight`` determines the distance (for heap sorting),
the ``index`` is the element to add, and the flag determines whether this
is to be considered a new addition.
Parameters
----------
heap: ndarray generated by ``make_heap``
The heap object to push into
row: int
Which actual heap within the heap object to push to
weight: float
The priority value of the element to push onto the heap
index: int
The actual value to be pushed
flag: int
Whether to flag the newly added element or not.
Returns
-------
success: The number of new elements successfully pushed into the heap.
"""
row = int(row)
indices = heap[0, row]
weights = heap[1, row]
is_new = heap[2, row]
if weight >= weights[0]:
return 0
# break if we already have this element.
for i in range(indices.shape[0]):
if index == indices[i]:
return 0
# insert val at position zero
weights[0] = weight
indices[0] = index
is_new[0] = flag
# descend the heap, swapping values until the max heap criterion is met
i = 0
while True:
ic1 = 2 * i + 1
ic2 = ic1 + 1
if ic1 >= heap.shape[2]:
break
elif ic2 >= heap.shape[2]:
if weights[ic1] > weight:
i_swap = ic1
else:
break
elif weights[ic1] >= weights[ic2]:
if weight < weights[ic1]:
i_swap = ic1
else:
break
else:
if weight < weights[ic2]:
i_swap = ic2
else:
break
weights[i] = weights[i_swap]
indices[i] = indices[i_swap]
is_new[i] = is_new[i_swap]
i = i_swap
weights[i] = weight
indices[i] = index
is_new[i] = flag
return 1
@numba.njit("i8(f8[:,:,:],i8,f8,i8,i8)")
def unchecked_heap_push(heap, row, weight, index, flag):
"""Push a new element onto the heap. The heap stores potential neighbors
for each data point. The ``row`` parameter determines which data point we
are addressing, the ``weight`` determines the distance (for heap sorting),
the ``index`` is the element to add, and the flag determines whether this
is to be considered a new addition.
Parameters
----------
heap: ndarray generated by ``make_heap``
The heap object to push into
row: int
Which actual heap within the heap object to push to
weight: float
The priority value of the element to push onto the heap
index: int
The actual value to be pushed
flag: int
Whether to flag the newly added element or not.
Returns
-------
success: The number of new elements successfully pushed into the heap.
"""
if weight >= heap[1, row, 0]:
return 0
indices = heap[0, row]
weights = heap[1, row]
is_new = heap[2, row]
# insert val at position zero
weights[0] = weight
indices[0] = index
is_new[0] = flag
# descend the heap, swapping values until the max heap criterion is met
i = 0
while True:
ic1 = 2 * i + 1
ic2 = ic1 + 1
if ic1 >= heap.shape[2]:
break
elif ic2 >= heap.shape[2]:
if weights[ic1] > weight:
i_swap = ic1
else:
break
elif weights[ic1] >= weights[ic2]:
if weight < weights[ic1]:
i_swap = ic1
else:
break
else:
if weight < weights[ic2]:
i_swap = ic2
else:
break
weights[i] = weights[i_swap]
indices[i] = indices[i_swap]
is_new[i] = is_new[i_swap]
i = i_swap
weights[i] = weight
indices[i] = index
is_new[i] = flag
return 1
@numba.njit()
def siftdown(heap1, heap2, elt):
"""Restore the heap property for a heap with an out of place element
at position ``elt``. This works with a heap pair where heap1 carries
the weights and heap2 holds the corresponding elements."""
while elt * 2 + 1 < heap1.shape[0]:
left_child = elt * 2 + 1
right_child = left_child + 1
swap = elt
if heap1[swap] < heap1[left_child]:
swap = left_child
if right_child < heap1.shape[0] and heap1[swap] < heap1[right_child]:
swap = right_child
if swap == elt:
break
else:
heap1[elt], heap1[swap] = (heap1[swap], heap1[elt])
heap2[elt], heap2[swap] = (heap2[swap], heap2[elt])
elt = swap
@numba.njit()
def deheap_sort(heap):
"""Given an array of heaps (of indices and weights), unpack the heap
out to give and array of sorted lists of indices and weights by increasing
weight. This is effectively just the second half of heap sort (the first
half not being required since we already have the data in a heap).
Parameters
----------
heap : array of shape (3, n_samples, n_neighbors)
The heap to turn into sorted lists.
Returns
-------
indices, weights: arrays of shape (n_samples, n_neighbors)
The indices and weights sorted by increasing weight.
"""
indices = heap[0]
weights = heap[1]
for i in range(indices.shape[0]):
ind_heap = indices[i]
dist_heap = weights[i]
for j in range(ind_heap.shape[0] - 1):
ind_heap[0], ind_heap[ind_heap.shape[0] - j - 1] = (
ind_heap[ind_heap.shape[0] - j - 1],
ind_heap[0],
)
dist_heap[0], dist_heap[dist_heap.shape[0] - j - 1] = (
dist_heap[dist_heap.shape[0] - j - 1],
dist_heap[0],
)
siftdown(
dist_heap[: dist_heap.shape[0] - j - 1],
ind_heap[: ind_heap.shape[0] - j - 1],
0,
)
return indices.astype(np.int64), weights
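
# Small worked example of the heap helpers (not part of the original module):
# heap = make_heap(n_points=2, size=3)
# heap_push(heap, 0, 0.5, 7, 1)
# heap_push(heap, 0, 0.2, 3, 1)
# inds, dists = deheap_sort(heap)  # inds[0] -> [3, 7, -1], dists[0] -> [0.2, 0.5, inf]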
@numba.njit("i8(f8[:, :, :],i8)")
def smallest_flagged(heap, row):
"""Search the heap for the smallest element that is
still flagged.
Parameters
----------
heap: array of shape (3, n_samples, n_neighbors)
The heaps to search
row: int
Which of the heaps to search
Returns
-------
index: int
The index of the smallest flagged element
of the ``row``th heap, or -1 if no flagged
elements remain in the heap.
"""
ind = heap[0, row]
dist = heap[1, row]
flag = heap[2, row]
min_dist = np.inf
result_index = -1
for i in range(ind.shape[0]):
if flag[i] == 1 and dist[i] < min_dist:
min_dist = dist[i]
result_index = i
if result_index >= 0:
flag[result_index] = 0.0
return int(ind[result_index])
else:
return -1
@numba.njit(parallel=True)
def build_candidates(current_graph, n_vertices, n_neighbors, max_candidates, rng_state):
"""Build a heap of candidate neighbors for nearest neighbor descent. For
each vertex the candidate neighbors are any current neighbors, and any
vertices that have the vertex as one of their nearest neighbors.
Parameters
----------
current_graph: heap
The current state of the graph for nearest neighbor descent.
n_vertices: int
The total number of vertices in the graph.
n_neighbors: int
The number of neighbor edges per node in the current graph.
max_candidates: int
The maximum number of new candidate neighbors.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
candidate_neighbors: A heap with an array of (randomly sorted) candidate
neighbors for each vertex in the graph.
"""
candidate_neighbors = make_heap(n_vertices, max_candidates)
for i in range(n_vertices):
for j in range(n_neighbors):
if current_graph[0, i, j] < 0:
continue
idx = current_graph[0, i, j]
isn = current_graph[2, i, j]
d = tau_rand(rng_state)
heap_push(candidate_neighbors, i, d, idx, isn)
heap_push(candidate_neighbors, idx, d, i, isn)
current_graph[2, i, j] = 0
return candidate_neighbors
@numba.njit()
def new_build_candidates(
current_graph, n_vertices, n_neighbors, max_candidates, rng_state, rho=0.5
): # pragma: no cover
"""Build a heap of candidate neighbors for nearest neighbor descent. For
each vertex the candidate neighbors are any current neighbors, and any
vertices that have the vertex as one of their nearest neighbors.
Parameters
----------
current_graph: heap
The current state of the graph for nearest neighbor descent.
n_vertices: int
The total number of vertices in the graph.
n_neighbors: int
The number of neighbor edges per node in the current graph.
max_candidates: int
The maximum number of new candidate neighbors.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
candidate_neighbors: A heap with an array of (randomly sorted) candidate
neighbors for each vertex in the graph.
"""
new_candidate_neighbors = make_heap(n_vertices, max_candidates)
old_candidate_neighbors = make_heap(n_vertices, max_candidates)
for i in range(n_vertices):
for j in range(n_neighbors):
if current_graph[0, i, j] < 0:
continue
idx = current_graph[0, i, j]
isn = current_graph[2, i, j]
d = tau_rand(rng_state)
if tau_rand(rng_state) < rho:
c = 0
if isn:
c += heap_push(new_candidate_neighbors, i, d, idx, isn)
c += heap_push(new_candidate_neighbors, idx, d, i, isn)
else:
heap_push(old_candidate_neighbors, i, d, idx, isn)
heap_push(old_candidate_neighbors, idx, d, i, isn)
if c > 0:
current_graph[2, i, j] = 0
return new_candidate_neighbors, old_candidate_neighbors
@numba.njit(parallel=True)
def submatrix(dmat, indices_col, n_neighbors):
"""Return a submatrix given an orginal matrix and the indices to keep.
Parameters
----------
dmat: array, shape (n_samples, n_samples)
Original matrix.
indices_col: array, shape (n_samples, n_neighbors)
Indices to keep. Each row consists of the indices of the columns.
n_neighbors: int
Number of neighbors.
Returns
-------
submat: array, shape (n_samples, n_neighbors)
The corresponding submatrix.
"""
n_samples_transform, n_samples_fit = dmat.shape
submat = np.zeros((n_samples_transform, n_neighbors), dtype=dmat.dtype)
for i in numba.prange(n_samples_transform):
for j in numba.prange(n_neighbors):
submat[i, j] = dmat[i, indices_col[i, j]]
return submat
# Generates a timestamp for use in logging messages when verbose=True
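# Hedged sketch of that timestamp helper (its definition is omitted from this
# snippet); ``time`` is already imported above.
def ts():
    return time.ctime()
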
# I'm not enough of a numba ninja to numba this successfully.
# np.arrays of lists, which are objects...
def csr_unique(matrix, return_index=True, return_inverse=True, return_counts=True):
"""Find the unique elements of a sparse csr matrix.
We don't explicitly construct the unique matrix leaving that to the user
who may not want to duplicate a massive array in memory.
Returns the indices of the input array that give the unique values.
Returns the indices of the unique array that reconstructs the input array.
Returns the number of times each unique row appears in the input matrix.
matrix: a csr matrix
return_index = bool, optional
If true, return the row indices of 'matrix'
return_inverse: bool, optional
If true, return the the indices of the unique array that can be
used to reconstruct 'matrix'.
return_counts = bool, optional
If true, returns the number of times each unique item appears in 'matrix'
The unique matrix can computed via
unique_matrix = matrix[index]
and the original matrix reconstructed via
unique_matrix[inverse]
"""
lil_matrix = matrix.tolil()
rows = [x + y for x, y in zip(lil_matrix.rows, lil_matrix.data)]
return_values = return_counts + return_inverse + return_index
return np.unique(
rows,
return_index=return_index,
return_inverse=return_inverse,
return_counts=return_counts,
)[1 : (return_values + 1)]
| [
2,
6434,
25,
406,
8822,
1982,
818,
2516,
1279,
293,
1044,
13,
76,
17879,
2516,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
13789,
25,
347,
10305,
513,
13444,
198,
198,
11748,
640,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
9... | 2.352039 | 7,431 |
from malaya_speech.utils import check_file
from malaya_speech.path import CTC_VOCABS
import json
| [
6738,
6428,
11729,
62,
45862,
13,
26791,
1330,
2198,
62,
7753,
198,
6738,
6428,
11729,
62,
45862,
13,
6978,
1330,
327,
4825,
62,
53,
4503,
32,
4462,
198,
11748,
33918,
628,
198
] | 3.09375 | 32 |
from http.server import BaseHTTPRequestHandler
import CryptoService
import Database
import Packager
import XMLParser
| [
6738,
2638,
13,
15388,
1330,
7308,
40717,
18453,
25060,
198,
198,
11748,
36579,
16177,
198,
11748,
24047,
198,
11748,
6400,
3536,
198,
11748,
23735,
46677,
628,
198
] | 4.444444 | 27 |
from functools import wraps
import os
import json
import requests
from flask import redirect, url_for, jsonify, request
from flask_jwt_extended import (
get_jwt_identity, verify_jwt_in_request,
unset_jwt_cookies
)
TOKEN_BLACKLIST = set()
# Decorators
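
# One plausible decorator sketch (the originals are omitted from this snippet):
# verify the JWT on the incoming request and reject blacklisted tokens.
def jwt_not_blacklisted(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_jwt_in_request()
        if get_jwt_identity() in TOKEN_BLACKLIST:
            return jsonify(msg="Token has been revoked"), 401
        return fn(*args, **kwargs)
    return wrapper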
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
7007,
198,
6738,
42903,
1330,
18941,
11,
19016,
62,
1640,
11,
33918,
1958,
11,
2581,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
357,
198,... | 2.806452 | 93 |
"""
Code related to the concept of topic tree and its management: creating
and removing topics, getting info about a particular topic, etc.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
__all__ = [
'TopicManager',
'TopicNameError',
'TopicDefnError',
]
from .callables import getID
from .topicutils import (
ALL_TOPICS,
tupleize,
stringize,
)
from .topicexc import (
TopicNameError,
TopicDefnError,
)
from .topicargspec import (
ArgSpecGiven,
ArgsInfo,
topicArgsFromCallable,
)
from .topicobj import (
Topic,
)
from .treeconfig import TreeConfig
from .topicdefnprovider import ITopicDefnProvider
from .topicmgrimpl import getRootTopicSpec
from .. import py2and3
# ---------------------------------------------------------
ARGS_SPEC_ALL = ArgSpecGiven.SPEC_GIVEN_ALL
ARGS_SPEC_NONE = ArgSpecGiven.SPEC_GIVEN_NONE
# ---------------------------------------------------------
class TopicManager:
"""
Manages the registry of all topics and creation/deletion
of topics.
Note that any method that accepts a topic name can accept it in the
'dotted' format such as ``'a.b.c.'`` or in tuple format such as
``('a', 'b', 'c')``. Any such method will raise a ValueError
if name not valid (empty, invalid characters, etc).
"""
# Allowed return values for isTopicSpecified()
TOPIC_SPEC_NOT_SPECIFIED = 0 # false
TOPIC_SPEC_ALREADY_CREATED = 1 # all other values equate to "true" but different reason
TOPIC_SPEC_ALREADY_DEFINED = 2
def __init__(self, treeConfig=None):
"""The optional treeConfig is an instance of TreeConfig, used to
configure the topic tree such as notification settings, etc. A
default config is created if not given. This method should only be
called by an instance of Publisher (see Publisher.getTopicManager())."""
self.__allTopics = None # root of topic tree
self._topicsMap = {} # registry of all topics
self.__treeConfig = treeConfig or TreeConfig()
self.__defnProvider = _MasterTopicDefnProvider(self.__treeConfig)
# define root of all topics
assert self.__allTopics is None
argsDocs, reqdArgs = getRootTopicSpec()
desc = 'Root of all topics'
specGiven = ArgSpecGiven(argsDocs, reqdArgs)
self.__allTopics = self.__createTopic((ALL_TOPICS,), desc, specGiven=specGiven)
def getRootAllTopics(self):
"""Get the topic that is parent of all root (ie top-level) topics,
for default TopicManager instance created when this module is imported.
Some notes:
- "root of all topics" topic satisfies isAll()==True, isRoot()==False,
getParent() is None;
- all root-level topics satisfy isAll()==False, isRoot()==True, and
getParent() is getDefaultTopicTreeRoot();
- all other topics satisfy neither. """
return self.__allTopics
def addDefnProvider(self, providerOrSource, format=None):
"""Register a topic definition provider. After this method is called, whenever a topic must be created,
the first definition provider that has a definition
for the required topic is used to instantiate the topic.
If providerOrSource is an instance of ITopicDefnProvider, register
it as a provider of topic definitions. Otherwise, register a new
instance of TopicDefnProvider(providerOrSource, format). In that case,
if format is not given, it defaults to TOPIC_TREE_FROM_MODULE. Either
way, returns the instance of ITopicDefnProvider registered.
"""
if isinstance(providerOrSource, ITopicDefnProvider):
provider = providerOrSource
else:
from .topicdefnprovider import (TopicDefnProvider, TOPIC_TREE_FROM_MODULE)
source = providerOrSource
provider = TopicDefnProvider(source, format or TOPIC_TREE_FROM_MODULE)
self.__defnProvider.addProvider(provider)
return provider
def clearDefnProviders(self):
"""Remove all registered topic definition providers"""
self.__defnProvider.clear()
def getNumDefnProviders(self):
"""Get how many topic definitions providers are registered."""
return self.__defnProvider.getNumProviders()
def getTopic(self, name, okIfNone=False):
"""Get the Topic instance for the given topic name. By default, raises
an TopicNameError exception if a topic with given name doesn't exist. If
okIfNone=True, returns None instead of raising an exception."""
topicNameDotted = stringize(name)
#if not name:
# raise TopicNameError(name, 'Empty topic name not allowed')
obj = self._topicsMap.get(topicNameDotted, None)
if obj is not None:
return obj
if okIfNone:
return None
# NOT FOUND! Determine what problem is and raise accordingly:
# find the closest parent up chain that does exists:
parentObj, subtopicNames = self.__getClosestParent(topicNameDotted)
assert subtopicNames
subtopicName = subtopicNames[0]
if parentObj is self.__allTopics:
raise TopicNameError(name, 'Root topic "%s" doesn\'t exist' % subtopicName)
msg = 'Topic "%s" doesn\'t have "%s" as subtopic' % (parentObj.getName(), subtopicName)
raise TopicNameError(name, msg)
def newTopic(self, _name, _desc, _required=(), **_argDocs):
"""Deprecated legacy method.
If topic _name already exists, just returns it and does nothing else.
Otherwise, uses getOrCreateTopic() to create it, then sets its
description (_desc) and its message data specification (_argDocs
and _required). Replaced by getOrCreateTopic()."""
topic = self.getTopic(_name, True)
if topic is None:
topic = self.getOrCreateTopic(_name)
topic.setDescription(_desc)
topic.setMsgArgSpec(_argDocs, _required)
return topic
def getOrCreateTopic(self, name, protoListener=None):
"""Get the Topic instance for topic of given name, creating it
(and any of its missing parent topics) as necessary. Pubsub
functions such as subscribe() use this to obtain the Topic object
corresponding to a topic name.
The name can be in dotted or string format (``'a.b.'`` or ``('a','b')``).
This method always attempts to return a "complete" topic, i.e. one
with a Message Data Specification (MDS). So if the topic does not have
an MDS, it attempts to add it. It first tries to find an MDS
from a TopicDefnProvider (see addDefnProvider()). If none is available,
it attempts to set it from protoListener, if it has been given. If not,
the topic has no MDS.
Once a topic's MDS has been set, it is never again changed or accessed
by this method.
Examples::
# assume no topics exist
# but a topic definition provider has been added via
# pub.addTopicDefnProvider() and has definition for topics 'a' and 'a.b'
# creates topic a and a.b; both will have MDS from the defn provider:
t1 = topicMgr.getOrCreateTopic('a.b')
t2 = topicMgr.getOrCreateTopic('a.b')
assert(t1 is t2)
assert(t1.getParent().getName() == 'a')
def proto(req1, optarg1=None): pass
# creates topic c.d with MDS based on proto; creates c without an MDS
# since no proto for it, nor defn provider:
t1 = topicMgr.getOrCreateTopic('c.d', proto)
The MDS can also be defined via a call to subscribe(listener, topicName),
which indirectly calls getOrCreateTopic(topicName, listener).
"""
obj = self.getTopic(name, okIfNone=True)
if obj:
# if object is not sendable but a proto listener was given,
# update its specification so that it is sendable
if (protoListener is not None) and not obj.hasMDS():
allArgsDocs, required = topicArgsFromCallable(protoListener)
obj.setMsgArgSpec(allArgsDocs, required)
return obj
# create missing parents
nameTuple = tupleize(name)
parentObj = self.__createParentTopics(nameTuple)
# now the final topic object, args from listener if provided
desc, specGiven = self.__defnProvider.getDefn(nameTuple)
# POLICY: protoListener is used only if no definition available
if specGiven is None:
if protoListener is None:
desc = 'UNDOCUMENTED: created without spec'
else:
allArgsDocs, required = topicArgsFromCallable(protoListener)
specGiven = ArgSpecGiven(allArgsDocs, required)
desc = 'UNDOCUMENTED: created from protoListener "%s" in module %s' % getID(protoListener)
return self.__createTopic(nameTuple, desc, parent = parentObj, specGiven = specGiven)
def isTopicInUse(self, name):
"""Determine if topic 'name' is in use. True if a Topic object exists
for topic name (i.e. message has already been sent for that topic, or a
least one listener subscribed), false otherwise. Note: a topic may be in use
but not have a definition (MDS and docstring); or a topic may have a
definition, but not be in use."""
return self.getTopic(name, okIfNone=True) is not None
def hasTopicDefinition(self, name):
"""Determine if there is a definition avaiable for topic 'name'. Return
true if there is, false otherwise. Note: a topic may have a
definition without being in use, and vice versa."""
# in already existing Topic object:
alreadyCreated = self.getTopic(name, okIfNone=True)
if alreadyCreated is not None and alreadyCreated.hasMDS():
return True
# from provider?
nameTuple = tupleize(name)
if self.__defnProvider.isDefined(nameTuple):
return True
return False
def checkAllTopicsHaveMDS(self):
"""Check that all topics that have been created for their MDS.
Raise a TopicDefnError if one is found that does not have one."""
for topic in py2and3.itervalues(self._topicsMap):
if not topic.hasMDS():
raise TopicDefnError(topic.getNameTuple())
def delTopic(self, name):
"""Delete the named topic, including all sub-topics. Returns False
if topic does not exist; True otherwise. Also unsubscribe any listeners
of topic and all subtopics. """
# find from which parent the topic object should be removed
dottedName = stringize(name)
try:
#obj = weakref( self._topicsMap[dottedName] )
obj = self._topicsMap[dottedName]
except KeyError:
return False
#assert obj().getName() == dottedName
assert obj.getName() == dottedName
# notification must be before deletion in case
self.__treeConfig.notificationMgr.notifyDelTopic(dottedName)
#obj()._undefineSelf_(self._topicsMap)
obj._undefineSelf_(self._topicsMap)
#assert obj() is None
return True
def getTopicsSubscribed(self, listener):
"""Get the list of Topic objects that have given listener
subscribed. Note: the listener can also get messages from any
sub-topic of returned list."""
assocTopics = []
for topicObj in py2and3.itervalues(self._topicsMap):
if topicObj.hasListener(listener):
assocTopics.append(topicObj)
return assocTopics
def __getClosestParent(self, topicNameDotted):
"""Returns a pair, (closest parent, tuple path from parent). The
first item is the closest parent Topic that exists.
The second one is the list of topic name elements that have to be
created to create the given topic.
So if topicNameDotted = A.B.C.D, but only A.B exists (A.B.C and
A.B.C.D not created yet), then return is (A.B, ['C','D']).
        Note that if none of the branches exist (not even A), then the return
        will be (root topic, ['A','B','C','D']). Note also that if A.B.C
exists, the return will be (A.B.C, ['D']) regardless of whether
A.B.C.D exists. """
subtopicNames = []
headTail = topicNameDotted.rsplit('.', 1)
while len(headTail) > 1:
parentName = headTail[0]
subtopicNames.insert( 0, headTail[1] )
obj = self._topicsMap.get( parentName, None )
if obj is not None:
return obj, subtopicNames
headTail = parentName.rsplit('.', 1)
subtopicNames.insert( 0, headTail[0] )
return self.__allTopics, subtopicNames
def __createParentTopics(self, topicName):
"""This will find which parents need to be created such that
topicName can be created (but doesn't create given topic),
and creates them. Returns the parent object."""
assert self.getTopic(topicName, okIfNone=True) is None
parentObj, subtopicNames = self.__getClosestParent(stringize(topicName))
# will create subtopics of parentObj one by one from subtopicNames
if parentObj is self.__allTopics:
nextTopicNameList = []
else:
nextTopicNameList = list(parentObj.getNameTuple())
for name in subtopicNames[:-1]:
nextTopicNameList.append(name)
desc, specGiven = self.__defnProvider.getDefn( tuple(nextTopicNameList) )
if desc is None:
desc = 'UNDOCUMENTED: created as parent without specification'
parentObj = self.__createTopic( tuple(nextTopicNameList),
desc, specGiven = specGiven, parent = parentObj)
return parentObj
def __createTopic(self, nameTuple, desc, specGiven, parent=None):
"""Actual topic creation step. Adds new Topic instance to topic map,
and sends notification message (see ``Publisher.addNotificationMgr()``)
regarding topic creation."""
if specGiven is None:
specGiven = ArgSpecGiven()
parentAI = None
if parent:
parentAI = parent._getListenerSpec()
argsInfo = ArgsInfo(nameTuple, specGiven, parentAI)
if (self.__treeConfig.raiseOnTopicUnspecified
and not argsInfo.isComplete()):
raise TopicDefnError(nameTuple)
newTopicObj = Topic(self.__treeConfig, nameTuple, desc,
argsInfo, parent = parent)
# sanity checks:
assert newTopicObj.getName() not in self._topicsMap
if parent is self.__allTopics:
assert len( newTopicObj.getNameTuple() ) == 1
else:
assert parent.getNameTuple() == newTopicObj.getNameTuple()[:-1]
assert nameTuple == newTopicObj.getNameTuple()
# store new object and notify of creation
self._topicsMap[ newTopicObj.getName() ] = newTopicObj
self.__treeConfig.notificationMgr.notifyNewTopic(
newTopicObj, desc, specGiven.reqdArgs, specGiven.argsDocs)
return newTopicObj
def validateNameHierarchy(topicTuple):
"""Check that names in topicTuple are valid: no spaces, not empty.
    Raise ValueError if the check fails. E.g. ('',) and ('a',' ') would
both fail, but ('a','b') would be ok. """
if not topicTuple:
topicName = stringize(topicTuple)
errMsg = 'empty topic name'
raise TopicNameError(topicName, errMsg)
for indx, topic in enumerate(topicTuple):
errMsg = None
if topic is None:
topicName = list(topicTuple)
topicName[indx] = 'None'
errMsg = 'None at level #%s'
elif not topic:
topicName = stringize(topicTuple)
errMsg = 'empty element at level #%s'
elif topic.isspace():
topicName = stringize(topicTuple)
errMsg = 'blank element at level #%s'
if errMsg:
raise TopicNameError(topicName, errMsg % indx)
class _MasterTopicDefnProvider:
"""
Stores a list of topic definition providers. When queried for a topic
definition, queries each provider (registered via addProvider()) and
returns the first complete definition provided, or (None,None).
The providers must follow the ITopicDefnProvider protocol.
"""
def addProvider(self, provider):
"""Add given provider IF not already added. """
assert(isinstance(provider, ITopicDefnProvider))
if provider not in self.__providers:
self.__providers.append(provider)
def clear(self):
"""Remove all providers added."""
self.__providers = []
def getNumProviders(self):
"""Return how many providers added."""
return len(self.__providers)
def getDefn(self, topicNameTuple):
"""Returns a pair (docstring, MDS) for the topic. The first item is
a string containing the topic's "docstring", i.e. a description string
for the topic, or None if no docstring available for the topic. The
second item is None or an instance of ArgSpecGiven specifying the
required and optional message data for listeners of this topic. """
desc, defn = None, None
for provider in self.__providers:
tmpDesc, tmpDefn = provider.getDefn(topicNameTuple)
if (tmpDesc is not None) and (tmpDefn is not None):
assert tmpDefn.isComplete()
desc, defn = tmpDesc, tmpDefn
break
return desc, defn
def isDefined(self, topicNameTuple):
"""Returns True only if a complete definition exists, ie topic
has a description and a complete message data specification (MDS)."""
desc, defn = self.getDefn(topicNameTuple)
if desc is None or defn is None:
return False
if defn.isComplete():
return True
return False
| [
37811,
201,
198,
10669,
3519,
284,
262,
3721,
286,
7243,
5509,
290,
663,
4542,
25,
4441,
220,
201,
198,
392,
10829,
10233,
11,
1972,
7508,
546,
257,
1948,
7243,
11,
3503,
13,
220,
201,
198,
201,
198,
25,
22163,
4766,
25,
15069,
1201... | 2.411391 | 7,866 |
import sqlite3
import pymongo
import os
from dotenv import load_dotenv
from pprintpp import pprint
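

# Hypothetical sketch of the missing connection helper: the env-var name is an
# assumption; it loads .env and returns a pymongo client.
def create_mongodb_connection():
    load_dotenv()
    return pymongo.MongoClient(os.getenv("MONGODB_URI"))
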
if __name__ == "__main__":
# Open a connection
mongo_client = create_mongodb_connection()
db = mongo_client.rgb_characters
# How many total documents are there?
doc_count = db.rgb_characters.count_documents({})
print(f"Counted {doc_count} documents on your MongoDB cluster")
# How many total Characters are there?
character_count = db.rgb_characters.count_documents({ 'name': { "$exists": True } })
print(f"There are {character_count} characters")
# How many total Items?
characters_with_items = list(db.rgb_characters.find({ 'items': { "$exists": True } }))
nested_list_of_items = [character['items'] for character in characters_with_items]
list_of_items = [item for character_items in nested_list_of_items for item in character_items]
print(f"Characters have many items: {list_of_items[:3]}")
item_count = len(list_of_items)
print(f"All characters together have a total of {item_count} items.")
# How many of the Items are weapons? How many are not?
characters_with_weapons = list(db.rgb_characters.find({ 'weapons': { "$exists": True } }))
nested_list_of_weapons = [character['weapons'] for character in characters_with_weapons]
list_of_weapons = [item for character_weapons in nested_list_of_weapons for item in character_weapons]
print(f"Characters have many weapons too: {list_of_weapons[:3]}")
weapon_count = len(list_of_weapons)
print(f"All characters together have a total of {weapon_count} weapons.")
weapon_portion = weapon_count/item_count
print(f"This means that {100*weapon_portion:.2f}% of items are weapons (and {100*(1-weapon_portion):.2f}% are not).")
# How many Items does each character have? (Return first 20 rows)
characters_with_items = list(db.rgb_characters.find({ 'items': { "$exists": True } }))
for character in characters_with_items[:20]:
print(f"{character['name']} has {len(character['items'])} items")
# How many Weapons does each character have? (Return first 20 rows)
characters_with_weapons = list(db.rgb_characters.find({ 'weapons': { "$exists": True } }))
for character in characters_with_weapons[:20]:
print(f"{character['name']} has {len(character['weapons'])} weapons")
# On average, how many Items does each Character have?
print(f"On average, each character has {item_count/character_count:.2f} items.")
# On average, how many Weapons does each character have?
print(f"On average, each character has {weapon_count/character_count:.2f} weapons.") | [
11748,
44161,
578,
18,
198,
11748,
279,
4948,
25162,
198,
11748,
28686,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
6738,
279,
4798,
381,
1330,
279,
4798,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298... | 2.99774 | 885 |
# test list comprehension
print([x.upper() for x in ["one", "two", "three", "four", "five", "six"]])
print([x.upper() for x in ["one", "two", "three", "four", "five", "six"] if len(x) <= 4])
print([x for x in range(10)])
| [
2,
1332,
1351,
35915,
198,
198,
4798,
26933,
87,
13,
45828,
3419,
329,
2124,
287,
14631,
505,
1600,
366,
11545,
1600,
366,
15542,
1600,
366,
14337,
1600,
366,
13261,
1600,
366,
19412,
8973,
12962,
198,
198,
4798,
26933,
87,
13,
45828,
... | 2.635294 | 85 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2021-... Oleksandr Kolodkin <alexandr.kolodkin@gmail.com>.
# This program is distributed under the MIT license.
# Glory to Ukraine!
from typing import Tuple, Optional
import DipTrace
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
33448,
12,
986,
30093,
591,
46273,
25910,
375,
5116,
1279,
1000,
87,
46273,
13,
74,
349,
375,
5116,
31,
... | 2.975309 | 81 |
#!/usr/bin/env python3
#
# Copyright (c) 2021 Iliass Alami Qammouri
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
import conf.conf as conf
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
314,
4528,
562,
978,
6277,
1195,
6475,
10300,
198,
2,
198,
2,
770,
318,
1479,
3788,
11,
11971,
739,
262,
17168,
13789,
13,
198,
2,
4091,
... | 3.047619 | 63 |
import math
import matplotlib
import numpy as np
from typing import Sequence
from PIL import Image
from io import BytesIO
from contextlib import contextmanager
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from figpptx.slide_editor import SlideTransformer, Box
def fig_to_image(fig, **kwargs):
"""Convert ``matplotlib.Figure`` to ``PIL.Image``.
Args:
kwargs (str):
Keyword parameters for ``Figure.savefig`` except ``fname``.
"""
# Ref: https://stackoverflow.com/questions/8598673/how-to-save-a-pylab-figure-into-in-memory-file-which-can-be-read-into-pil-image/8598881 # NOQA
kwargs["format"] = kwargs.get("format", "png")
kwargs["transparent"] = kwargs.get("transparent", True)
buf = BytesIO()
fig.savefig(buf, **kwargs)
buf.seek(0)
image = Image.open(buf).copy()
buf.close()
return image
def ax_to_image(ax, is_tight=True, **kwargs):
"""Convert ``matplotlib.Axes`` to ``PIL.Image``."""
kwargs["transparent"] = kwargs.get("transparent", True)
fig = ax.figure
    artists = fig.get_children()  # [TODO] Check whether ``get_axes`` would be more apt.
with _store_visibility(artists):
for artist in artists:
if artist is not ax:
artist.set_visible(False)
image = fig_to_image(fig, **kwargs)
if is_tight:
image = _crop_image(image, ax)
bbox = ax.get_tightbbox(fig.canvas.get_renderer())
xmin, xmax = math.floor(bbox.xmin), math.ceil(bbox.xmax)
ymin, ymax = math.floor(bbox.ymin), math.ceil(bbox.ymax)
image = image.crop([xmin, ymin, xmax, ymax])
return image
def _get_bbox(image):
"""
(2020-01-12)
    ``Image.getbbox()`` does not seem to work as intended. (Really?)
    So, a substitute is implemented here.
"""
assert image.mode == "RGBA"
width, height = image.size
array = np.array(image)
alpha = array[:, :, -1]
ys, xs = np.where(alpha != 0)
xmin, xmax = np.min(xs) - 1, np.max(xs) + 1
ymin, ymax = np.min(ys) - 1, np.max(ys) + 1
xmin = np.clip(xmin, 0, width)
xmax = np.clip(xmax, 0, width)
ymin = np.clip(ymin, 0, height)
ymax = np.clip(ymax, 0, height)
return xmin, ymin, xmax, ymax
def _crop_image(fig_image, artist):
"""Crop the ``fig_image`` so that only ROI of ``target`` remains."""
width, height = fig_image.size
from figpptx import artist_misc
transformer = SlideTransformer(0, 0, size=(width, height), offset=(0, 0))
if isinstance(artist, Axes):
fig = artist_misc.to_figure(artist)
renderer = fig.canvas.get_renderer()
bbox = artist.get_tightbbox(renderer)
vertices = transformer.transform(bbox)
box = Box.from_vertices(vertices)
elif isinstance(artist, Artist):
box = transformer.get_box(artist)
elif isinstance(artist, Sequence):
boxes = [transformer.get_box(elem) for elem in artist]
box = Box.union(boxes)
else:
raise ValueError("Argument Error.", artist)
xmin, xmax = math.floor(box.left), math.ceil(box.left + box.width)
ymin, ymax = math.floor(box.top), math.ceil(box.top + box.height)
xmin, xmax = max(0, xmin), min(xmax, width - 1)
ymin, ymax = max(0, ymin), min(ymax, height - 1)
image = fig_image.crop([xmin, ymin, xmax + 1, ymax + 1])
return image
@contextmanager
def _store_visibility(artists):
    """Remember the visibility of ``artists`` on entry and restore it on exit.

    (Reconstructed: the function body was missing below the decorator.)
    """
    visibilities = [artist.get_visible() for artist in artists]
    try:
        yield
    finally:
        for artist, visible in zip(artists, visibilities):
            artist.set_visible(visible)
if __name__ == "__main__":
pass
| [
11748,
10688,
198,
11748,
2603,
29487,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
45835,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
673... | 2.338125 | 1,461 |
import sys
baseline = sys.argv[1]
benchmark_decls = sys.argv[2]
benchmark_map = sys.argv[3]
baseline_types = {}
with open(baseline) as f:
for line in f:
if line.strip() == "":
continue
if line.strip()[0] == '#':
continue
if line.startswith("type "):
info = line[len("type "):].strip()
(typeid, defn) = scan_number(info)
(t,rest) = scan_type(defn[1:])
assert rest == ""
baseline_types[typeid] = t
benchmark_types = {}
with open(benchmark_decls) as f:
for line in f:
if line.strip() == "":
continue
if line.strip()[0] == '#':
continue
(typeid, rest) = scan_number(line)
(t, rest) = scan_type(rest[1:])
benchmark_types[typeid] = t
## Compare globals
baseline_globals = {}
with open(baseline) as f:
for line in f:
if "<global>" in line:
parts = line.strip().split(" ")
baseline_globals[parts[0]] = int(parts[2])
benchmark_globals = {}
with open(benchmark_map) as f:
for line in f:
if line.startswith(" @"):
parts = line[2:].strip().split(" ")
benchmark_globals[parts[0]] = int(parts[1])
gdists = []
gsizes = []
gconsv = []
gptacc = []
for addr in baseline_globals:
base_ty = baseline_types[baseline_globals[addr]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_globals[addr]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
base_ty = to_structural(base_ty)
bench_ty = to_structural(bench_ty)
gdists.append(dist(base_ty, bench_ty))
gsizes.append(interval_size(bench_ty))
gconsv.append(conservativeness(base_ty, bench_ty))
gptacc.append(ptr_acc(base_ty, bench_ty))
dists = []
sizes = []
consv = []
ptacc = []
if gdists != []:
print "GLOBALS:"
print "average dist =", float(sum(gdists)) / len(gdists)
print "average size =", float(sum(gsizes)) / len(gsizes)
print "conservative =", 100. * float(sum(gconsv)) / len(gconsv)
print "ptr accuracy =", 100. * float(sum(gptacc)) / len(gptacc)
print
dists += gdists
sizes += gsizes
consv += gconsv
ptacc += gptacc
baseline_rets = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
continue
if "<final>" in line:
parts = line.strip().split(" ")
baseline_rets[funaddr] = int(parts[2])
rty = None
benchmark_rets = {}
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
parts = line.strip().split(" ")
if len(parts) != 2 or parts[1] in "VXS":
rty = None
continue
rty = int(parts[1])
continue
if line.startswith(" ") and line.strip().endswith(":"):
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
if rty != None:
benchmark_rets[addr] = rty
rty = None
rdists = []
rsizes = []
rconsv = []
rptacc = []
for addr in baseline_rets:
base_ty = baseline_types[baseline_rets[addr]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_rets[addr]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
rdists.append(dist(base_ty, bench_ty))
rsizes.append(interval_size(bench_ty))
rconsv.append(conservativeness(base_ty, bench_ty))
rptacc.append(ptr_acc(base_ty, bench_ty))
if rdists != []:
print "RETURNS:"
print "average dist =", float(sum(rdists)) / len(rdists)
print "average size =", float(sum(rsizes)) / len(rsizes)
print "conservative =", 100. * float(sum(rconsv)) / len(rconsv)
print "ptr accuracy =", 100. * float(sum(rptacc)) / len(rptacc)
print
dists += rdists
sizes += rsizes
consv += rconsv
ptacc += rptacc
baseline_params = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
baseline_params[funaddr] = {}
continue
if "<initial>" in line:
parts = line.strip().split(" ")
baseline_params[funaddr][parts[0]] = int(parts[2])
benchmark_params = {}
get_next = False
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
addr = None
parts = line.strip().split(" ")
if len(parts) == 2:
get_next = True
continue
continue
if line.startswith(" ") and line.strip().endswith(":"):
addr = None
if get_next:
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
benchmark_params[addr] = {}
get_next = False
continue
if line.startswith(" ") and addr != None:
parts = line.strip().split(" ")
benchmark_params[addr][parts[0]] = int(parts[1])
pdists = []
psizes = []
pconsv = []
pptacc = []
bps = sorted(baseline_params.keys())
for addr in bps:
for loc in baseline_params[addr]:
base_ty = baseline_types[baseline_params[addr][loc]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types[benchmark_params[addr][loc]]
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
pdists.append(dist(base_ty, bench_ty))
psizes.append(interval_size(bench_ty))
pconsv.append(conservativeness(base_ty, bench_ty))
pptacc.append(ptr_acc(base_ty, bench_ty))
if pdists != []:
print "PARAMETERS:"
print "average dist =", float(sum(pdists)) / len(pdists)
print "average size =", float(sum(psizes)) / len(psizes)
print "conservative =", 100. * float(sum(pconsv)) / len(pconsv)
print "ptr accuracy =", 100. * float(sum(pptacc)) / len(pptacc)
print
dists += pdists
sizes += psizes
consv += pconsv
ptacc += pptacc
baseline_locals = {}
with open(baseline) as f:
for line in f:
if line[0] == "@":
funaddr = line.strip()
baseline_locals[funaddr] = {}
continue
if "<any>" in line:
parts = line.strip().split(" ")
baseline_locals[funaddr][parts[0]] = int(parts[2])
benchmark_locals = {}
get_next = False
with open(benchmark_map) as f:
for line in f:
if not line.startswith(" "):
addr = None
parts = line.strip().split(" ")
if len(parts) == 2:
get_next = True
continue
continue
if line.startswith(" ") and line.strip().endswith(":"):
if get_next:
addr = "@" + hex(int(line.strip()[:-1],16))[2:]
benchmark_locals[addr] = {}
get_next = False
continue
if line.startswith(" ") and addr != None:
parts = line.strip().split(" ")
types = map(lambda x: int(x), parts[1:])
try:
benchmark_locals[addr][parts[0]] += types
except KeyError:
benchmark_locals[addr][parts[0]] = types
ldists = []
lsizes = []
lconsv = []
lptacc = []
for addr in baseline_locals:
for loc in baseline_locals[addr]:
base_ty = baseline_types[baseline_locals[addr][loc]]
if base_ty == "BOT":
continue
try:
bench_ty = benchmark_types_to_union(benchmark_locals[addr][loc])
except KeyError:
continue
if not comparable_size(base_ty, bench_ty):
continue
ldists.append(dist(base_ty, bench_ty))
lsizes.append(interval_size(bench_ty))
lconsv.append(conservativeness(base_ty, bench_ty))
lptacc.append(ptr_acc(base_ty, bench_ty))
if ldists != []:
print "LOCALS:"
print "average dist =", float(sum(ldists)) / len(ldists)
print "average size =", float(sum(lsizes)) / len(lsizes)
print "conservative =", 100. * float(sum(lconsv)) / len(lconsv)
print "ptr accuracy =", 100. * float(sum(lptacc)) / len(lptacc)
print
dists += ldists
sizes += lsizes
consv += lconsv
ptacc += lptacc
if dists != []:
print "TOTAL:"
print " matched entities:", len(dists)
print " average TIE distance:", float(sum(dists)) / len(dists)
print " average TIE interval:", float(sum(sizes)) / len(sizes)
print " average conservative:", float(sum(consv)) / len(consv)
print " average ptr accuracy:", float(sum(ptacc)) / len(ptacc)
print csv(benchmark_map.split(".map")[0].split("/")[-1], [(gdists, gsizes, gconsv, gptacc), (ldists, lsizes, lconsv, lptacc), (rdists, rsizes, rconsv, rptacc), (pdists, psizes, pconsv, pptacc)])
| [
11748,
25064,
198,
198,
12093,
4470,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
26968,
4102,
62,
32446,
82,
796,
25064,
13,
853,
85,
58,
17,
60,
198,
26968,
4102,
62,
8899,
796,
25064,
13,
853,
85,
58,
18,
60,
628,
198,
12093,
44... | 2.080047 | 4,285 |
#!/usr/bin/env python
from setuptools import setup
setup(name='gtsc',
version='0.1',
description='Goblin Trello Sync Client. ',
author='eternnoir',
author_email='eternnoir@gmail.com',
url='https://github.com/ggoblin/trello-sync-client',
packages=['gtsc'],
install_requires=['requests', 'pythondialog'],
entry_points={
'console_scripts': [
'gtsc = gtsc:main',
],
},
) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
70,
912,
66,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
3256,
198,
220,
220,
220,
220,
220,
... | 2.021552 | 232 |
hour_of_the_day = int(input())
day_of_the_week = str(input())
if 10 <= hour_of_the_day <= 18:
    if day_of_the_week in ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'):
print('open')
else:
print('closed')
else:
print('closed')
| [
9769,
62,
1659,
62,
1169,
62,
820,
796,
493,
7,
15414,
28955,
198,
820,
62,
1659,
62,
1169,
62,
10464,
796,
965,
7,
15414,
28955,
198,
198,
361,
838,
19841,
1711,
62,
1659,
62,
1169,
62,
820,
19841,
1248,
25,
198,
220,
220,
220,
... | 2.047619 | 189 |
"""
protocolDefinitions.py
The following module consists of a list of commands or definitions to be used in the communication between devices and the control system
Michael Xynidis
Fluvio L Lobo Fenoglietto
09/26/2016
"""
# Definition Name Value Class
# ---------- ---- ----- -----
SOH = chr(0x01) # Start of Heading 0x01 STD
ENQ = chr(0x05) # Enquiry 0x05 STD
EOT = chr(0x04) # End of Transmission 0x04 STD
ACK = chr(0x06) # Positive Acknowledgement 0x06 STD
NAK = chr(0x15) # Negative Acknowledgement 0x15 STD
CAN = chr(0x18) # Cancel Current Command 0x18 STD
# Device Control Commands
# We have extended the four (4) standard "device control" commands by means of a two-byte communication protocol
DC1 = chr(0x11) # Device Control 1: Diagnostic Functions 0x11 STD
DC1_DEVICEID = chr(0x00) # Device Identification
DC1_SDCHECK = chr(0x01) # SD Card Check 0x00 ORG
# 0xFF ORG
DC2 = chr(0x12) # Device Control 2: Operational Functions 0x12 STD
DC2_SENDWAV = chr(0x00) # Send .WAV File 0x00 ORG
DC2_DELVOLATILE = chr(0x01) # Delete Volatile Files 0x01 ORG
# 0xFF ORG
DC3 = chr(0x13) # Device Control 3: Device-Specific Functions 0x13 STD
DC3_STARTREC = chr(0x00) # Start Recording 0x00 ORG
DC3_STOPREC = chr(0x01) # Stop Recording 0x01 ORG
DC3_STARTPLAY = chr(0x02) # Start Playback 0x02 ORG
DC3_STOPPLAY = chr(0x03) # Stop Playback 0x03 ORG
DC3_STARTSTREAM = chr(0x04) # Start Microphone Stream 0x04 ORG
DC3_STARTTRACKING = chr(0x05) # Start Tracking Microphone Stream for Peaks 0x05 ORG
DC3_STOPTRACKING = chr(0x06) # Stop Tracking Microphone Stream for Peaks 0x06 ORG
# 0xFF ORG
DC4 = chr(0x14) # Device Control 4: Simulation Functions 0x14 STD
DC4_NORMALHB = chr(0x00) # Playback of Normal Heart Beat 0x00 ORG
DC4_ESHMURMUR = chr(0x01) # Playback of Early Systolic Heart Beat 0x01 ORG
# 0xFF ORG
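# Example (sketch; assumes a serial/socket handle ``port`` opened elsewhere):
#     port.write(DC3 + DC3_STARTREC)   # two-byte command: start recording
#     reply = port.read(1)             # expect ACK (0x06) or NAK (0x15)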
# Legend
# STD - Standard terminology / Standard reference for command
# ORG - Original or custom-made command and reference
| [
37811,
198,
11235,
4668,
7469,
50101,
13,
9078,
198,
198,
464,
1708,
8265,
10874,
286,
257,
1351,
286,
9729,
393,
17336,
284,
307,
973,
287,
262,
6946,
1022,
4410,
290,
262,
1630,
1080,
198,
198,
13256,
1395,
2047,
29207,
198,
37,
229... | 1.503502 | 2,427 |
from flask import Flask, render_template, jsonify, request, redirect, url_for, flash
from app import people
app = Flask(__name__)
app.debug=True
@app.route("/",methods=['GET','POST'])
@app.route('/run',methods=['GET','POST'])
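def index():
    # Minimal placeholder for the stripped view function(s); the template name
    # 'index.html' is an assumption, not taken from the original source.
    return render_template('index.html', people=people)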
app.run() | [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
33918,
1958,
11,
2581,
11,
18941,
11,
19016,
62,
1640,
11,
7644,
198,
6738,
598,
1330,
661,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
24442,
28,
17821,
198,
... | 2.811765 | 85 |
# TODO: add a ylabel for the bis axis!
# Note: if a label is set on the bis axis, the label needs to be passed to the other dict. Be careful: two axes share the same figure, so it can get crowded.
class Dict_ax_for_APlot(object):
"""
    dict_ax_for_APlot is an object that stores the properties of each axis of an APlot.
    DEFAULT_DICT shows the default properties of each axis before personalisation.
The parameters are:
title: message on top of the image.
xlabel: legend of x-axis. string.
ylabel: legend of y-axis. string.
xscale: scale of the x-axis. string.
yscale: scale of the y-axis. string.
basex: base for log scale on x-axis. float.
basey: base for log scale on y-axis. float.
parameters: values of the parameters we want to print under the figure. list of floats. Should not be longer than 20.
name_parameters: name of the parameters shown next to the value. list of strings. Should not be longer than 20.
xlim: range of the x-axis. 2 elements list or tuple of floats.
ylim: range of the y-axis. 2 elements list or tuple of floats.
"""
# default parameters
DEFAULT_STR = "Non-Defined."
DEFAULT_DICT = {'title': DEFAULT_STR,
'xlabel': DEFAULT_STR, 'ylabel': DEFAULT_STR,
'xscale': 'linear', 'yscale': 'linear',
'basex': 10, 'basey': 10,
'xint': False, 'yint': False,
'parameters': None, 'name_parameters': None,
'xlim': None, 'ylim': None}
DEFAULT_DICT_BIS = {'title': '',
'xlabel': '', 'ylabel': 'bis_axis',
'xscale': 'linear', 'yscale': 'linear',
'basex': 10, 'basey': 10,
'xint': False, 'yint': False,
'parameters': None, 'name_parameters': None,
'xlim': None, 'ylim': None}
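    # Example (sketch): a user-supplied dict_ax only needs the keys it overrides,
    # e.g. {'title': 'Price', 'xlabel': 'time', 'yscale': 'log', 'basey': 10}.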
# TODO it would be a good idea to design the setter with certain conditions:
# if another parameter than authorised is given, warning!
# parameters and name_parameters same length.
# check that scale and xint not set at the same time?
@classmethod
def help_dict_ax(cls):
"""
Semantics:
print possibilities for dict_ax and the default behavior.
"""
text = cls.DEFAULT_DICT
print(text)
| [
2,
284,
4598,
751,
257,
331,
18242,
47457,
5145,
198,
2,
3551,
866,
326,
11,
611,
6167,
319,
47457,
16488,
11,
761,
284,
1208,
262,
6167,
284,
262,
584,
8633,
13,
1355,
8161,
47125,
734,
7877,
1653,
976,
3785,
290,
523,
460,
307,
... | 2.314833 | 1,045 |
from dataclasses import dataclass
@dataclass | [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
628,
198,
31,
19608,
330,
31172
] | 3.285714 | 14 |
n, q = map(int, input().split())
graph = [[] for _ in range(n)]
for i in range(n - 1):
a, b = map(int, input().split())
graph[a - 1].append(b - 1)
graph[b - 1].append(a - 1)
group = [[], []]
town_color = [-1] * n
tmp = [[0, -1, 0]]
while tmp:
v, past, color = tmp.pop()
town_color[v] = color
group[color].append(v + 1)
for i in graph[v]:
if i == past: continue
tmp.append([i, v, color ^ 1])
# print(group[0])
# print(group[1])
# print(town_color)
for i in range(q):
c, d = map(int, input().split())
if town_color[c - 1] == town_color[d - 1]:
print("Town")
else:
print("Road")
| [
77,
11,
10662,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
198,
34960,
796,
16410,
60,
329,
4808,
287,
2837,
7,
77,
15437,
198,
1640,
1312,
287,
2837,
7,
77,
532,
352,
2599,
198,
220,
220,
220,
257,
11,
275,
796,
3975,
... | 2.144737 | 304 |
from __future__ import absolute_import, division, print_function
del absolute_import, division, print_function
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
12381,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
628
] | 4.185185 | 27 |
# This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file.
#
# Edit at your own risk.
#
import os
from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box,Dropdown, Text
# Populate the GUI widgets with values from the XML
# Read values from the GUI widgets to enable editing XML
| [
220,
198,
2,
770,
2393,
318,
8295,
12,
27568,
422,
257,
11361,
4226,
326,
13544,
274,
257,
8687,
72,
28780,
8398,
20262,
19875,
8,
2393,
13,
198,
2,
198,
2,
5312,
379,
534,
898,
2526,
13,
198,
2,
198,
11748,
28686,
198,
6738,
2096... | 3.372881 | 118 |
from Bio import AlignIO
from Bio.Seq import Seq
from Bio import SeqIO
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
import numpy as np
import pandas
thing = True
# Using the nomenclature of the GUI explanation, here are some example GUI start/end values
# As a reminder, it goes S1s, S1e/H1s, H1e/S2s, S2e/H2s, H2e
# For the ji_cartFish we have: 2,49,93,152,193
# For the cd1d.fasta we have: 124,167,209,262,303
# For the hlaA.fasta we have: 170,218,260,306,348
# For cd1_ufa_genes.fasta: 22,66,105,158,199
# So in the main version of the script, we have a special loader for each data subset
# Can we make just a generalizable one? Let's give it a try...
#####################################################################################
| [
6738,
16024,
1330,
978,
570,
9399,
198,
6738,
16024,
13,
4653,
80,
1330,
1001,
80,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
6738,
16024,
1330,
5166,
3083,
17,
198,
6738,
16024,
13,
24874,
3083,
17,
1330,
5794,
62,
282,
16747,
198,... | 3.043651 | 252 |
# encoding : UTF-8
from .action_object import ActionObject
| [
2,
21004,
1058,
41002,
12,
23,
198,
198,
6738,
764,
2673,
62,
15252,
1330,
7561,
10267,
198
] | 3.529412 | 17 |
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import os
import sys
import time
from threading import Timer
import subprocess
from framework import PyCliTest
from framework import Config
from framework import logit
from s3client_config import S3ClientConfig
| [
2,
198,
2,
15069,
357,
66,
8,
12131,
1001,
37861,
8987,
11419,
290,
14,
273,
663,
6708,
2403,
689,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
... | 3.800797 | 251 |
# In this lesson, we begin our study of loops,
# starting with the "for" loop, which is a versatile structure that is simple to understand.
# For example:
i = int(input('Início: '))
f = int(input('fim: '))
p = int(input('Passo: '))
for c in range(i, f+1, p):
print(c)
print('fim') | [
2,
45,
21411,
257,
4712,
11,
410,
321,
418,
1282,
16175,
283,
299,
793,
418,
1556,
42418,
401,
28686,
8591,
16175,
418,
198,
2,
304,
410,
321,
418,
277,
19178,
6994,
7058,
267,
564,
250,
1640,
447,
251,
11,
8358,
38251,
334,
2611,
... | 2.2 | 130 |
# Author: Frank Cwitkowitz <fcwitkow@ur.rochester.edu>
# My imports
from amt_tools.models import OnsetsFrames
from amt_tools.features import MelSpec
from amt_tools.datasets import MAPS
from amt_tools.train import train, validate
from amt_tools.transcribe import *
from amt_tools.evaluate import *
import amt_tools.tools as tools
# Regular imports
from sacred.observers import FileStorageObserver
from torch.utils.data import DataLoader
from sacred import Experiment
import torch
import os
EX_NAME = '_'.join([OnsetsFrames.model_name(),
MAPS.dataset_name(),
MelSpec.features_name()])
ex = Experiment('Onsets & Frames 1 w/ Mel Spectrogram on MAPS')
@ex.config
@ex.automain
| [
2,
6434,
25,
5278,
327,
39289,
74,
20951,
1279,
16072,
39289,
74,
322,
31,
333,
13,
305,
35983,
13,
15532,
29,
198,
198,
2,
2011,
17944,
198,
6738,
716,
83,
62,
31391,
13,
27530,
1330,
1550,
28709,
35439,
198,
6738,
716,
83,
62,
3... | 2.79845 | 258 |
#!/usr/bin/python
# Python Imports
import collections
import datetime
import numbers
import swapper
from hashlib import sha256
# Django Imports
from django.conf import settings
from django.db import models
import utils
# Local Imports
from mwbase.models import PhoneCall, Practitioner, Visit, Connection
from transports import router, TransportError
from utils import enums
from utils.models import TimeStampedModel, ForUserQuerySet
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
11361,
1846,
3742,
198,
11748,
17268,
198,
11748,
4818,
8079,
198,
11748,
3146,
198,
11748,
1509,
11463,
198,
6738,
12234,
8019,
1330,
427,
64,
11645,
198,
198,
2,
37770,
1846,
3742,
198,
... | 3.705882 | 119 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from singleton import ClsSingleton
logtan = LogTan_Null()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
15069,
1584,
12,
5539,
2807,
14318,
32560,
1766,
1539,
12052,
13,
198,
2,
198,
2,
220,
49962,
739,
... | 3.390698 | 215 |
# coding: utf-8
# # Intro to Pandas (5) - Accessing and Changing Specific Observations
# In the last lesson we saw how to rename and drop columns, and to set the index in a DataFrame.
#
# In this lesson we'll learn about positional and label-based selection and how to use this to make changes to specific observations.
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the cloud
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# ## Positional and Label-based Selection
#
# First of all, I'm going to read some data into a DataFrame. I also need to make another DataFrame which has a label-based index rather than a positional index.
# In[2]:
baseRateData = pd.read_csv("http://www.richard-muir.com/data/public/csv/BoEBaseRate.csv")
baseRateData_r = baseRateData.rename(columns = {'VALUE' : 'Value', 'DATE' : 'Date'})
baseRateData_r.set_index(baseRateData_r['Date'], inplace=True)
baseRateData_r.drop(['Date'], axis = 1, inplace = True)
# Let's have a look at these DataFrames:
# In[3]:
baseRateData.head()
# In[4]:
baseRateData_r.head()
# #### Selecting observations in a DataFrame
#
# We can select observations from a DataFrame by using <code>df.loc</code> and <code>df.iloc</code>.
#
# - <code>df.loc</code> selects the observations by their index label
# - <code>df.iloc</code> selects the observations by their position
#
# Here, I'm using <code>df.loc</code> to select the first 10 rows from <code>baseRateData</code>. Note that <code>df.loc</code> doesn't work like a list slice in Python; rather than stopping before the specified number, we include that observation:
# In[5]:
baseRateData.loc[:9]
# If I try to use <code>df.loc</code> on <code>baseRateData_r</code>, this won't work because we have changed the index label:
# In[6]:
baseRateData_r.loc[:9]
# Instead I have to pass the row index label which I want:
# In[7]:
baseRateData_r.loc[:'15/01/1975']
# But <code>df.iloc</code> works the same on both DataFrames because in <code>baseRateData</code>, the index is equal to the position - <code>df.iloc</code> works on the ordinal position of the rows.
#
# Confusingly, <code>df.iloc</code> works in the same way as list and string slicing, stopping just before the specified position:
# In[8]:
baseRateData.iloc[:9]
# In[9]:
baseRateData_r.iloc[:9]
# For both <code>df.loc</code> and <code>df.iloc</code>, we can take a slice from the middle of the DataFrame:
# In[10]:
baseRateData_r.loc['06/01/1975':'13/01/1975']
# In[11]:
baseRateData.iloc[4:6]
# We can also combine the column names with <code>df.loc</code> and <code>df.iloc</code> to get 2D slices of a DataFrame.
#
# Remember that <code>df.loc</code> works on the labels:
# In[12]:
baseRateData.loc[5:13, 'DATE']
# But <code>df.iloc</code> operates on the index; the columns are numerically indexed (in the same way as the rows):
# In[13]:
baseRateData.iloc[5:13, 0]
# ### Changing Data in a DataFrame
#
# So now we can select individual rows and columns in a DataFrame by the index label or position. We can use this knowledge to make changes to specific observations within the DataFrame.
#
# Imagine that we were told that the first twenty rows of our data were incorrect; they should have been 1.15 instead of 11.5. Let's make some changes!
#
# First of all, I'm using <code>df.loc</code> to select the first 20 rows by label and only the 'VALUE' column. It's just a simple matter of setting the value which we want these observations to take:
# In[14]:
baseRateData.loc[:19, 'VALUE'] = 1.15
baseRateData.head(25)
# We can also do it with <code>df.iloc</code>. Remember that the slicing is slightly different...
#
# I'll change it instead to 2.15 so we can prove it works:
# In[15]:
baseRateData.iloc[:20, 0] = 2.15
baseRateData.head(25)
# ### What have we learnt this lesson?
# In this lesson we've seen how to access rows and columns by their label and position, and to use this positional selection to make changes to the data in the DataFrame.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
37219,
284,
16492,
292,
357,
20,
8,
532,
8798,
278,
290,
33680,
17377,
19243,
602,
198,
198,
2,
554,
262,
938,
11483,
356,
2497,
703,
284,
36265,
290,
4268,
15180,
11,
290,
... | 3.085151 | 1,421 |
# ------------------------------------------------------------------------
#
#
# Made with python 3.8.8
#
#
# ------------------------------------------------------------------------
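# Minimal sketch of the stripped helper: print the primes up to n using the
# sieve of Eratosthenes.
def eratostenes(n):
    is_prime = [True] * (n + 1)
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, n + 1, i):
                is_prime[j] = False
    print([i for i in range(2, n + 1) if is_prime[i]])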
eratostenes(100) | [
2,
16529,
982,
201,
198,
2,
201,
198,
2,
201,
198,
2,
14446,
351,
21015,
513,
13,
23,
13,
23,
201,
198,
2,
201,
198,
2,
201,
198,
2,
16529,
982,
201,
198,
201,
198,
263,
265,
455,
18719,
7,
3064,
8
] | 4.952381 | 42 |
"""Generate graph-based cross-validation spatial folds"""
import os
import time
from typing import Dict, List
from dataclasses import dataclass, field
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from tqdm import tqdm
from src.scv.scv import SpatialCV
X_1DIM_COL = "X_1DIM"
@dataclass
class RegGraphBasedSCV(SpatialCV):
"""Generates the Regularization Graph Based Spatial Cross-Validation folds
Attributes
----------
data: pd.Dataframe
The spatial dataset to generate the folds
fold_col: str
The fold column name
target_col: str
        The target attribute column name
adj_matrix: pd.Dataframe
The adjacency matrix regarding the spatial objects in the data
paper: bool
Whether to run experiments according to ICMLA21 paper
root_path : str
Root path
"""
kappa: float = 0.5
run_selection: bool = False
target_col: str = "TARGET"
adj_matrix: pd.DataFrame = field(default_factory=pd.DataFrame)
paper: bool = False
type_graph: str = "Sparse"
sill_target: Dict = field(default_factory=dict)
sill_reduced: Dict = field(default_factory=dict)
sill_max_reduced: Dict = field(default_factory=dict)
w_matrix: pd.DataFrame = field(default_factory=pd.DataFrame)
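    # Example (sketch; argument names follow the attributes documented above,
    # values are illustrative only):
    #     scv = RegGraphBasedSCV(data=df, fold_col="FOLD", target_col="TARGET",
    #                            adj_matrix=adj, w_matrix=w, kappa=0.5)
    #     scv.run()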
def _calculate_train_pca(self) -> np.array:
"""Return the PCA first component transformation on the traind data"""
pca = PCA(n_components=1)
train = self.data.drop(columns=[self.fold_col, self.target_col])
        # For the ICMLA21 paper the PCA is executed only on the census columns
if self.paper:
cols = [c for c in train.columns if "CENSUS" in c]
train = train[cols]
pca.fit(train)
return pca.transform(train).flatten()
def _calculate_removing_buffer_sill(self, fold_name, fold_data, global_var) -> Dict:
"""Calculate the sill for each fold to be used on the removing buffer process"""
fold_target = fold_data[self.target_col]
test_target = self.test_data[self.target_col]
target_var = fold_target.append(test_target).var()
self.sill_target[fold_name] = (target_var + global_var) / 2
def _calculate_selection_buffer_sill(
self, fold_name, fold_data, global_var
) -> Dict:
"""Calculate the sill for each fold to be used on the selection buffer process"""
reduced_var = fold_data[X_1DIM_COL].append(self.test_data[X_1DIM_COL]).var()
# self.sill_reduced[fold_name] = (reduced_var + global_var) / 2
self.sill_reduced[fold_name] = reduced_var
max_var_train = max(self.sill_reduced, key=self.sill_reduced.get)
for _ in self.sill_reduced:
self.sill_max_reduced[_] = self.sill_reduced[max_var_train]
def _initiate_buffers_sills(self) -> Dict:
"""Initialize and calculate the sills for the removing and selectiont procedures"""
global_target_var = self.data[self.target_col].var()
global_reduced_var = self.data[X_1DIM_COL].var()
self.sill_target = {}
self.sill_reduced = {}
for fold_name, fold_data in self.train_data.groupby(by=self.fold_col):
self._calculate_selection_buffer_sill(
fold_name, fold_data, global_reduced_var
)
self._calculate_removing_buffer_sill(
fold_name, fold_data, global_target_var
)
def _convert_adj_matrix_index_types(self) -> pd.DataFrame:
"""Convert adjacenty matrixy index and columns types to the same as in the data"""
self.adj_matrix.index = self.adj_matrix.index.astype(self.data.index.dtype)
self.adj_matrix.columns = self.adj_matrix.columns.astype(self.data.index.dtype)
self.w_matrix.index = self.w_matrix.index.astype(self.data.index.dtype)
self.w_matrix.columns = self.w_matrix.columns.astype(self.data.index.dtype)
@staticmethod
def _get_neighbors(indexes, adj_matrix) -> List:
"""Return the 1-degree neighborhood from a given sub-graph formed by indexes"""
area_matrix = adj_matrix.loc[indexes]
neighbors = area_matrix.sum(axis=0) > 0
neighbors = neighbors[neighbors].index
neighbors = [n for n in neighbors if n not in indexes]
return neighbors
def _calculate_longest_path(self) -> int:
"""Calculate the longest_path from a BFS tree taking the test set as root"""
path_indexes = self.test_data.index.values.tolist()
local_data_idx = (
self.test_data.index.values.tolist() + self.train_data.index.values.tolist()
)
matrix = self.adj_matrix.loc[local_data_idx, local_data_idx]
neighbors = self._get_neighbors(path_indexes, matrix)
size_tree = 0
while len(neighbors) > 0:
size_tree += 1
neighbors = self._get_neighbors(path_indexes, matrix)
path_indexes = path_indexes + neighbors
return size_tree
def _calculate_similarity_matrix(self, fold_data, attribute) -> np.ndarray:
"""Calculate the similarity matrix between test set and a given training
fold set based on a given attribute"""
test_values = self.test_data[attribute].to_numpy()
node_values = fold_data[attribute]
return (test_values - node_values) ** 2
@staticmethod
def _calculate_gamma(similarity, geo_weights, kappa) -> np.float64:
"""Calculate gamma or the semivariogram"""
gamma_dist = similarity - (kappa * (1 - geo_weights) * similarity)
sum_diff = np.sum(gamma_dist)
sum_dist = len((similarity))
return sum_diff / (2 * sum_dist)
def _get_neighbors_weights(self, index):
"""Return the matrix weights test set x neighbors"""
return self.w_matrix.loc[self.test_data.index, index]
def _calculate_gamma_by_node(self, neighbors, attribute, kappa) -> Dict:
"""Calculate the semivariogram by folds"""
nodes_gamma = {}
neighbors = [n for n in neighbors if n in self.train_data.index]
neighbors_data = self.train_data.loc[neighbors]
for index, node_data in neighbors_data.iterrows():
similarity = self._calculate_similarity_matrix(node_data, attribute)
geo_weights = self._get_neighbors_weights(index)
gamma = self._calculate_gamma(similarity, geo_weights, kappa)
nodes_gamma[index] = gamma
return nodes_gamma
def _get_n_fold_neighbohood(self) -> int:
"""Get ne number of folds neighbors from the test set"""
neighbors_idx = self._get_neighbors(self.test_data.index, self.adj_matrix)
neighbors_idx = [n for n in neighbors_idx if n in self.data.index]
return len(self.data.loc[neighbors_idx].groupby(self.fold_col))
@staticmethod
def _calculate_exponent(size_tree, count_n) -> np.float64:
"""Caclulate the decay exponent"""
return np.log(1 * size_tree - count_n) / np.log(1 * size_tree)
def _propagate_variance(self, attribute, kappa) -> List:
"""Calculate propagate variance"""
# Initialize variables
        buffer = []  # contains the indexes of buffered instances
nodes_gamma = {}
# Start creating the buffer
while len(buffer) < self.train_data.shape[0]:
            # Get the instance indexes from the test set + the buffered indexes
growing_graph_idx = self.test_data.index.values.tolist() + buffer
# Get the neighbor
h_neighbors = self._get_neighbors(growing_graph_idx, self.adj_matrix)
# Calculate the semivariogram for each fold in the neighborhood
nodes_gamma.update(
self._calculate_gamma_by_node(h_neighbors, attribute, kappa)
)
buffer += h_neighbors
return nodes_gamma
def _calculate_selection_buffer(self, nodes_propagated, attribute):
"""Calculate buffer nodes"""
buffered_nodes = []
sill = self.data[attribute].var()
buffered_nodes = [
node for node, gamma in nodes_propagated.items() if gamma < sill
]
return buffered_nodes
def _calculate_removing_buffer(self, nodes_propagated, nodes_reduced, attribute):
"""Calculate buffer nodes"""
sill_target = self.test_data[attribute].var()
# sill_w_matrix = self.w_matrix.to_numpy().var()
sill_reduced = self.test_data[X_1DIM_COL].var()
# sill_target = self.kappa * sill_target + (1 - self.kappa) * sill_w_matrix
buffered_nodes_target = [
node for node, gamma in nodes_propagated.items() if gamma < sill_target
]
buffered_nodes_reduced = [
node for node, gamma in nodes_reduced.items() if gamma < sill_reduced
]
# return [node for node in buffered_nodes_target if node in buffered_nodes_reduced]
return buffered_nodes_target
def run(self):
"""Generate graph-based spatial folds"""
# Create folder folds
start_time = time.time()
self._init_fields()
self._make_folders(["folds", self.scv_method])
self.data[X_1DIM_COL] = self._calculate_train_pca()
for fold_name, test_data in tqdm(
self.data.groupby(by=self.fold_col), desc="Creating folds"
):
if fold_name != -1:
                # Create the fold folder
self._mkdir(str(fold_name))
# Initialize x , y and reduce
self._split_data_test_train(test_data)
# Calculate local sill
self._initiate_buffers_sills()
# Ensure indexes and columns compatibility
self._convert_adj_matrix_index_types()
# Calculate selection buffer
nodes_prop_reduced = self._propagate_variance(X_1DIM_COL, self.kappa)
selection_buffer = self._calculate_selection_buffer(
nodes_prop_reduced, X_1DIM_COL
)
if self.run_selection:
self.train_data = self.train_data.loc[selection_buffer]
# The train data is used to calcualte the buffer. Thus, the size tree,
# and the gamma calculation will be influenced by the selection buffer.
# Calculate removing buffer
nodes_prop_target = self._propagate_variance(
self.target_col, self.kappa
)
removing_buffer = self._calculate_removing_buffer(
nodes_prop_target, nodes_prop_reduced, self.target_col
)
# removing_buffer = [node for node in removing_buffer if node in selection_buffer]
# removing_buffer = selection_buffer
self.train_data.drop(index=removing_buffer, inplace=True)
# Save buffered data indexes
self._save_buffered_indexes(removing_buffer)
# Save fold index relation table
self._save_fold_by_index_training()
# Clean data
self._clean_data(cols_drop=[X_1DIM_COL, self.fold_col])
# Save data
# self._save_data()
# Update cur dir
self.cur_dir = os.path.join(
self._get_root_path(), "folds", self.scv_method
)
# Save execution time
end_time = time.time()
self._save_time(end_time, start_time)
print(f"Execution time: {end_time-start_time} seconds")
| [
37811,
8645,
378,
4823,
12,
3106,
3272,
12,
12102,
341,
21739,
38744,
37811,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
11748,
1979... | 2.249903 | 5,170 |
# Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import unittest
from son.package.package import Packager
from son.workspace.workspace import Workspace
from son.workspace.workspace import Project
| [
2,
220,
15069,
357,
66,
8,
1853,
311,
1340,
13563,
12,
21870,
53,
11,
471,
3483,
47357,
198,
2,
11096,
371,
34874,
15731,
1137,
53,
1961,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
... | 3.792899 | 338 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Bool Tool",
"author": "Vitor Balbio, Mikhail Rachinskiy, TynkaTopi, Meta-Androcto",
"version": (0, 3, 9),
"blender": (2, 79, 2),
"location": "View3D > Toolshelf",
"description": "Bool Tool Hotkey: Ctrl Shift B",
"wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Object/BoolTool",
"category": "Object",
}
import bpy
from bpy.app.handlers import persistent
from bpy.types import (
AddonPreferences,
Operator,
Panel,
Menu,
)
from bpy.props import (
BoolProperty,
StringProperty,
EnumProperty,
)
# ------------------- Bool Tool FUNCTIONS -------------------------
# Utils:
# Hide boolean objects
# Object is a Canvas
# Object is a Brush Tool Bool
# Object is a Poly Brush Tool Bool collection
"""
# EXPERIMENTAL FEATURES
def isMakeVertexGroup():
preferences = bpy.context.preferences
addon_prefs = preferences.addons[__name__].preferences
if addon_prefs.make_vertex_groups:
return True
else:
return False
def isMakeBoundary():
preferences = bpy.context.preferences
addon_prefs = preferences.addons[__name__].preferences
if addon_prefs.make_boundary:
return True
else:
return False
"""
# Do the Union, Difference and Intersection Operations with a Brush
# Remove Objects from the BoolTool System
# Toggle the Enable the Brush Object Property
# Find the Canvas and Enable this Brush
# Toggle the Fast Transform Property of the Active Brush
# Apply All Brushes to the Canvas
# Apply This Brush to the Canvas
# Handle the callbacks when modifying things in the scene
@persistent
# ------------------ Bool Tool OPERATORS --------------------------------------
# Fast Transform
# ------------------- Bool Tool OPERATOR CLASSES --------------------------------------------------------
# Brush Operators --------------------------------------------
# Boolean Union Operator
# Boolean Intersection Operator
# Boolean Difference Operator
# Boolean Slices Operator
# Auto Boolean operators (maintainer Mikhail Rachinskiy)
# --------------------------------------------------------------------------------------
# Utils Class ---------------------------------------------------------------
# Find the Brush Selected in Three View
# Move The Modifier in The Stack Up or Down
# Enable or Disable a Brush in the Three View
# Enable or Disable a Brush Directly
# Enable or Disable a Brush Directly
# Other Operations -------------------------------------------------------
# Remove a Brush or a Canvas
# Apply All to Canvas
# Apply This Brush to the Canvas
# TODO
# Apply This Brush To Mesh
# ------------------- MENU CLASSES ------------------------------
# 3Dview Header Menu
# ---------------- Toolshelf: Tools ---------------------
# ---------- Toolshelf: Properties --------------------------------------------------------
# ---------- Toolshelf: Brush Viewer -------------------------------------------------------
# ------------------ BOOL TOOL Help ----------------------------
# ------------------ BOOL TOOL ADD-ON PREFERENCES ----------------------------
# Add-ons Preferences Update Panel
# Define Panel classes for updating
panels = (
VIEW3D_PT_booltool_tools,
VIEW3D_PT_booltool_config,
VIEW3D_PT_booltool_bviewer,
)
# ------------------- Class List ------------------------------------------------
classes = (
PREFS_BoolTool_Props,
VIEW3D_MT_booltool_menu,
VIEW3D_PT_booltool_tools,
VIEW3D_PT_booltool_config,
VIEW3D_PT_booltool_bviewer,
OBJECT_OT_BoolTool_Auto_Union,
OBJECT_OT_BoolTool_Auto_Difference,
OBJECT_OT_BoolTool_Auto_Intersect,
OBJECT_OT_BoolTool_Auto_Slice,
OBJECT_OT_BoolTool_Auto_Subtract,
BTool_Union,
BTool_Diff,
BTool_Inters,
BTool_Slice,
BTool_DrawPolyBrush,
BTool_Remove,
BTool_AllBrushToMesh,
BTool_BrushToMesh,
BTool_FindBrush,
BTool_MoveStack,
BTool_EnableBrush,
BTool_EnableThisBrush,
BTool_EnableFTransform,
BTool_FastTransform,
WM_OT_BoolTool_Help,
)
# ------------------- REGISTER ------------------------------------------------
addon_keymaps = []
addon_keymapsFastT = []
# Fast Transform HotKeys Register
# Fast Transform HotKeys UnRegister
if __name__ == "__main__":
register()
| [
2,
46424,
347,
43312,
38644,
38559,
24290,
9878,
11290,
46424,
198,
2,
198,
2,
220,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
220,
13096,
340,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
1378... | 3.181982 | 1,665 |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from logging import getLogger
from pkg_resources import iter_entry_points
from opentelemetry.instrumentation.auto_instrumentation.components import (
initialize_components,
)
logger = getLogger(__file__)
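def initialize():
    """Minimal sketch of the stripped helper: set up auto-instrumentation,
    logging (rather than raising) on failure."""
    try:
        initialize_components()
    except Exception:  # pylint: disable=broad-except
        logger.exception("Failed to auto initialize opentelemetry")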
if (
hasattr(sys, "argv")
and sys.argv[0].split(os.path.sep)[-1] == "celery"
and "worker" in sys.argv[1:]
):
from celery.signals import worker_process_init # pylint:disable=E0401
@worker_process_init.connect(weak=False)
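    def init_celery_worker(*args, **kwargs):
        # Sketch of the stripped callback body: defer initialization until each
        # forked Celery worker process is ready.
        initialize()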
else:
initialize()
| [
2,
15069,
383,
4946,
31709,
41935,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
9... | 3.181287 | 342 |
from socket import *
import threading
try:
pairfamily = AF_UNIX
except NameError:
pairfamily = AF_INET
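def pairConnect(sock, port):
    """Minimal sketch of the stripped helper: connect the client end of the
    pair to the ephemeral listening port on localhost."""
    sock.connect(('localhost', port))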
def SocketPair(family=pairfamily, type_=SOCK_STREAM, proto=IPPROTO_IP):
"""Wraps socketpair() to support Windows using local ephemeral ports"""
try:
sock1, sock2 = socketpair(family, type_, proto)
return (sock1, sock2)
except NameError:
listensock = socket(family, type_, proto)
listensock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
listensock.bind( ('localhost', 0) )
iface, ephport = listensock.getsockname()
listensock.listen(1)
sock1 = socket(family, type_, proto)
connthread = threading.Thread(target=pairConnect, args=[sock1, ephport])
connthread.setDaemon(1)
connthread.start()
sock2, sock2addr = listensock.accept()
listensock.close()
return (sock1, sock2)
| [
6738,
17802,
1330,
1635,
198,
11748,
4704,
278,
198,
198,
28311,
25,
198,
220,
220,
220,
5166,
17989,
796,
12341,
62,
4944,
10426,
198,
16341,
6530,
12331,
25,
198,
220,
220,
220,
5166,
17989,
796,
12341,
62,
1268,
2767,
198,
198,
429... | 2.349869 | 383 |
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from rest_framework_jwt.settings import api_settings
from notes.models import PersonalNote
from django.contrib.auth.models import User, Group
# from taggit_serializer.serializers import (TagListSerializerField, TaggitSerializer) | [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
1334,
62,
30604,
13,
12102,
2024,
1330,
30015,
47139,
1352,
198,
6738,
1334,
62,
30604,
62,
73,
46569,
13,
33692,
1330,
40391,
62,
33692,
198,
6738,
4710,
13,
27530,
1330,
15644,
642... | 3.962963 | 81 |
# Copyright 2019-2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
setup(
name='kubeflow-kale',
version='0.5.1',
description='Convert JupyterNotebooks to Kubeflow Pipelines deployments',
url='https://github.com/kubeflow-kale/kale',
author='Stefano Fioravanzo',
author_email='stefano.fioravanzo@gmail.com',
license='Apache License Version 2.0',
packages=['kale',
'kale.common',
'kale.config',
'kale.marshal',
'kale.processors',
'kale.rpc',
'kale.static_analysis',
],
install_requires=[
'kfp',
'autopep8 >=1.4, <1.5',
'astor >= 0.8.1',
'nbformat >=4.4, <5.0',
'networkx >=2.3, <3.0',
'jinja2 >=2.10, <3.0',
'graphviz >=0.13, <1.0',
'pyflakes >=2.1.1',
'dill >=0.3, <0.4',
'IPython >= 7.6.0',
'jupyter-client >= 5.3.4',
'jupyter-core >= 4.6.0',
'nbconvert >= 5.6.1, < 6.0.0',
'ipykernel >= 5.1.4',
'notebook >= 6.0.0',
'packaging > 20',
'ml_metadata == 0.24.0',
'progress >= 1.5',
],
extras_require={
'dev': [
'pytest',
'pytest-clarity',
'testfixtures',
'pytest-cov',
'flake8',
'flake8-docstrings'
]
},
entry_points={'console_scripts':
['kale=kale.command_line:main',
'kale_server=kale.command_line:server',
'kale-volumes=kale.command_line:kale_volumes']},
python_requires='>=3.6.0',
include_package_data=True,
zip_safe=False
)
| [
2,
220,
15069,
13130,
12,
42334,
383,
509,
1000,
46665,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
2... | 2.019874 | 1,107 |
from io import TextIOWrapper
from typing import Optional, Sequence, Union
from services.external_api.client import RetryConfig
from starkware.starknet.cli.starknet_cli import assert_tx_received, validate_arguments
from starkware.starknet.definitions import constants, fields
from starkware.starknet.public.abi_structs import identifier_manager_from_abi
from starkware.starknet.services.api.contract_class import ContractClass
from starkware.starknet.services.api.gateway.gateway_client import GatewayClient
from starkware.starknet.services.api.gateway.transaction import Deploy
from starkware.starknet.utils.api_utils import cast_to_felts
from starkware.starkware_utils.error_handling import StarkErrorCode
from protostar.commands.deploy.gateway_response import SuccessfulGatewayResponse
from protostar.protostar_exception import ProtostarException
async def deploy(
gateway_url: str,
compiled_contract_file: TextIOWrapper,
constructor_args: Optional[Sequence[Union[str, int]]] = None,
salt: Optional[str] = None,
token: Optional[str] = None,
) -> SuccessfulGatewayResponse:
"""Version of deploy function from starkware.starknet.cli.starknet_cli independent of CLI logic."""
inputs = cast_to_felts(constructor_args or [])
if salt is not None and not salt.startswith("0x"):
raise ValueError(f"salt must start with '0x'. Got: {salt}.")
try:
numeric_salt: int = (
fields.ContractAddressSalt.get_random_value()
if salt is None
else int(salt, 16)
)
except ValueError as err:
raise ValueError("Invalid salt format.") from err
contract_class = ContractClass.loads(data=compiled_contract_file.read())
abi = contract_class.abi
assert abi is not None, "Missing ABI in the given contract class."
for abi_entry in abi:
if abi_entry["type"] == "constructor":
validate_arguments(
inputs=inputs,
abi_entry=abi_entry,
identifier_manager=identifier_manager_from_abi(abi=abi),
)
break
else:
if len(inputs) != 0:
raise ValueError(
"Constructor args cannot be specified for contracts without a constructor."
)
tx = Deploy(
contract_address_salt=numeric_salt,
contract_definition=contract_class,
constructor_calldata=inputs,
version=constants.TRANSACTION_VERSION,
) # type: ignore
gateway_client = GatewayClient(
url=gateway_url, retry_config=RetryConfig(n_retries=1)
)
gateway_response = await gateway_client.add_transaction(tx=tx, token=token)
if gateway_response["code"] != StarkErrorCode.TRANSACTION_RECEIVED.name:
raise DeployContractException(
message=f"Failed to send transaction. Response: {gateway_response}."
)
contract_address = int(gateway_response["address"], 16)
return SuccessfulGatewayResponse(
address=contract_address,
code=gateway_response["code"],
transaction_hash=gateway_response["transaction_hash"],
)
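# Example usage (sketch; the gateway URL and file path are placeholders):
#     import asyncio
#     with open("build/main.json") as compiled:
#         response = asyncio.run(deploy("https://alpha4.starknet.io/gateway", compiled))
#     print(hex(response.address))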
| [
6738,
33245,
1330,
8255,
40,
3913,
430,
2848,
198,
6738,
19720,
1330,
32233,
11,
45835,
11,
4479,
198,
198,
6738,
2594,
13,
22615,
62,
15042,
13,
16366,
1330,
4990,
563,
16934,
198,
6738,
19278,
1574,
13,
301,
668,
3262,
13,
44506,
13... | 2.642918 | 1,179 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-12 04:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2864,
12,
2919,
12,
1065,
8702,
25,
1507,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738... | 2.791045 | 67 |
import avlpy
# --------------------------------------------------------------------------------------------
# AVL SESSION
# --------------------------------------------------------------------------------------------
# setup avl session
avlSess = avlpy.avlRun('avl3.35','wingus.avl','wingus.mass')
# set cruise condition to 9 m/s
avlSess.set_flight_constraint('C1','V','9')
# set elevator to pitch such that pitch moment is 0 at cruise
avlSess.set_var_constraint('D1','PM',0)
# write flow analysis to default avl_tmp location
fname,proc_out = avlSess.get_flow_analysis('ST')
# read avl flow analysis to dictionary
avl_dict = avlpy.read_avl_flow_analysis(fname)
# print some important constants
print("Alpha: " + str(avl_dict['Alpha']))
print("Elevator Defl.: " + str(avl_dict['elevator']))
print("Neutral Point: " + str(avl_dict['Xnp']))
print("Cma: " + str(avl_dict["Cma"]))
print("Clb: " + str(avl_dict["Clb"]))
print("Cnb: " + str(avl_dict["Cnb"]))
# perform avl dynamic value analysis
fname2,proc_out2 = avlSess.get_eig_analysis('S')
# read state matrices to python arrays
A,B = avlpy.read_avl_sys_mat(fname2)
print("\nDynamic 'A' Matrix: " + str(A))
# --------------------------------------------------------------------------------------------
# READING AVL FILES
# --------------------------------------------------------------------------------------------
surfaces = avlpy.read_avl_file('wingus.avl')
print("\nExample Surfaces File:")
print(surfaces)
# save surfaces to new surfaces.avl file
avlpy.save_avl_file('surfaces.avl',surfaces) | [
11748,
1196,
75,
9078,
198,
2,
16529,
1783,
10541,
198,
2,
14661,
43,
311,
47621,
198,
2,
16529,
1783,
10541,
198,
2,
9058,
1196,
75,
6246,
198,
615,
75,
50,
408,
796,
1196,
75,
9078,
13,
615,
75,
10987,
10786,
615,
75,
18,
13,
... | 3.186475 | 488 |
import logging
import os
import asyncio
from html import escape
from aiogram import Bot, Dispatcher, executor, types
BOT_TOKEN: str = os.getenv("BOT_TOKEN")
MSG_LENGTH_LIMIT = 2 ** 12
SANDBOX_USER = 'bot'
# Configure logging
logging.basicConfig(level=logging.INFO)
# Initialize bot and dispatcher
bot = Bot(token=BOT_TOKEN, parse_mode="HTML")
dp = Dispatcher(bot)
COMMANDS = ['sed', 'grep', 'cut', 'tr', 'tail', 'head', 'uniq', 'sort', 'awk']
async def run_in_container(cmd: str, stdin: str) -> (str, str, int):
"""
Run program in container.
Returns stdout, stderr and exit_code.
"""
proc = await asyncio.create_subprocess_exec("su", SANDBOX_USER, "-c", f"/usr/src/app/sandbox.sh {cmd}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate(stdin.encode("utf-8"))
    return stdout.decode('utf-8', errors='ignore'), stderr.decode('utf-8', errors='ignore'), proc.returncode
@dp.message_handler(regexp=f'^({"|".join(COMMANDS)})')
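async def run_command(message: types.Message):
    """Minimal sketch of the stripped handler: run the message text as a sandboxed
    command, feeding the replied-to message (if any) as stdin."""
    stdin = message.reply_to_message.text if message.reply_to_message else ""
    stdout, stderr, _ = await run_in_container(message.text, stdin)
    output = (stdout or stderr or "(no output)")[:MSG_LENGTH_LIMIT]
    await message.reply(f"<pre>{escape(output)}</pre>")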
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
"""
This is a handler for `/help` and `/start` commands
:param message:
:return:
"""
await message.reply("""Hi!
I am stream editor bot. I can evaluate best Unix stream processing utilities in chat.
Just add me in your group and learn how to use Unix stream editors.
<b>Usage:</b>
<i>command args</i>, where command is one of my supported commands.
Reply on any message to use it as command input.
Now I support: """ + ', '.join(COMMANDS))
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True)
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
30351,
952,
198,
6738,
27711,
1330,
6654,
198,
6738,
257,
72,
21857,
1330,
18579,
11,
3167,
8071,
2044,
11,
3121,
273,
11,
3858,
198,
198,
33,
2394,
62,
10468,
43959,
25,
965,
796,
28686,
1... | 2.369032 | 775 |
"""Simple script to play sounds.
Notes
-----
Having trouble playing sounds correctly in debian so far. It seems that
Psychopy sound recommendations have changed. I need to take a closer look:
<https://www.psychopy.org/api/sound.html>
"""
import os
import psychtoolbox as ptb
from psychopy import prefs
prefs.hardware['audioLib'] = ['PTB']
from psychopy import core, sound, event
path_in = "/home/faruk/Git/minimalist_psychopy_examples/future/test"
# Print the sound files
sounds = sorted(os.listdir(path_in))
# Play sounds one by one
for i in sounds:
sound_i = os.path.join(path_in, i)
test_sound = sound.Sound(sound_i, volume=1, sampleRate=44100)
now = ptb.GetSecs()
test_sound.play()
print(i)
core.wait(2)
print("Finished.")
core.quit()
| [
37811,
26437,
4226,
284,
711,
5238,
13,
198,
198,
16130,
198,
30934,
198,
14698,
5876,
2712,
5238,
9380,
287,
50001,
523,
1290,
13,
632,
2331,
326,
198,
31923,
11081,
2128,
664,
296,
434,
602,
468,
3421,
13,
314,
761,
284,
423,
257,
... | 2.806569 | 274 |
#!/usr/bin/env python
#http://geoinformaticstutorial.blogspot.it/2012/09/reading-raster-data-with-python-and-gdal.html
#http://www.gis.usu.edu/~chrisg/python/2009/lectures/ospy_slides4.pdf
from osgeo import gdal,ogr
from osgeo.gdalconst import *
import struct
import sys
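
# The helper pt2fmt() used further below is not shown in this file; a minimal mapping
# from GDAL raster data types to struct format characters (an assumption, not the
# original helper) could look like this:
def pt2fmt(pt):
    fmttypes = {
        GDT_Byte: 'B',
        GDT_Int16: 'h',
        GDT_UInt16: 'H',
        GDT_Int32: 'i',
        GDT_UInt32: 'I',
        GDT_Float32: 'f',
        GDT_Float64: 'd'}
    return fmttypes.get(pt, 'x')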
# example coordinates (immediately overridden by the command-line arguments below)
lon = 12.502742
lat = 42.243713

lat = float(sys.argv[2])
lon = float(sys.argv[3])
ds = gdal.Open(sys.argv[1], GA_ReadOnly)
if ds is None:
    print('Failed to open file')
sys.exit(1)
transf = ds.GetGeoTransform()
cols = ds.RasterXSize
rows = ds.RasterYSize
bands = ds.RasterCount #1
band = ds.GetRasterBand(1)
bandtype = gdal.GetDataTypeName(band.DataType) #Int16
driver = ds.GetDriver().LongName #'GeoTIFF'
success, transfInv = gdal.InvGeoTransform(transf)
if not success:
print "Failed InvGeoTransform()"
sys.exit(1)
px, py = gdal.ApplyGeoTransform(transfInv, lon, lat)
structval = band.ReadRaster(int(px), int(py), 1,1, buf_type = band.DataType )
fmt = pt2fmt(band.DataType)
intval = struct.unpack(fmt , structval)
print(round(intval[0], 2))  # intval is a tuple, length=1 as we only asked for 1 pixel value
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
4023,
1378,
469,
36743,
687,
1512,
301,
44917,
13,
35217,
13,
270,
14,
6999,
14,
2931,
14,
25782,
12,
81,
1603,
12,
7890,
12,
4480,
12,
29412,
12,
392,
12,
21287,
282,
13,
... | 2.4102 | 451 |
# -*- coding: utf-8 -*-
"""
INTRO
@author: Yi Zhang. Created on Thu May 23 11:07:25 2019
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft
Delft, Netherlands
"""
import numpy as np
from screws.freeze.main import FrozenOnly
class CTEXTTBase(FrozenOnly):
"""
Parent of CoordinateTransformationTrace3D.
    In Trace, the very important difference is that we do NOT take mesh-grid inputs
    to evaluate the mapping and so on. This is because we have different faces
    (3D) or edges (2D) for the trace mapping. If we used the mesh-grid points
    as we did in CoordinateTransformation then, for example, in the 3D case we
    would need at least 6 inputs of shape = (2,), which is not a smart way.
"""
def __init__(self, ct):
""" """
self._ct_ = ct
# self._mapping_ = None
# self._Jacobian_matrix_ = None
# self._metric_matrix_ = None
self._freeze_self_()
# def _reset_(self):
# self._mapping_ = None
# self._Jacobian_matrix_ = None
# self._metric_matrix_ = None
    @property
def ndim(self):
"""
        This trace mapping lives in an n-dimensional object; the trace itself is an
        (n-1)-dimensional one.
"""
return self._ct_.ndim
def ___generate_trace_evaluation_points___(self):
"""
        Whenever we try to compute the trace mapping or trace Jacobian_matrix,
        we run this method beforehand to generate proper points (in reference
        coordinates) for the 4 edges (2D) or 6 sides (3D).
        This looks wasteful: if we have already done the trace mapping and then
        further compute the trace Jacobian, we repeat this work, so why not just
        store it? We do not, because we always look up evaluation_points_gird
        from `self._ct_`, and when we reset evaluation_points_gird we would have
        to reset the stored value as well. Of course, that is doable, but it
        would make the code a little less readable. What is more, this process
        is very fast, so who cares if we run it one more time?
"""
raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")
@property
def mapping(self):
"""
The mapping. To compute it we just need to employ the
CoordinateTransformation.
Returns
-------
self._mapping_ : dict
            Unlike CoordinateTransformation.mapping, which must be structured
            data (so we put it in an ndarray), here we put it in a dict, just
            like what we have in meshComponents.trace.
"""
raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")
@property
def Jacobian_matrix(self):
"""
The Jacobian matrix. To compute it we just need to employ the
CoordinateTransformation.
Returns
-------
self._Jacobian_matrix_ : dict
            As with self.mapping, here we also put it in a dict whose keys represent
the numbering of the trace element. Just like what we always have
in meshComponents.trace.
"""
raise Exception(" <CoordinateTransformation.Trace> : To be overwritten.")
@property
def metric_matrix(self):
""" The entries of metric_matrix is normally denoted as g_{i,j}. """
# if self._metric_matrix_ is None:
# J = self.Jacobian_matrix
# G = {}
# for k in self._mesh_.trace.elements.position_representive:
# Gk = [[None for i in range(self.ndim)] for j in range(self.ndim)]
# for i in range(self.ndim):
# for j in range(i, self.ndim):
# Gk[i][j] = J[k][0][i] * J[k][0][j]
# for l in range(1, self._ct_.ndim):
# Gk[i][j] += J[k][l][i] * J[k][l][j]
# if i != j:
# Gk[j][i] = Gk[i][j]
# G[k] = np.array(Gk)
# self._metric_matrix_ = G
# return self._metric_matrix_
J = self.Jacobian_matrix
G = {}
for k in self._mesh_.trace.elements.position_representive:
Gk = [[None for _ in range(self.ndim-1)] for _ in range(self.ndim-1)]
for i in range(self.ndim-1):
for j in range(i, self.ndim-1):
Gk[i][j] = J[k][0][i] * J[k][0][j]
for l in range(1, self.ndim):
Gk[i][j] += J[k][l][i] * J[k][l][j]
if i != j:
Gk[j][i] = Gk[i][j]
G[k] = np.array(Gk)
return G
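

# Editor's note (illustrative, not part of the module): the metric matrix computed above
# is simply g = J^T J restricted to the (ndim-1) reference directions of the trace,
# i.e. g_{ij} = sum_l J_{l,i} * J_{l,j}. A tiny self-contained numpy check of that
# identity, guarded so it never runs on import:
if __name__ == "__main__":
    _J = np.random.rand(3, 2)  # Jacobian of a 2D trace embedded in 3D space
    _G = np.array([[sum(_J[l, i] * _J[l, j] for l in range(3))
                    for j in range(2)] for i in range(2)])
    assert np.allclose(_G, _J.T @ _J)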
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1268,
5446,
46,
198,
198,
31,
9800,
25,
26463,
19439,
13,
15622,
319,
26223,
1737,
2242,
1367,
25,
2998,
25,
1495,
13130,
198,
220,
220,
220,
220,
220,
220... | 2.102564 | 2,301 |
"""
Tests sklearn matrix decomposition converters
"""
import unittest
import warnings
import sys
from distutils.version import LooseVersion
import numpy as np
import torch
import sklearn
from sklearn.decomposition import FastICA, KernelPCA, PCA, TruncatedSVD
from sklearn.cross_decomposition import PLSRegression as PLSR
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
import hummingbird.ml
# PLS regressor n_components two
# PLS regressor n_components two, no scale
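
# The test class bodies appear to have been elided above; a minimal sketch of what one
# such conversion test could look like (illustrative only -- the model, tolerances and
# backend choice below are assumptions, not the original tests):
class TestPCAConversionSketch(unittest.TestCase):
    def test_pca_torch_backend(self):
        X, _ = load_digits(return_X_y=True)
        X = X.astype(np.float32)
        model = PCA(n_components=8).fit(X)
        hb_model = hummingbird.ml.convert(model, "torch")
        np.testing.assert_allclose(model.transform(X), hb_model.transform(X),
                                   rtol=1e-4, atol=1e-4)
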
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
51,
3558,
1341,
35720,
17593,
26969,
9150,
6718,
1010,
198,
37811,
198,
11748,
555,
715,
395,
198,
11748,
14601,
198,
11748,
25064,
198,
6738,
1233,
26791,
13,
9641,
1330,
6706,
577,
14815,
198,
198,
11748,
299,
32152,
355,
... | 3.19337 | 181 |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 13:11:39 2018
@author: 13383861
"""
import sys
import enum
sys.path.append('.')
sys.path.append('..')
import requests
import os
import time
from collections import namedtuple
import copy
import random
import typing
import functools
import json
import threading
import pathlib
import AirSimInterface.client as airsim
from AirSimInterface.types import *
import numpy as np
#%%
#%%
class UE4Coord:
'''A coordinate which represents an objects location in an unreal engine environment'''
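    # The original method bodies are not shown in this dump; the sketch below is an
    # assumption inferred from how UE4Coord is used elsewhere in this file
    # (x_val/y_val/z_val attributes, use as dict/set keys, distances, Vector3r conversion).
    def __init__(self, x_val, y_val, z_val=0):
        self.x_val, self.y_val, self.z_val = x_val, y_val, z_val

    def get_dist_to_other(self, other):
        return ((self.x_val - other.x_val)**2 + (self.y_val - other.y_val)**2
                + (self.z_val - other.z_val)**2)**0.5

    def to_vector3r(self):
        # Vector3r is assumed to come from the AirSimInterface.types star-import above
        return Vector3r(self.x_val, self.y_val, self.z_val)

    def __eq__(self, other):
        return (self.x_val, self.y_val, self.z_val) == (other.x_val, other.y_val, other.z_val)

    def __hash__(self):
        return hash((self.x_val, self.y_val, self.z_val))

    def __repr__(self):
        return 'UE4Coord({},{},{})'.format(self.x_val, self.y_val, self.z_val)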
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
assert set(test_grid.get_neighbors(UE4Coord(2,2), 1.9)) == set([UE4Coord(1,2), UE4Coord(2,1), UE4Coord(2,3), UE4Coord(3,2), UE4Coord(3,3), UE4Coord(1,3), UE4Coord(1,1), UE4Coord(3,1)])
sensor_reading = lambda image_loc: get_highest_pred(get_image_response(image_loc))
#assert get_highest_pred(get_image_response('C:/Users/13383861/Downloads/test_train.jpg'))[0] > 0.6
#test
#an agent percept consists of a grid location, a detection probability, a timestep, a timestamp and the observer name
AgentObservation = namedtuple('obs_location', ['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
#A belief map component consists of a grid location and a likelihood
BeliefMapComponent = namedtuple('belief_map_component', ['grid_loc','likelihood'])
#%%
#Code and test for class which manages agent observations in a set grid
class AgentObservations():
'''A class which records agent observations in a UE4Grid'''
def get_most_recent_observation(self, observations = []):
'''Returns the most recent observation in a list of observations'''
if not observations:
observations = self.observations
return sorted(observations, key = lambda observation: observation.timestamp, reverse = True)[0]
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
test_agent_observations = AgentObservations(test_grid)
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent1')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent1')
obs3 = AgentObservation(UE4Coord(0,1),0.9, 3, 1237, 'agent1')
test_agent_observations.record_agent_observation(obs1)
test_agent_observations.record_agent_observation(obs2)
test_agent_observations.record_agent_observation(obs3)
assert test_agent_observations.get_most_recent_observation() == obs3
assert test_agent_observations.get_most_recent_observation_at_position(UE4Coord(0,0)) == obs2
assert test_agent_observations.get_all_observations_at_position(UE4Coord(0,1)) == [obs3]
calc_posterior = lambda observation, prior: (prior * observation) / ((prior * observation) + (1-prior)*(1-observation))
assert abs(calc_posterior(0.5, 0.2) - 0.2) <= 0.001
assert abs(calc_posterior(0.8, 0.2) - 0.5) <= 0.001
#%%
#Calculation of posterior given prior and observations
def get_posterior_given_obs(observations:list, prior):
'''For a sequence of observations calculates the posterior probability given a prior.'''
for observation in observations:
prior = calc_posterior(observation, prior)
return prior
assert abs(get_posterior_given_obs([0.5,0.2,0.8], 0.5) - 0.5) <= 0.001
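
#Illustrative check (not in the original): the same posterior can be computed in closed
#form from the product of the observations, which is why the order of updates does not
#matter. The helper name below is made up for this sketch.
def batch_posterior_sketch(observations, prior):
    prod_pos = np.prod(observations)
    prod_neg = np.prod([1 - o for o in observations])
    return prior * prod_pos / (prior * prod_pos + (1 - prior) * prod_neg)

assert abs(batch_posterior_sketch([0.5, 0.2, 0.8], 0.5) - get_posterior_given_obs([0.5, 0.2, 0.8], 0.5)) <= 0.001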
#%%
####################### Belief map and tests #######################
#A belief map has an agent name (beliefs belong to an agent) and consists of belief map components
#Leave this as a namedtuple if we don't need to define methods
#maybe this should go in the constructor and make this a regular class
def create_belief_map(grid, agent_name, prior = {}):
'''Creates an occupancy belief map for a given observer and a set of grid locations.
Prior is a mapping of grid_points to probabilities'''
if not prior:
#use uniform uninformative prior
prior = {grid_point: 1/len(grid.get_grid_points()) for grid_point in grid.get_grid_points()}
return BeliefMap(agent_name, grid, [BeliefMapComponent(grid_point, prior[grid_point]) for grid_point in grid.get_grid_points()], prior)
#return {grid_locs[i]: ObsLocation(grid_locs[i],prior[i], 0, time.time(), observer_name) for i in range(len(grid_locs))}
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 10, 6)
test_map = create_belief_map(test_grid, "agent1")
assert test_map.get_belief_map_component(UE4Coord(0,0)) == BeliefMapComponent(UE4Coord(0,0), 1/len(test_grid.get_grid_points()))
assert test_map._get_observation_grid_index(UE4Coord(0,0)) == 5
test_map.update_from_prob(UE4Coord(0,0), 0.9)
assert 0.132<test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.133
#prove order in which observations come in doesn't matter
obs1 = AgentObservation(UE4Coord(0,0),0.4, 1, 1234, 'agent1')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent1')
obs3 = AgentObservation(UE4Coord(0,0),0.93, 3, 1237, 'agent1')
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs1)
assert 0.0111 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.0112
test_map.update_from_observation(obs2)
assert 0.025688 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.0256881
test_map.update_from_observation(obs3)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
#now check observing in a different order gives same result
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs2)
test_map.update_from_observation(obs1)
test_map.update_from_observation(obs3)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
#now check observing in a different order gives same result
test_map = create_belief_map(test_grid, "agent1")
test_map.update_from_observation(obs3)
test_map.update_from_observation(obs2)
test_map.update_from_observation(obs1)
assert 0.2594 < test_map.get_belief_map_component(UE4Coord(0,0)).likelihood < 0.2595
####################### Belief map and tests #######################
#%%
####################### Observation Set Manager and tests #######################
class ObservationSetManager:
'''
    Manages the sensor measurements of other agents. Observations don't have to be taken at discrete locations -
    the continuous position can be recorded and the grid location inferred from this.
    Calculating a belief map from these sets of observations requires a grid so that each recorded observation can
    be associated with a grid location.
'''
#really strange behaviour: using this initialises the class with observations that don't exist... self.observation_sets[rav_name] = set()
def init_rav_observation_set(self, rav_name, observations = None):
'''initialise a new list of observations for a RAV'''
if not observations:
self.observation_sets[rav_name] = set()
else:
self.observation_sets[rav_name] = observations
#self.observation_sets[rav_name] = observations
def get_observation_set(self, rav_name) -> typing.Set[AgentObservation]:
'''Get list of observations from a RAV'''
return self.observation_sets[rav_name]
def update_from_other_obs_list_man(self, other):
'''Might need to check that the timestamps must be different...'''
for rav_name, observation_set in other.observation_sets.items():
self.update_rav_obs_set(rav_name, observation_set)
def get_discrete_belief_map_from_observations(self, grid):
        '''Given a discrete grid, returns a belief map containing the likelihood of the source
being contained in each grid segment'''
#ToDo:
#Currently observations must be made at grid locations - instead compute which observations are made
#in each grid location and then compute the belief map
return_belief_map = create_belief_map(grid, self.agent_name)
return_belief_map.update_from_observations(self.get_all_observations())
return return_belief_map
def get_continuous_belief_map_from_observations(self, grid_bounds):
'''Given grid bounds, returns a function which returns the likelihood given the
        continuous position of the RAV, i.e. transform the discrete PDF as above into a
continuous one.'''
pass
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
test_ObservationSetManager = ObservationSetManager('agent1')
test_ObservationSetManager.observation_sets
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
obs4 = AgentObservation(UE4Coord(0,1),0.9, 3, 1238, 'agent1')
test_ObservationSetManager.init_rav_observation_set('agent2', set([obs1, obs2]))
test_ObservationSetManager.observation_sets
test_ObservationSetManager.update_rav_obs_set('agent2', set([obs3]))
test_ObservationSetManager.get_all_observations()
assert test_ObservationSetManager.get_observation_set('agent2') == set([obs1, obs2, obs3])
assert test_ObservationSetManager.get_observation_set('agent1') == set([])
test_ObservationSetManager.update_rav_obs_set('agent1', set([obs4]))
assert not test_ObservationSetManager.get_all_observations().difference(set([obs1, obs2, obs3, obs4]))
###################################################
# Check that duplicate observations aren't added
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
test1_ObservationSetManager = ObservationSetManager('agent1')
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
test1_ObservationSetManager.update_rav_obs_set('agent2',[obs1, obs2, obs3])
test1_ObservationSetManager.observation_sets
#test that duplicate measurements won't occur
obs4 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
test1_ObservationSetManager.update_rav_obs_set('agent2', set([obs4]))
assert test1_ObservationSetManager.get_observation_set('agent2') == set([obs1, obs2, obs3])
assert abs(test1_ObservationSetManager.get_discrete_belief_map_from_observations(test_grid).get_belief_map_component(UE4Coord(0,0)).likelihood - 0.074468) < 0.0001
assert abs(test1_ObservationSetManager.get_discrete_belief_map_from_observations(test_grid).get_belief_map_component(UE4Coord(0,1)).likelihood - 0.395833) < 0.0001
#%%
######################### Action selection strategies #########################
def get_move_from_belief_map_epsilon_greedy(belief_map: BeliefMap, current_grid_loc: UE4Coord, epsilon: float, eff_radius = None) -> UE4Coord:
'''Epsilon greedy move selection'''
    #assume grid is regular, get all neighbors that are within max(lat_spacing, long_spacing)
    #assuming that lat_spacing < 2 * lng_spacing and vice versa
if not eff_radius:
eff_radius = max(belief_map.get_grid().get_lat_spacing(), belief_map.get_grid().get_lng_spacing())
#a list of UE4Coord
neighbors = belief_map.get_grid().get_neighbors(current_grid_loc, eff_radius)
#don't move to new position if can't find any neighbors to move to
if not neighbors:
return current_grid_loc
#neighbors = list(filter(lambda grid_loc: grid_loc.get_dist_to_other(current_grid_loc) <= eff_radius and grid_loc!=current_grid_loc, bel_map.keys()))
if random.random() < epsilon:
#epsilon random
return_move = random.choice(neighbors)
else:
#otherwise choose move that has highest value
max_move_value = 0
for neighbor in neighbors:
if belief_map.get_belief_map_component(neighbor).likelihood > max_move_value:
max_move_value = belief_map.get_belief_map_component(neighbor).likelihood
return_move = neighbor
# move = max(map(lambda neighbor: bel_map[neighbor].likelihood, neighbors))
return return_move
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
#(grid, agent_name, prior = {})
obs_man = ObservationSetManager("agent1")
obs_man.update_rav_obs_set('agent2', [obs1, obs2, obs3])
belief_map = obs_man.get_discrete_belief_map_from_observations(test_grid)
assert get_move_from_belief_map_epsilon_greedy(belief_map, UE4Coord(1,1), 0.0, 1.8) == UE4Coord(0,1)
#%%
#everything that could be important for measuring agent performance/progress
AgentAnalysisState = namedtuple('AgentAnalysisState', ['timestep',
'timestamp',
'rav_name',
'position_intended',
'position_measured',
#maybe add distance travelled for current timestep
'total_dist_travelled',
'remaining_batt_cap',
'prop_battery_cap_used',
'sensor_reading',
#is it necessary to record the grid along with the likelihoods in case want the grid to
#dynamically change? For now assume grid is fixed and in 1-1 correspondance with likelihoods
#'occ_grid_likelihoods',
#which other agents did the agent coordinate with on this timestep
'coordinated_with_other_names'])
#metadata related to the agent - details about the grid it's operating in, the prior that was worked with, to be updated...
AgentAnalysisMetadata= namedtuple("MissionAnalysisData", ["agents_used", "grid_origin", 'grid_lat_spacing',
'grid_lng_spacing','lng_lim', 'lat_lim',
'no_lat_points', 'no_lng_points', 'prior'])
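
#The helper _get_agent_state_for_analysis used below is not shown in this dump; a minimal
#sketch consistent with the assert further down (comma-joined field values in declaration
#order) might look like this -- an assumption, not the original implementation:
def _get_agent_state_for_analysis(**fields):
    return ','.join(str(fields[name]) for name in AgentAnalysisState._fields)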
def get_agent_state_for_analysis(agent_analysis_state: AgentAnalysisState):
'''Returns elements of agent state that are important for analysis that can be written to csv. Position, battery cap., total_dist_travelled, battery_consumed, occ_grid'''
#csv_headers = ['timestep', 'timestamp', 'rav_name', 'position_intended', 'position_measured', 'total_dist_travelled', 'remaining_batt_cap', 'prop_battery_cap_used', 'sensor_reading', 'occ_grid_locs', 'occ_grid_likelihoods', 'coordinated_with_other_bool', 'coordinated_with_other_names']
#return str(agent_analysis_state._fields).replace(')','').replace('(','').replace("'", '')
return _get_agent_state_for_analysis(**agent_analysis_state._asdict())
def get_agent_observation(agent_observation: AgentObservation):
'''Returns elements of agent state that are important for analysis that can be written to csv. Position, battery cap., total_dist_travelled, battery_consumed, occ_grid'''
#csv_headers = ['timestep', 'timestamp', 'rav_name', 'position_intended', 'position_measured', 'total_dist_travelled', 'remaining_batt_cap', 'prop_battery_cap_used', 'sensor_reading', 'occ_grid_locs', 'occ_grid_likelihoods', 'coordinated_with_other_bool', 'coordinated_with_other_names']
#return str(agent_analysis_state._fields).replace(')','').replace('(','').replace("'", '')
return _get_agent_observation(**agent_observation._asdict())
#AgentObservation = namedtuple('obs_location', ['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
testAgentAnalysisState = AgentAnalysisState(2, 100, 'test', 'test', 'test', 'test', 'test', 'test', 'test', 'test')
testAgentAnalysisState._asdict()
assert get_agent_state_for_analysis(testAgentAnalysisState) == "2,100,test,test,test,test,test,test,test,test"
assert calc_likelihood([0.1,0.1,0.2,0.4]) == 0.1*0.1*0.2*0.4
def create_belief_map_from_observations(grid: UE4Grid, agent_name: str, agent_belief_map_prior: typing.Dict[UE4Coord, float], agent_observations: typing.Set[AgentObservations]):
'''Since the calculation of posterior likelihood is based only on prior and observations (independent of order), updating a belief map component from measurements can be done
by the following update formula:
prior * product(over all i observations) observation_i
----------------------------------------------------------------------------------------------------------------------
prior * product(over all i observations) observation_i + (1-prior) * product(over all i observations) (1-observation_i)
'''
return_bel_map = create_belief_map(grid.get_grid(), agent_name, agent_belief_map_prior)
#update belief map based on all observations...
    return_bel_map.update_from_observations(agent_observations)
    return return_bel_map
#grid, agent_name, prior = {}
#update_bel_map(update_bel_map(test_map, 0.5, 3), 0.5,3)
class BaseROCSAFEAgent:
    '''Base class for all agents related to the ROCSAFE project; contains minimal functionality. Designed with the main goal
    of being able to compare and measure agent performance in a consistent way'''
pass
class BaseGridAgent:
    '''Base class for all agents that use a grid representation of the environment; contains minimal functionality. Designed with the main goal
    of being able to compare and measure agent performance in a consistent way'''
#create a base agent class
class OccupancyGridAgent():
'''agent that moves around an occupancy grid in order to locate a source of radiation. Uses a rav agent'''
ImageDir = 'D:/ReinforcementLearning/DetectSourceAgent/Data/SensorData'
#stores analysis csvs. Each csv contains agent state at each timestep
AgentStateDir = "D:/ReinforcementLearning/DetectSourceAgent/Analysis"
#stores observation json
ObservationDir = "D:/ReinforcementLearning/DetectSourceAgent/Observations"
MockedImageDir = 'D:/ReinforcementLearning/DetectSource/Data/MockData'
#break apart this into components, one which manages actuation/sensing, one which manages/represents state, etc.
def __eq__(self, other):
'''This agent is the same as another agent if names are the same. Refine this later'''
return self.agent_name == other.agent_name
def get_available_actions(self, state):
'''Returns actions available to RAV based on its current state'''
pass
def get_belief_map_after_t_timesteps(self, t):
'''Calculates what the agent's belief map would be after t timesteps'''
pass
def get_agent_state_for_analysis(self):
'''AgentAnalysisState = namedtuple('AgentAnalysisState', ['timestep','timestamp','rav_name',
'position_intended','position_measured',
'total_dist_travelled','remaining_batt_cap',
'prop_battery_cap_used',
'sensor_reading',
#which other agents did the agent coordinate with on this timestep
'coordinated_with_other_names'])'''
return get_agent_state_for_analysis(AgentAnalysisState(self.timestep, time.time(), self.get_agent_name(),
self.current_pos_intended, self.current_pos_measured,
self.total_dist_travelled, self.rav.getRemainingBatteryCap(),
self.prop_battery_cap_used,
self.current_reading,
#'[' + ','.join(map(lambda loc: loc.likelihood, self.current_belief_map.get_belief_map_components())) + ']',
#self.get_grid_locs_likelihoods_lists()[1],
self.others_coordinated_this_timestep))
#coordination strategy:
#agent will write all measurements in its possession to a file at each timestep. When communication requested,
#other agent will read all measurements from the file.
def coord_with_other(self, other_rav_name):
        '''Coordinate with the other RAV by requesting its measurement list and sending our own; first write our own measurement list to file.'''
if self.can_coord_with_other(other_rav_name):
observations_from_other_agents = self._read_observations(self)
print('read observations from other agents: {}'.format(observations_from_other_agents))
for observations_from_other_agent in observations_from_other_agents.values():
self.observation_manager.update_rav_obs_set(observations_from_other_agent)
#this only updates observations not seen previously since a set is maintained of all seen observations
self.current_belief_map.update_from_observations(self.observation_manager.get_all_observations())
self.others_coordinated_this_timestep.append(other_rav_name)
self.coordinated_this_timestep = True
def _write_observations(self, file_loc):
'''writes agent measurements to file to be read by other agent'''
with open(file_loc, 'a') as f:
json.dump(str(self.observation_manager.observation_sets), f)
def explore_timestep(self):
'''Gets rav to explore next timestep'''
#grid: UE4Grid, agent_name: str, agent_belief_map_prior: typing.Dict[UE4Coord, float], agent_observations: typing.List[AgentObservations]
next_pos = self.move_from_bel_map_callable(self.current_belief_map, self.current_pos_intended, self.epsilon)
print("self.current_pos_intended: {}".format(self.current_pos_intended ))
self.move_agent(next_pos)
self.current_pos_intended = next_pos
self.current_pos_measured = self.rav.getMultirotorState(vehicle_name = self.agent_name).kinematics_estimated.position
self.update_agent_pos_measured()
#record image at location
self.record_image()
#get sensor reading, can be done on separate thread
print('getting sensor reading for {}'.format(OccupancyGridAgent.ImageDir + "/photo_" + str(self.timestep) + '.png'))
self.current_reading = float(sensor_reading(OccupancyGridAgent.ImageDir + "/photo_" + str(self.timestep) + '.png')[0])
#mocked sensor reading
#self.current_reading = float(sensor_reading("D:/ReinforcementLearning/DetectSourceAgent/Data/MockData/test_train.jpg")[0])
        print('sensor reading: {}'.format(self.current_reading))
print("updating belief map position {} from {}".format(self.current_pos_intended, self.current_belief_map.get_belief_map_component(self.current_pos_intended)))
self.current_belief_map.update_from_prob(self.current_pos_intended, self.current_reading)
print(" to {}".format(self.current_belief_map.get_belief_map_component(self.current_pos_intended)))
#['grid_loc','probability','timestep', 'timestamp', 'observer_name'])
newest_observation = AgentObservation(self.current_pos_intended, self.current_reading, self.timestep, time.time(), self.agent_name)
self.observation_manager.update_rav_obs_set(self.agent_name, [AgentObservation(self.current_pos_intended, self.current_reading, self.timestep, time.time(), self.agent_name)])
#self._write_observations(self.observations_file_loc)
self.update_state_for_analysis_file(self.agent_state_file_loc, self.get_agent_state_for_analysis())
print("Observation made: {}".format(newest_observation))
self.update_observations_file(self.observations_file_loc, newest_observation)
#if agent is in range, communicate
if __name__ != '__main__':
grid = UE4Grid(20, 15, UE4Coord(0,0), 120, 150)
#grid, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, performance_csv_path: "file path that agent can write performance to", prior = []
occupancy_grid_agent = OccupancyGridAgent(grid, get_move_from_belief_map_epsilon_greedy, -12, 0.2, MockRavForTesting(), 'agent1')
#write some tests for agent here
occupancy_grid_agent.current_pos_intended = UE4Coord(0,0)
occupancy_grid_agent.current_pos_measured = None
occupancy_grid_agent.current_reading = 0.1
occupancy_grid_agent.get_agent_state_for_analysis()
occupancy_grid_agent.explore_timestep()
##################### Functions that can deal with the initialization of RAVs ####################
#%%
#%%
if __name__ == '__main__':
grid = UE4Grid(20, 15, UE4Coord(0,0), 120, 150)
rav_names = ["Drone1"]
#, "Drone2"]
client = airsim.MultirotorClient()
for rav_name in rav_names:
create_rav(client, rav_name)
#assert client.getVehiclesInRange("Drone1", ["Drone2"],1000000) == ["Drone2"]
#print('vehicles in range: ', client.getVehiclesInRange("Drone1", ["Drone2"] ,1000000))
#rav1.simShowPawnPath(False, 1200, 20)
#grid shared between rav
#grid, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, performance_csv_path: "file path that agent can write performance to", prior = []
#for grid_coord_index in range(1,len(grid.get_grid_points())):
# client.showPlannedWaypoints(grid.get_grid_points()[grid_coord_index-1].x_val,
# grid.get_grid_points()[grid_coord_index-1].y_val,
# grid.get_grid_points()[grid_coord_index-1].z_val,
# grid.get_grid_points()[grid_coord_index].x_val,
# grid.get_grid_points()[grid_coord_index].y_val,
# grid.get_grid_points()[grid_coord_index].z_val,
# lifetime = 200)
occupancy_grid_agent1 = OccupancyGridAgent(grid, UE4Coord(0,0), get_move_from_belief_map_epsilon_greedy, -12, 0.3, client, "Drone1")
occupancy_grid_agent1.explore_t_timesteps(20)
#occupancy_grid_agent2 = OccupancyGridAgent(grid, UE4Coord(20,15),get_move_from_belief_map_epsilon_greedy, -12, 0.3, client, "Drone2")
#occupancy_grid_agent1.explore_t_timesteps(10)
#p1 = threading.Thread(target = run_t_timesteps, args = (occupancy_grid_agent1,))
#p2 = threading.Thread(target = run_t_timesteps, args = (occupancy_grid_agent2,))
#p1.start()
#p2.start()
#p1.join()
#p2.join()
# showPlannedWaypoints(self, x1, y1, z1, x2, y2, z2, thickness=50, lifetime=10, debug_line_color='red', vehicle_name = '')
destroy_rav(client, "Drone1")
#destroy_rav(client, "Drone2")
#for grid_loc in grid_locs:
##rav.moveOnPathAsync(list(map(lambda x: x.to_vector3r(),grid_locs)), 8)
#rav.moveToPositionAsync(0,0, -20, 5).join()
#print('rav position: {}'.format(rav.getMultirotorState().kinematics_estimated.position))
#responses = rav.simGetImages([ImageRequest("3", ImageType.Scene)])
#response = responses[0]
#filename = OccupancyGridAgent.ImageDir + "/photo_" + str(1)
#airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
# grid, move_from_bel_map_callable, height, epsilon, multirotor_client, prior = []
#pos, likelihood = OccupancyGridAgent(grid, get_move_from_bel_map, -12, 0.3, rav, "Drone1").explore_t_timesteps(125)
#print('determined {} as source with likelihood {}'.format(pos, likelihood))
#rav.moveToPositionAsync(pos.x_val, pos.y_val, -5, 3).join()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
5267,
220,
362,
1511,
25,
1157,
25,
2670,
2864,
198,
198,
31,
9800,
25,
1511,
2548,
2548,
5333,
198,
37811,
198,
198,
11748,
25064,
198,
... | 2.433273 | 11,547 |