content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import requests
from PIL import Image
header = {'Authorization': 'Basic cmVwZWF0OnN3aXRjaA==', }
response = requests.get('http://www.pythonchallenge.com/pc/ring/yankeedoodle.csv', headers=header)
with open('yankeedoodle.csv', 'wb') as f:
f.write(response.content)
with open('yankeedoodle.csv', 'r') as f:
data = f.read().replace('\n', '').split(',')
num_data = len(data)
print('total:', num_data, '=', end=' ')
prime_factor(num_data) # 7367 = 53 * 139
img = Image.new('F', (139, 53))
for ind in range(num_data):
img.putpixel((ind // img.height, ind % img.height), 256 * float(data[ind]))
img.show() # n=str(x[i])[5]+str(x[i+1])[5]+str(x[i+2])[6]
x = [f'{float(s):.5f}' for s in data]
encoded_str = [str(x[i])[5] + str(x[i + 1])[5] + str(x[i + 2])[6] for i in range(0, num_data // 3 * 3, 3)]
print(encoded_str)
print(bytes(int(i) for i in encoded_str))
# 'So, you found the hidden message.\nThere is lots of room here for a long message, but we only need
# very little space to say "look at grandpa", so the rest is just garbage. \nVTZ.l
| [
11748,
7007,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
25677,
796,
1391,
6,
13838,
1634,
10354,
705,
26416,
12067,
53,
86,
57,
48397,
15,
2202,
45,
18,
64,
55,
49,
6592,
32,
855,
3256,
1782,
198,
26209,
796,
7007,
13,
1136,
10786,... | 2.431193 | 436 |
import sys
import json
import urllib
import logging
import requests
import argparse
from urlparse import urljoin
from bs4 import BeautifulSoup
# parse arguments
parser = argparse.ArgumentParser(prog="npmjs_dependents", description="Parse arguments")
parser.add_argument("name", help="Name of the package to query dependents")
parser.add_argument("-o", "--outfile", help="Path to the output file for storing dependents info")
args = parser.parse_args(sys.argv[1:])
name = args.name
outfile = args.outfile
# deprecated
# breath-first search for dependents
dependents = set()
queue = [name]
while queue:
vertex = queue.pop(0)
if vertex not in dependents:
dependents.add(vertex)
queue.extend(set(get_dependents_html(vertex)) - dependents)
dependents -= {name}
# post-processing
print("there are %d dependents for package name %s" % (len(dependents), name))
if outfile:
json.dump(list(dependents), open(outfile, 'w'), indent=2)
| [
11748,
25064,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
11748,
18931,
198,
11748,
7007,
198,
11748,
1822,
29572,
198,
6738,
19016,
29572,
1330,
19016,
22179,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
2,
21136,
... | 3.063694 | 314 |
# v1: Com Radix Sort LSD (A2) (Lado Direito -> Lado Esquerdo) Atua Sobre Digitos e Nao Bits (Para inteiros) |
# Ex.: 1999 >>> 1 <- 9 <- 9 <- 9
# *** VERSAO RELATORIO *** | Tabela 2 e 3
# #### BIBLIOTECAS ####
import sys
import time
import msvcrt
from io import StringIO
# #### CONSTANTES ####
CMD_IN_GLOBAL = "PESQ_GLOBAL\n"
CMD_IN_UTILIZADORES = "PESQ_UTILIZADORES\n"
CMD_IN_TERMINADO = "TCHAU\n"
CMD_IN_TERMINADO2 = "TCHAU"
CMD_IN_PALAVRAS = "PALAVRAS\n"
CMD_IN_FIM = "FIM.\n"
CMD_OUT_GUARDADO = "GUARDADAS"
# #### FUNCOES ####
# v1: Com Radix Sort LSD (A2) (Lado Direito -> Lado Esquerdo) Atua Sobre Digitos e Nao Bits (Para inteiros) |
# Ex.: 1999 >>> 1 <- 9 <- 9 <- 9
# *** VERSAO RELATORIO *** | Tabela 2 e 3
if __name__ == '__main__':
# ### START ###
main()
| [
2,
410,
16,
25,
955,
5325,
844,
33947,
27483,
357,
32,
17,
8,
357,
43,
4533,
34177,
10094,
4613,
406,
4533,
8678,
10819,
4598,
8,
1629,
6413,
36884,
260,
7367,
270,
418,
304,
399,
5488,
44733,
357,
47,
3301,
493,
20295,
4951,
8,
9... | 2.138587 | 368 |
"""
Runtime: 1626 ms, faster than 5.01% of Python3 online submissions for Linked List Cycle II.
Memory Usage: 17.3 MB, less than 73.61% of Python3 online submissions for Linked List Cycle II.
"""
from typing import List
from typing import Optional
if __name__ == "__main__":
main()
| [
37811,
198,
41006,
25,
1467,
2075,
13845,
11,
5443,
621,
642,
13,
486,
4,
286,
11361,
18,
2691,
22129,
329,
7502,
276,
7343,
26993,
2873,
13,
198,
30871,
29566,
25,
1596,
13,
18,
10771,
11,
1342,
621,
8854,
13,
5333,
4,
286,
11361,
... | 3.416667 | 84 |
# =============================================================================
# periscope-ps (blipp)
#
# Copyright (c) 2013-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
import time
EVENT_TYPES={
"dummy":"ps:testing:dummy"
}
class Probe:
"""
Dummy probe that just sleeps and returns 1
"""
| [
2,
38093,
25609,
198,
2,
220,
583,
2304,
3008,
12,
862,
357,
2436,
3974,
8,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
2211,
12,
5304,
11,
9870,
2841,
286,
10278,
2059,
11,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,... | 4 | 163 |
import os
import sys
sys.path.append(os.getcwd())
import h5py
import tests.vis_gui
import torch
import numpy as np
import skimage.transform
import matplotlib.colors
import matplotlib.pyplot
import pyrenderer
if __name__ == "__main__":
#ui = UIStepsize(os.path.join(os.getcwd(), "..\\..\\results\\stepsize\\skull4gauss"))
ui = UIStepsize(os.path.join(os.getcwd(), "..\\..\\results\\stepsize\\thorax2gauss"))
ui.show()
| [
11748,
28686,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
1136,
66,
16993,
28955,
198,
198,
11748,
289,
20,
9078,
198,
11748,
5254,
13,
4703,
62,
48317,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
1... | 2.547059 | 170 |
"""Django tests to ensure that the app is working correctly are written and run here."""
import tempfile
import datetime
from django.db.models.fields.files import ImageFieldFile
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.models import User
from django.test.client import Client
from .models import Profile, Image, Challenge
from . import validate, image_metadata, ml_ai_image_classification
class TestAdminPanel(TestCase):
"""test admin functionality"""
def create_user(self):
"""create a test admin"""
self.username = "test_admin"
self.password = User.objects.make_random_password()
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = True
user.is_superuser = True
user.is_active = True
user.save()
self.user = user
def test_spider_admin(self):
"""test that admin can login and access admin pages"""
self.create_user()
client = Client()
client.login(username=self.username, password=self.password)
admin_pages = [
"/admin/",
]
for page in admin_pages:
resp = client.get(page)
self.assertEqual(resp.status_code, 200)
assert b"<!DOCTYPE html" in resp.content
self.user.delete()
class TestUserPanel(TestCase):
"""test user functionality"""
def create_user(self):
"""create a test user"""
self.username = "testuser"
self.password = "Cheesytoenails@123"
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def test_user_credentials(self):
"""test that the user has the correct username, password"""
self.create_user()
client = Client()
client.login(username=self.username, password=self.password)
self.assertEqual("testuser",self.username)
self.assertEqual("Cheesytoenails@123",self.password)
self.user.delete()
def test_user_profile(self):
"""test that user profile is created when user is created"""
self.create_user()
if len(Profile.objects.filter(user=self.user)) !=1:
self.fail("Profile not created correctly")
self.user.delete()
def test_spider_user(self):
"""test that user can login and access user pages"""
self.create_user()
client = Client()
client.login(username=self.username, password=self.password)
user_pages = [
"/polls/",
"/polls/feed",
"/polls/uploadimage",
"/polls/leaderboards",
"/polls/profile",
"/polls/viewprofile/"+str(self.user.username)
]
for page in user_pages:
resp = client.get(page)
self.assertEqual(resp.status_code, 200)
assert b"<!DOCTYPE html" in resp.content
# should be redirected away from admin page
admin_page = "/admin/"
resp = client.get(admin_page)
self.assertEqual(resp.status_code, 302)
self.user.delete()
class TestValidate(TestCase):
"""tests for validate.py"""
def create_user(self,username,password):
"""create a test admin"""
self.username = username
self.password = password
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def setUp(self):
"""create a user and paths to test images"""
self.create_user("testuser","Cheesytoenails@123")
self.large_image_path = './media/feed/picture/6mb_image.jpg'
self.good_image_path = './media/feed/picture/Brennan_On_the_Side_of_the_Angels_2.jpg'
self.bad_image_path = './media/feed/picture/university-of-exeter-forum.jpg'
def tearDown(self):
"""delete the test user"""
User.objects.get(username="testuser").delete()
def test_check_user_unique(self):
"""should raise error if username already exists"""
self.assertRaises(ValidationError, validate.check_user_unique, "testuser")
try:
validate.check_user_unique("unique_user")
except ValidationError:
self.fail("validate.check_user_unique() raised ValidationError unexpectedly!")
def test_validate_number(self):
"""test that password number validation works correctly"""
# test with input expected to raise error
self.assertRaises(ValidationError,validate.validate_number,"hello")
# test with more extreme input
self.assertRaises(ValidationError,validate.validate_number,"onzgoiaegnimnMAMS")
# test with valid input
try:
validate.validate_number("hdbg1247562mdm")
except ValidationError:
self.fail("validate.validate_number() raised ValidationError unexpectedly!")
def test_validate_special(self):
"""test that password special character validation works correctly"""
# test with input expected to raise error
self.assertRaises(ValidationError,validate.validate_number,"hello")
# test with more extreme input
self.assertRaises(ValidationError,validate.validate_number,"onzgoiaegnimnMAMS")
# test with valid input
try:
validate.validate_number("@124hsvjv£%*(*&^%£")
except ValidationError:
self.fail("validate.check_image_type() raised ValidationError unexpectedly!")
def test_validate_upper_lower(self):
"""test that upper and lower case validation for passwords works"""
# test with input that should raise error
self.assertRaises(ValidationError, validate.validate_upper_lower, "hello")
# more extreme input
self.assertRaises(ValidationError, validate.validate_upper_lower, "kjaelnal31259$$*&")
# input that should be valid
try:
validate.validate_upper_lower("HEllO")
except ValidationError:
self.fail("validate.validate_upper_lower() raised ValidationError unexpectedly!")
def test_validate_check_image_type(self):
"""test that image type is validated correctly"""
# any image that is not .jpg should raise an error
image = tempfile.NamedTemporaryFile(suffix=".png")
self.assertRaises(ValidationError, validate.check_image_type, image)
# test that it works with jpg
image = tempfile.NamedTemporaryFile(suffix=".jpg")
try:
validate.check_image_type(image)
except ValidationError:
self.fail("validate.check_image_type() raised ValidationError unexpectedly!")
def test_validate_image_size(self):
"""test that image size is validated correctly"""
image = tempfile.NamedTemporaryFile(suffix='.jpg')
#a small tempfile should be valid
self.assertEqual('valid',validate.validate_image_size(image.name))
#large image under 20mb should be valid
self.assertEqual('valid',validate.validate_image_size(self.large_image_path))
def test_validate_metadata(self):
"""test that metadata is validated"""
# image with correct metadata should be valid
self.assertEqual("valid",validate.validate_metadata(self.good_image_path))
# image without metadata should be invalid
self.assertEqual("missing metadata",validate.validate_metadata(self.bad_image_path))
class TestImage(TestCase):
"""test the Image model"""
def create_user(self):
"""create a test user"""
self.username = "test_user"
self.password = User.objects.make_random_password()
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def setUp(self):
"""create a test user and a temporary test image to be stored in an Image model object"""
self.create_user()
self.img_obj = Image.objects.create(user=self.user
,
description="desc",
img=SimpleUploadedFile(name='test_image.jpg',
content='',
content_type='image/jpeg'),
gps_coordinates=(50.7366, -3.5350),
taken_date=datetime.datetime.now(),score=0)
def tearDown(self):
"""delete the user and their test image objects"""
images = Image.objects.filter(user=self.user)
for image in images:
image.img.delete()
image.delete()
self.user.delete()
def test_image(self):
"""test that Image objects are created and stored correctly"""
self.assertEqual("desc",self.img_obj.description)
if isinstance(self.img_obj.img,ImageFieldFile) == False:
self.fail("img in Image model not stored correctly")
self.assertEqual((50.7366,-3.5350),self.img_obj.gps_coordinates)
self.assertEqual(0,self.img_obj.score)
class TestChallenge(TestCase):
"""test the Challenge model"""
def setUp(self):
"""set up a test challenge object"""
self.challenge_obj = Challenge.objects.create(name='test_challenge',
description='desc',
location=(50.7366,-3.5350),
locationRadius=1,
subject='building',
startDate=datetime.datetime.now(),
endDate=datetime.datetime.now()
)
self.challenge_obj.save()
def tearDown(self):
"""delete the challenge object"""
self.challenge_obj.delete()
def test_challenge(self):
"""test that challnge objects are being saved and stored correctly"""
self.assertEqual(self.challenge_obj.name,'test_challenge')
self.assertEqual(self.challenge_obj.description,'desc')
self.assertEqual(self.challenge_obj.location,(50.7366,-3.5350))
self.assertEqual(self.challenge_obj.subject,'building')
class TestImageMetadata(TestCase):
"""test methods from image_metadata"""
def create_user(self):
"""create a test user"""
self.username = "test_user"
self.password = User.objects.make_random_password()
user, created = User.objects.get_or_create(username=self.username)
user.set_password(self.password)
user.is_staff = False
user.is_superuser = False
user.is_active = True
user.save()
self.user = user
def setUp(self):
"""set up a good image with metadata, bad image without"""
self.good_image_path = './media/feed/picture/Brennan_On_the_Side_of_the_Angels_2.jpg'
self.bad_image_path = './media/feed/picture/university-of-exeter-forum.jpg'
def tearDown(self):
"""nothing to tear down"""
pass
def test_get_gps(self):
"""test that gps data is gathered correctly"""
# image without metadata should throw exception
with self.assertRaises(Exception) as context:
image_metadata.get_gps(self.bad_image_path)
self.assertTrue('exif not found' in str(context.exception))
# image with metadata should return a tuple with location data
try:
ret_val = image_metadata.get_gps(self.good_image_path)
if isinstance(ret_val,tuple) == False:
self.fail("image_metadata.get_gps() does not return a tuple")
else:
pass
except Exception:
self.fail("image_metadata.get_gps() threw an unexpected exception")
def test_get_lat(self):
"""test that latitudes are either positive or negative depending on north vs south"""
assert image_metadata.get_lat("N",[1,0,2]) >0
assert image_metadata.get_lat("S",[1,0,2]) <0
# ignore unexpected data
assert image_metadata.get_lat("asfadfac",[1,0,2]) <0
def test_get_long(self):
"""test that longitudes are either positive or negative depending on east vs west"""
assert image_metadata.get_long("E",[1,0,2]) >0
assert image_metadata.get_lat("W",[1,0,2]) <0
# unexpected data should be ignored
assert image_metadata.get_lat("asfadfac",[1,0,2]) <0
def test_get_distance(self):
"""test that distance between two points is correct"""
# this sum shows whether the distance calculation is working
self.assertEqual(0,image_metadata.get_distance((50.7366, -3.5350),(50.7366, -3.5350)))
def test_get_time(self):
"""test that time data is gathered from an image"""
try:
ret_val = image_metadata.get_time(self.good_image_path)
try:
time = datetime.datetime.strptime(ret_val, '%Y:%m:%d %H:%M:%S')
except:
self.fail("image_metadata.get_time() does not return a datetime")
except:
self.fail("image_metadata.get_time() fails to find a time")
def test_get_time_dif(self):
"""test the image_metadata get_time_dif by recreating the logic"""
time = datetime.datetime.now()
difference = time - time
datetime.timedelta(0, 8, 562000)
seconds_in_day = 24 * 60 * 60
ret_val = (difference.days * seconds_in_day + difference.seconds) / 60
#difference between equal dates should be 0 to prove that the sum is calculated correctly
self.assertEqual(0,ret_val) | [
37811,
35,
73,
14208,
5254,
284,
4155,
326,
262,
598,
318,
1762,
9380,
389,
3194,
290,
1057,
994,
526,
15931,
198,
11748,
20218,
7753,
198,
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
25747,
13,
16624,
13... | 2.823421 | 4,321 |
import sys
import importlib
from src.models import Apo, Locusdbentity, Referencedbentity, Phenotypeannotation, \
Source, PhenotypeannotationCond, Taxonomy, Chebi, Phenotype, Allele, Reporter, Chebi
from scripts.loading.database_session import get_session
from scripts.loading.util import get_strain_taxid_mapping
__author__ = 'sweng66'
cond_start_index = 12
cond_stop_index = 33
column_size = 36
cond_class = ['treatment', 'media', 'phase', 'temperature',
'chemical', 'assay', 'radiation']
# degree_file = "scripts/loading/phenotype/data/sample_line_with_degree.txt"
if __name__ == '__main__':
infile = None
if len(sys.argv) >= 2:
infile = sys.argv[1]
else:
print("Usage: python load_phenotype.py datafile")
print("Usage example: python load_phenotype.py scripts/loading/phenotype/data/phenotype_dataCuration091717.tsv")
exit()
logfile = "scripts/loading/phenotype/logs/" + infile.split('/')[4].replace(".txt", ".log")
load_phenotypes(infile, logfile)
| [
11748,
25064,
198,
11748,
1330,
8019,
198,
6738,
12351,
13,
27530,
1330,
5949,
78,
11,
406,
10901,
9945,
26858,
11,
6524,
14226,
771,
65,
26858,
11,
34828,
8690,
1236,
14221,
11,
3467,
198,
220,
220,
220,
8090,
11,
34828,
8690,
1236,
... | 2.585366 | 410 |
# data dict
dict_path = "dataset/dict.txt"
# Data shape
data_shape = [1, 60, -1]
# Minibatch size.
batch_size = 128
# Learning rate.
lr = 1e-3
# Learning rate decay strategy. 'piecewise_decay' or None is valid.
lr_decay_strategy = None
# L2 decay rate.
l2decay = 4e-4
# Momentum rate.
momentum = 0.9
# The threshold of gradient clipping.
gradient_clip = 10.0
# The number of iterations.
total_step = 720000
# Log period.
log_period = 100
# character class num + 1 .
num_classes = 62
# Save model period. '-1' means never saving the model.
save_model_period = 5000
# Evaluate period. '-1' means never evaluating the model.
eval_period = 5000
# The list file of images to be used for training.
train_list = 'dataset/train.txt'
# The list file of images to be used for training.
test_list = 'dataset/test.txt'
train_prefix = 'dataset/train'
test_prefix = 'dataset/test'
# Which type of network to be used. 'crnn_ctc' or 'attention'
use_model = 'crnn_ctc'
# Save model path
model_path = 'models/%s/train/' % use_model
infer_model_path = 'models/%s/infer/' % use_model
# The init model file of directory.
init_model = None
# Whether use GPU to train.
use_gpu = True
# Min average window.
min_average_window = 10000
# Max average window. It is proposed to be set as the number of minibatch in a pass.
max_average_window = 12500
# Average window.
average_window = 0.15
# Whether use parallel training.
parallel = False
| [
2,
1366,
8633,
198,
11600,
62,
6978,
796,
366,
19608,
292,
316,
14,
11600,
13,
14116,
1,
198,
2,
6060,
5485,
198,
7890,
62,
43358,
796,
685,
16,
11,
3126,
11,
532,
16,
60,
198,
2,
1855,
571,
963,
2546,
13,
198,
43501,
62,
7857,
... | 2.949896 | 479 |
# This file is executed on every boot (including wake-boot from deepsleep)
import uos
import gc
import network
import sys
# import webrepl
# import esp
from wifi import *
sys.path.reverse()
# uos.dupterm(None, 1) # disable REPL on UART(0)
# esp.osdebug(None)
# webrepl.start()
gc.collect()
# Se inicia la conexión WiFi
connection = network.WLAN(network.STA_IF)
connection.active(True)
connection.connect(ssid, password)
| [
2,
770,
2393,
318,
10945,
319,
790,
6297,
357,
8201,
7765,
12,
18769,
422,
2769,
42832,
8,
198,
198,
11748,
334,
418,
198,
11748,
308,
66,
198,
11748,
3127,
198,
11748,
25064,
198,
2,
1330,
3992,
35666,
198,
2,
1330,
15024,
198,
673... | 2.958333 | 144 |
# -*- conding: utf8 -*-
"""
@author: Muhammed Zeba (parice02)
"""
import time
import sqlite3
from re import compile, I
from typing import List, Dict
from pathlib import Path
import json
def regexp(motif: str, item: str) -> bool:
"""retourne True si le motif regex a été satisfait dans l'item
False sinon
"""
pattern = compile(motif, I)
return pattern.search(item) is not None
def listfetchall(cursor: sqlite3.Cursor) -> List:
"Return all rows from a cursor as a list"
return [row[0] for row in cursor.fetchall()]
class Timer(object):
""" """
class LoggerTimer(Timer):
"""
Source: https://saladtomatonion.com/blog/2014/12/16/mesurer-le-temps-dexecution-de-code-en-python/
"""
@staticmethod
class DBSQLite3(object):
""" """
def __init__(self, sqlite3_db: str = "db.db"):
""" """
self._connection = sqlite3.connect(sqlite3_db)
self._connection.create_function("regexp", 2, regexp)
self._cursor = self._connection.cursor()
def close_connection(self):
""" """
self._connection.close()
def close_cursor(self):
""" """
self._cursor.close()
@LoggerTimer("DBSQLite.execute_query() process time")
def execute_query(self, params) -> List:
""" """
query = "SELECT DISTINCT mot FROM mots WHERE LENGTH(mot) = :len AND regexp(:expr, mot)"
try:
self._cursor.execute(query, params)
results = listfetchall(self._cursor)
return (
results
if len(results) != 0
else [
0,
_("Aucune correspondance trouvée"),
]
)
except Exception as e:
return [
0,
e.__str__(),
]
| [
2,
532,
9,
12,
1779,
278,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
37811,
198,
31,
9800,
25,
8252,
2763,
1150,
1168,
1765,
64,
357,
1845,
501,
2999,
8,
198,
37811,
198,
198,
11748,
640,
198,
11748,
44161,
578,
18,
198,
6738,
302,... | 2.135041 | 859 |
import discord
import emc
from emc.async_ import get_data
from discord.ext import commands
| [
11748,
36446,
198,
11748,
795,
66,
198,
6738,
795,
66,
13,
292,
13361,
62,
1330,
651,
62,
7890,
198,
6738,
36446,
13,
2302,
1330,
9729,
198
] | 3.5 | 26 |
from typing import List
import numpy as np
from ..base import BaseAudioEncoder
from ...helper import batching
| [
628,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
8692,
1330,
7308,
21206,
27195,
12342,
198,
6738,
2644,
2978,
525,
1330,
15458,
278,
628
] | 3.625 | 32 |
# pylint: disable=too-few-public-methods, W0231, print-statement, useless-object-inheritance
# pylint: disable=no-classmethod-decorator
"""Test external access to protected class members."""
from __future__ import print_function
class MyClass(object):
"""Class with protected members."""
_cls_protected = 5
def test(self):
"""Docstring."""
self._protected += self._cls_protected
print(self.public._haha) # [protected-access]
def clsmeth(cls):
"""Docstring."""
cls._cls_protected += 1
print(cls._cls_protected)
clsmeth = classmethod(clsmeth)
def _private_method(self):
"""Doing nothing."""
class Subclass(MyClass):
"""Subclass with protected members."""
INST = Subclass()
INST.attr = 1
print(INST.attr)
INST._protected = 2 # [protected-access]
print(INST._protected) # [protected-access]
INST._cls_protected = 3 # [protected-access]
print(INST._cls_protected) # [protected-access]
class Issue1031(object):
"""Test for GitHub issue 1031"""
_attr = 1
def correct_access(self):
"""Demonstrates correct access"""
return type(self)._attr
def incorrect_access(self):
"""Demonstrates incorrect access"""
if self._attr == 1:
return type(INST)._protected # [protected-access]
return None
class Issue1802(object):
"""Test for GitHub issue 1802"""
def __eq__(self, other):
"""Test a correct access as the access to protected member is in a special method"""
if isinstance(other, self.__class__):
answer = self._foo == other._foo
return answer and self.__private == other.__private # [protected-access]
return False
def not_in_special(self, other):
"""
Test an incorrect access as the access to protected member is not inside a special method
"""
if isinstance(other, self.__class__):
return self._foo == other._foo # [protected-access]
return False
def __le__(self, other):
"""
Test a correct access as the access to protected member
is inside a special method even if it is deeply nested
"""
if 2 > 1:
if isinstance(other, self.__class__):
if "answer" == "42":
return self._foo == other._foo
return False
def __fake_special__(self, other):
"""
Test an incorrect access as the access
to protected member is not inside a licit special method
"""
if isinstance(other, self.__class__):
return self._foo == other._foo # [protected-access]
return False
| [
2,
279,
2645,
600,
25,
15560,
28,
18820,
12,
32146,
12,
11377,
12,
24396,
82,
11,
370,
15,
25667,
11,
3601,
12,
26090,
11,
13894,
12,
15252,
12,
259,
372,
42942,
198,
2,
279,
2645,
600,
25,
15560,
28,
3919,
12,
4871,
24396,
12,
... | 2.544939 | 1,057 |
from .html_generator import build_html_generator
if __name__ == "__main__":
image2html()
| [
6738,
764,
6494,
62,
8612,
1352,
1330,
1382,
62,
6494,
62,
8612,
1352,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
2939,
17,
6494,
3419,
198
] | 2.771429 | 35 |
bob = 0
for i in range(len(s)-2):
if s[i:i+3] == 'bob':
bob += 1
print('Number of times bob occurs is:', bob)
| [
65,
672,
796,
657,
198,
1640,
1312,
287,
2837,
7,
11925,
7,
82,
13219,
17,
2599,
198,
220,
220,
220,
611,
264,
58,
72,
25,
72,
10,
18,
60,
6624,
705,
65,
672,
10354,
198,
220,
220,
220,
220,
220,
220,
220,
29202,
15853,
352,
1... | 2.067797 | 59 |
from marshmallow import ValidationError
from src.app.ma import ma
from src.app.db import db
from src.app.controllers.cliente import Cliente, ClienteList
from src.app.controllers.favoritos import FavoritoList, Favorito
from src.app.controllers.usuario import UsuarioAuth, Usuario
from src.app.server.instance import server
import logging
api = server.api
app = server.app
log = logging.getLogger(__name__)
@app.before_first_request
server.cliente_ns.add_resource(ClienteList, '/clientes')
server.cliente_ns.add_resource(Cliente, '/clientes/<string:email>')
server.favoritos_ns.add_resource(FavoritoList, '/clientes/<string:email>/favoritos')
server.favoritos_ns.add_resource(Favorito, '/clientes/<string:email>/favoritos/<string:id_produto>')
server.usuario_ns.add_resource(UsuarioAuth, '/usuario/auth')
server.usuario_ns.add_resource(Usuario, '/usuario')
if __name__ == '__main__':
log.info('API inicializada')
db.init_app(app)
ma.init_app(app)
server.run() | [
6738,
22397,
42725,
1330,
3254,
24765,
12331,
198,
198,
6738,
12351,
13,
1324,
13,
2611,
1330,
17266,
198,
6738,
12351,
13,
1324,
13,
9945,
1330,
20613,
198,
6738,
12351,
13,
1324,
13,
3642,
36667,
13,
16366,
68,
1330,
20985,
68,
11,
... | 2.692308 | 364 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['BehaviourArgs', 'Behaviour']
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.382114 | 123 |
import logging
from .ranker import AbstractAndArticle
from .tfidf import TfIdf
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
__all__ = ("AbstractAndArticle", "TfIdf")
| [
11748,
18931,
198,
198,
6738,
764,
43027,
263,
1330,
27741,
1870,
14906,
198,
6738,
764,
27110,
312,
69,
1330,
309,
69,
7390,
69,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
18982,
11639,
4,
7,
292,
310,
524,
8,
82,
532,
4064,
7,
3... | 2.682353 | 85 |
from random import choice
banner1=('''
██
██████ ████ ██
██████ ██ ████ ██ ██
██ ██ ██ ██
██ ██░████ ▒█████░███████ ▒████▓ ██░███▒ ██ ░████░ ████ ███████
██ ███████▓███████████████ ██████▓███████▒██ ░██████░ ████ ███████
██ ███ ▒████▒ ░▒█ ██ █▒ ▒█████ █████ ███ ███ ██ ██
██ ██ ███████▓░ ██ ▒███████░ ░████ ██░ ░██ ██ ██
██ ██ ██░██████▒ ██ ░█████████ ████ ██ ██ ██ ██
██ ██ ██ ░▒▓██ ██ ██▓░ ████░ ░████ ██░ ░██ ██ ██
██ ██ ███▒░ ▒██ ██░ ██▒ ██████ █████▒ ███ ███ ██ ██░
████████ ██████████ ████████████████████▒█████░██████░█████████████
████████ ██░▓████▓ ░████ ▓███░████░███▒ ░████ ░████░ ████████░████
██
██
██
''')
banner2=("""
░▀█▀░█▀█░█▀▀░▀█▀░█▀█░█▀█░█░░░█▀█░▀█▀░▀█▀
░░█░░█░█░▀▀█░░█░░█▀█░█▀▀░█░░░█░█░░█░░░█░
░▀▀▀░▀░▀░▀▀▀░░▀░░▀░▀░▀░░░▀▀▀░▀▀▀░▀▀▀░░▀░
""")
banner3=("""
___ _ _ _ _
|_ _| _ _ ___| |_ __ _ _ __ | | ___ (_)| |_
| | | ' \ (_-<| _|/ _` || '_ \| |/ _ \| || _|
|___||_||_|/__/ \__|\__,_|| .__/|_|\___/|_| \__|
|_|
""")
banner4=('''
mmmmmm mmmm ##
""##"" ## ""## "" ##
## ##m####mmm#####m####### m#####m##m###m ## m####m #### #######
## ##" ####mmmm " ## " mmm####" "#### ##" "## ## ##
## ## ## """"##m ## m##"""#### #### ## ## ## ##
mm##mm## ###mmmmm## ##mmm##mmm######mm##"##mmm"##mm##"mmm##mmm##mmm
"""""""" "" """""" """" """" ""## """ """" """" """""""" """"
##
''')
banner5=("""
##
###### #### ##
###### ## #### ## ##
## ## ## ##
## ##.#### :#####.####### :#### ##.###: ## .####. #### #######
## ####### ############### ###### #######:## .######. #### #######
## ### :####: .:# ## #: :##### ##### ### ### ## ##
## ## ####### . ## :#######. .#### ##. .## ## ##
## ## ##.######: ## .######### #### ## ## ## ##
## ## ## .: ## ## ## . ####. .#### ##. .## ## ##
## ## ###:. :## ##. ##: ###### #####: ### ### ## ##.
######## ########## ####################:#####.######.#############
######## ##. #### .#### ###.####.###: .#### .####. ########.####
##
##
##
""")
banner6=("""
____ __ __ _ __
/ _/____ _____ / /_ ____ _ ____ / /____ (_)/ /_
/ / / __ \ / ___// __// __ `// __ \ / // __ \ / // __/
_/ / / / / /(__ )/ /_ / /_/ // /_/ // // /_/ // // /_
/___//_/ /_//____/ \__/ \__,_// .___//_/ \____//_/ \__/
/_/
""")
banner7=('''
___ _ _ _ _
|_ _|_ __ ___| |_ __ _ _ __ | | ___ (_) |_
| || '_ \/ __| __/ _` | '_ \| |/ _ \| | __|
| || | | \__ \ || (_| | |_) | | (_) | | |_
|___|_| |_|___/\__\__,_| .__/|_|\___/|_|\__|
|_|
''')
banner8=("""
███ █
█████ █ █ █
█ █ █ █
█ █▒██▒▒███▒█████░███░█▓██ █ ███████████
█ █▓ ▒██▒ ░█ █ █▒ ▒██▓ ▓██ █▓ ▓█ █ █
█ █ ██▒░ █ ██ ██ █ █ █ █
█ █ █░███▒ █ ▒█████ ██ █ █ █ █
█ █ █ ▒█ █ █▒ ██ ██ █ █ █ █
█ █ ██░ ▒█ █░ █░ ▓██▓ ▓██░█▓ ▓█ █ █░
██████ █▒███▒ ▒██▒██▒██▓██ ▒██████████▒██
█
█
█
""")
banner9=("""
###
# # # #### ##### ## ##### # #### # #####
# ## # # # # # # # # # # # #
# # # # #### # # # # # # # # # #
# # # # # # ###### ##### # # # # #
# # ## # # # # # # # # # # #
### # # #### # # # # ###### #### # #
""")
banner10=("""
▄▄▄▄▄▄ ▄▄▄▄ ██
▀▀██▀▀ ██ ▀▀██ ▀▀ ██
██ ██▄████▄▄▄█████▄███████ ▄█████▄██▄███▄ ██ ▄████▄ ████ ███████
██ ██▀ ████▄▄▄▄ ▀ ██ ▀ ▄▄▄████▀ ▀████ ██▀ ▀██ ██ ██
██ ██ ██ ▀▀▀▀██▄ ██ ▄██▀▀▀████ ████ ██ ██ ██ ██
▄▄██▄▄██ ███▄▄▄▄▄██ ██▄▄▄██▄▄▄██████▄▄██▀██▄▄▄▀██▄▄██▀▄▄▄██▄▄▄██▄▄▄
▀▀▀▀▀▀▀▀ ▀▀ ▀▀▀▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀▀██ ▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀▀▀▀▀▀▀▀ ▀▀▀▀
██
""") | [
6738,
4738,
1330,
3572,
198,
198,
3820,
1008,
16,
16193,
7061,
6,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 1.238682 | 6,406 |
# Generated by Django 3.1.2 on 2020-10-20 18:25
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
940,
12,
1238,
1248,
25,
1495,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# This should define the abstract regulator
import threading
from LocalTimer import LocalTimerClass
from DummyMeter import Bedna
from numpy import array
| [
2,
770,
815,
8160,
262,
12531,
24161,
198,
11748,
4704,
278,
198,
6738,
10714,
48801,
1330,
10714,
48801,
9487,
198,
6738,
360,
13513,
44,
2357,
1330,
15585,
2616,
198,
6738,
299,
32152,
1330,
7177,
198,
220,
220,
220,
220,
628
] | 3.975 | 40 |
import json
from os import abort
from flask import Flask, request
from flask_cors import CORS
from bson import ObjectId
import flask_login
from requests import api
import permission
import requests
from Util.db import rule_db, keywords_db, bili_mtr_db, user_db, permission_db, api_db
from rule import keywords, update_keywords_list, update_rules
from qqbot import send, get
from user import current_login_user, register_user_module
from flask_login import login_required
import secrets
from flask_login import current_user
app = Flask(__name__)
app.secret_key = secrets.token_urlsafe(16)
register_user_module(app)
CORS(app)
@app.route("/api/rules/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/rules/<rule_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/rules/<rule_id>", methods=["DELETE"], strict_slashes=False)
@login_required
@app.route("/api/rules/", methods=["GET"], strict_slashes=False)
@login_required
@app.route("/api/keywords/", methods=["GET"], strict_slashes=False)
@login_required
@app.route("/api/keywords/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/keywords/<keyword_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/keywords/<keyword_id>", methods=["DELETE"], strict_slashes=False)
@login_required
@app.route("/api/send_group/", methods=["POST"], strict_slashes=False)
@app.route("/api/send/", methods=["POST"], strict_slashes=False)
@app.route("/api/groups", methods=["GET"])
@login_required
@app.route("/api/friends", methods=["GET"])
@login_required
@app.route("/api/bili_monitor", methods=["GET"])
@login_required
@app.route("/api/bili_monitor/<rule_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/bili_monitor/<rule_id>", methods=["DELETE"], strict_slashes=False)
@login_required
@app.route("/api/bili_monitor/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/permission/", methods=["GET"])
@login_required
@app.route("/api/permission/<username>", methods=["POST"])
@login_required
@app.route("/api/self_permission/", methods=["GET"])
@login_required
@app.route("/api/key/<username>", methods=["POST"])
@login_required
@app.route("/api/api/", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/api/", methods=["GET"], strict_slashes=False)
@login_required
@app.route("/api/api/<api_id>", methods=["POST"], strict_slashes=False)
@login_required
@app.route("/api/api/<api_id>", methods=["DELETE"], strict_slashes=False)
@login_required
| [
11748,
33918,
198,
6738,
28686,
1330,
15614,
198,
6738,
42903,
1330,
46947,
11,
2581,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
275,
1559,
1330,
9515,
7390,
198,
11748,
42903,
62,
38235,
198,
6738,
7007,
1330,
40391,
1... | 2.81014 | 927 |
# Autogenerated file. Do not edit.
from jacdac.bus import Bus, Client, EventHandlerFn, UnsubscribeFn
from .constants import *
from typing import Optional
class JacscriptManagerClient(Client):
"""
Allows for deployment and control over Jacscript virtual machine.
*
* Programs start automatically after device restart or uploading of new program.
* You can stop programs until next reset by setting the `running` register to `0`.
*
* TODO - debug interface:
* * read-globals command/register
* * globals-changed pipe
* * breakpoint command
* * some performance monitoring?
Implements a client for the `Jacscript Manager <https://microsoft.github.io/jacdac-docs/services/jacscriptmanager>`_ service.
"""
@property
def running(self) -> Optional[bool]:
"""
Indicates if the program is currently running.
To restart the program, stop it (write `0`), read back the register to make sure it's stopped,
start it, and read back.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_RUNNING).bool_value()
@running.setter
@property
def autostart(self) -> Optional[bool]:
"""
Indicates wheather the program should be re-started upon `reboot()` or `panic()`.
Defaults to `true`.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_AUTOSTART).bool_value()
@autostart.setter
@property
def logging(self) -> Optional[bool]:
"""
`log_message` reports are only sent when this is `true`.
It defaults to `false`.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_LOGGING).bool_value()
@logging.setter
@property
def program_size(self) -> Optional[int]:
"""
The size of current program.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_PROGRAM_SIZE).value()
@property
def program_hash(self) -> Optional[int]:
"""
Return FNV1A hash of the current bytecode.,
"""
return self.register(JD_JACSCRIPT_MANAGER_REG_PROGRAM_HASH).value()
def on_program_panic(self, handler: EventHandlerFn) -> UnsubscribeFn:
"""
Emitted when the program calls `panic(panic_code)` or `reboot()` (`panic_code == 0` in that case).
The byte offset in byte code of the call is given in `program_counter`.
The program will restart immediately when `panic_code == 0` or in a few seconds otherwise.
"""
return self.on_event(JD_JACSCRIPT_MANAGER_EV_PROGRAM_PANIC, handler)
def on_program_change(self, handler: EventHandlerFn) -> UnsubscribeFn:
"""
Emitted after bytecode of the program has changed.
"""
return self.on_event(JD_JACSCRIPT_MANAGER_EV_PROGRAM_CHANGE, handler)
def deploy_bytecode(self, bytecode_size: int) -> None:
"""
Open pipe for streaming in the bytecode of the program. The size of the bytecode has to be declared upfront.
To clear the program, use `bytecode_size == 0`.
The bytecode is streamed over regular pipe data packets.
The bytecode shall be fully written into flash upon closing the pipe.
If `autostart` is true, the program will start after being deployed.
The data payloads, including the last one, should have a size that is a multiple of 32 bytes.
Thus, the initial bytecode_size also needs to be a multiple of 32.
"""
self.send_cmd_packed(JD_JACSCRIPT_MANAGER_CMD_DEPLOY_BYTECODE, bytecode_size)
| [
2,
5231,
519,
877,
515,
2393,
13,
2141,
407,
4370,
13,
198,
6738,
474,
330,
67,
330,
13,
10885,
1330,
5869,
11,
20985,
11,
8558,
25060,
37,
77,
11,
791,
7266,
12522,
37,
77,
198,
6738,
764,
9979,
1187,
1330,
1635,
198,
6738,
19720... | 2.616679 | 1,367 |
# Example: binary-search the sorted range 1..24 for the value 1, with debug
# tracing enabled. NOTE(review): `binarySearch` is defined elsewhere in the
# original file (not visible in this fragment) -- confirm its signature.
a = range(1,25)
print(binarySearch(a,1,debug=True))
2,
20688,
25,
198,
64,
796,
2837,
7,
16,
11,
1495,
8,
198,
4798,
7,
39491,
18243,
7,
64,
11,
16,
11,
24442,
28,
17821,
4008
] | 2.346154 | 26 |
# Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Subscriptions deployment configuration I/O."""
import pathlib
import typing
import pydantic
from ._exceptions import DeploymentsDefinitionError
from ._loader import load_configuration
# Name of the top-level entry in the configuration file; also the field name
# validated on ``DeploymentsDefinition`` below.
ENTITY_NAME = "deployments"
class DeploymentLocations(pydantic.BaseModel):
    """Define primary and/or secondary locations for deployment."""
    # Identifier of the primary (required) deployment location.
    primary: str
    # Optional secondary location; ``None`` when only one location is used.
    secondary: typing.Optional[str]
class DeploymentSubscriptionReference(pydantic.BaseModel):
    """A subscription reference in a deployment definition."""
    # Optional patterns selecting when this subscription applies -- presumably
    # matched against git branch/tag references; TODO confirm against users.
    gitref_patterns: typing.Optional[typing.List[str]]
    # One or more primary/secondary location pairs to deploy into.
    locations: typing.List[DeploymentLocations]
    # Root fully-qualified domain name for resources in this subscription.
    root_fqdn: str
class SingularDeployment(pydantic.BaseModel):
    """Definition of a singular deployment."""
    # Subscriptions participating in this deployment, keyed by subscription
    # identifier.
    subscriptions: typing.Dict[str, DeploymentSubscriptionReference]
# Deployment definitions keyed by deployment name; the value type of the
# ``deployment_tuples`` field below.
ValueType = typing.Dict[str, SingularDeployment]
class DeploymentsEndpointsDefinitions(pydantic.BaseModel):
    """Definition of deployment tuples and URL endpoints."""
    # URL endpoints associated with the deployments.
    url_endpoints: typing.List[str]
    # Named deployments ("deployment tuples") in this configuration.
    deployment_tuples: ValueType
# Type variable bound to DeploymentsDefinition; not used in the code visible
# in this fragment -- kept for compatibility with external users.
T = typing.TypeVar("T", bound="DeploymentsDefinition")
class DeploymentsDefinition(pydantic.BaseModel):
    """Definition of deployments."""
    # The top-level "deployments" entry of the configuration file.
    deployments: DeploymentsEndpointsDefinitions
    @pydantic.validator(ENTITY_NAME)
    def check_deployments(
        cls: pydantic.BaseModel, value: DeploymentsEndpointsDefinitions
    ) -> DeploymentsEndpointsDefinitions:
        """Validate ``deployment_tuples`` field.

        Raises ``ValueError`` (surfaced by pydantic as a validation error)
        when the deployments entry or its deployment names are empty.
        """
        if not value:
            raise ValueError("Empty deployment prohibited")
        if not value.deployment_tuples:
            raise ValueError("Empty deployment names prohibited")
        return value
def load_deployments(file_path: pathlib.Path) -> DeploymentsDefinition:
    """
    Read deployment definitions from a configuration file.

    Args:
        file_path: Path to the deployments configuration file.

    Returns:
        Parsed deployment definitions.
    Raises:
        DeploymentsDefinitionError: If an error occurs loading the file.
    """
    loaded = load_configuration(
        file_path, DeploymentsDefinition, DeploymentsDefinitionError, ENTITY_NAME
    )
    # ``load_configuration`` is declared to return a generic model; narrow it
    # for callers.
    return typing.cast(DeploymentsDefinition, loaded)
| [
2,
220,
15069,
357,
66,
8,
33448,
7318,
12,
55,
21852,
198,
2,
198,
2,
220,
770,
2393,
318,
636,
286,
2057,
87,
62,
7959,
2840,
62,
31391,
13,
198,
2,
198,
2,
220,
921,
815,
423,
2722,
257,
4866,
286,
262,
17168,
13789,
1863,
... | 3.032379 | 803 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 15:39:49 2021
Copyright 2021 by Hadrien Montanelli.
"""
import numpy as np
from numpy.linalg import norm
from scipy.optimize import minimize
def singint(A, x0, n, p, trans=[True, True, True], optim='BFGS', quad='numerical'):
    """
    Compute a singular or near-singular integral over a quadratic triangle via
    the algorithm presented in [1]. Mathematically, it computes the integral
        I = int_T dS(x)/|x-x0|
    over a quadratic triangle T.
    Inputs
    ------
    A : numpy.ndarray
        The six points that define the quadratic triangle as a 6x3 matrix.
    x0 : numpy.ndarray
        The singularity as a 3x1 vector.
    n : int
        The quadrature size; 2D integrals will use n*n points, 1D integrals
        will use 10*n points.
    p : int
        The regularization order; -1, 0, or 1 for T_{-1}, T_0 or T_1
        regularization.
    trans : list
        A list of bool variables that specify whether one wants to use
        transplanted quadrature for T_{-1}, T_0 and T_1 regularization.
    optim : str
        The method used for locating the singularity ('BFGS' or 'Newton').
    quad : str
        The method for computing the integral of T_{-1} ('numerical' or 'exact').
    Output
    ------
    I : float
        The value of the integral.
    References
    ----------
    [1] H. Montanelli, M. Aussal and H. Haddar, Computing weakly singular and
    near-singular integrals in high-order boundary elements, submitted.
    """
    # NOTE(review): the helper routines used below (map_func, map_jac,
    # map_hess, locate_sing, compute_Ireg, compute_In1, compute_I0,
    # compute_I1) are defined elsewhere in the original module and are not
    # visible in this fragment.
    # NOTE(review): the mutable default ``trans=[True, True, True]`` is shared
    # across calls; safe only as long as no callee mutates it -- confirm.
    # Tolerance for optimization and near-singularities:
    tol = 1e-12
    # Step 1: Map back to reference triangle.
    F = map_func(A)
    J = map_jac(A)
    H = map_hess(A)
    # Step 2: Locating the singularity or near-singularity.
    x0h = locate_sing(x0, F, J, H, optim, tol)
    eh = F(x0h) - x0
    h = norm(eh)
    # Normalize the direction vector; below the tolerance, treat the point as
    # exactly on the surface (h = 0) and use a zero direction.
    if (h > tol):
        eh /= h
    else:
        h = 0
        eh = np.zeros(3)
    # Step 3: Compute regularized part with 2D quadrature.
    Ireg = compute_Ireg(x0, x0h, h, eh, F, J, H, n, p)
    # Step 4: Integrate Taylor terms.
    # 1D quadratures use scl*n = 10*n points (see docstring).
    scl = 10
    In1 = compute_In1(x0h, h, J, scl*n, quad, trans, tol)
    if (p == -1):
        I = In1 + Ireg
    elif (p > -1):
        I0 = compute_I0(x0h, h, eh, J, H, scl*n, trans, tol)
        if (p == 0):
            I = In1 + I0 + Ireg
        elif (p == 1):
            I1 = compute_I1(x0h, h, eh, J, H, scl*n, trans, tol)
            I = In1 + I0 + I1 + Ireg
    return I
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
7653,
2579,
1315,
25,
2670,
25,
2920,
33448,
198,
198,
15269,
33448,
416,
11161,
1535... | 2.133333 | 1,215 |
from Activities.advice_commissions_consulting import Advice_Commission_Consulting
from Activities.dir_mgmt import Direction_Management
from Activities.internship import Intership
from Activities.research import Research
from Activities.teaching import Teaching
from Activities.technical_scientific import TechnicalScientific
from Activities.training import Training
| [
6738,
36270,
13,
324,
28281,
62,
785,
8481,
62,
5936,
586,
278,
1330,
42708,
62,
50246,
62,
9444,
586,
278,
198,
6738,
36270,
13,
15908,
62,
11296,
16762,
1330,
41837,
62,
48032,
198,
6738,
36270,
13,
23124,
6720,
1330,
554,
1010,
105... | 4.815789 | 76 |
# tools for analysing topologial quantities for various systems
import numpy as np
from scipy.integrate import quad, dblquad
from scipy import pi, log, imag
from quantum_util.operators import ParameterizedWavefunction
# 1D topology
class TorusState2D(object):
    """State defined on a two-parameter (2-D) torus.

    Wraps a callable ``state_fun(p1, p2)`` returning the state at a point of
    the torus, together with an optional Hamiltonian function.

    Todo: Add some memorization and/or interpolation
    """
    def __init__(self, state_fun, shape=None, ham_f=None):
        """Store the state function and record/infer its output shape.

        :param state_fun: callable of two periodic parameters returning an
            array-like state; probed at (0, 0) when ``shape`` is not given.
        :param shape: optional explicit shape of the state array.
        :param ham_f: optional Hamiltonian function -- exact semantics not
            visible in this fragment; TODO confirm.
        """
        self._state_f = state_fun
        # Probe the state function at the origin when no shape is supplied.
        self.shape = shape if shape is not None else state_fun(0,0).shape
        self.ham_f = ham_f
class TorusState1D(object):
    """State defined on a one-parameter (1-D) torus.

    Placeholder: construction and behaviour are not implemented yet.
    """
    def __init__(self):
        """TODO: to be defined1. """
        pass
def polarization(wf, d_phi=1.0e-10):
    """
    Electronic polarization of a 1-D system via the Resta formula:

        P = -Im log det( W^dagger X W ),   X = diag(exp(-i * phi_j))

    :param wf: 2-D array of shape (L, n_occ) whose columns are the occupied
        single-particle states on an L-site chain.
    :param d_phi: unused; kept for backward compatibility with existing
        callers (presumably intended as a finite-difference phase step).
    :return: float polarization (an angle, defined modulo 2*pi).
    """
    # Bug fix: the original body referenced an undefined name ``states``
    # instead of the ``wf`` parameter, and called diag/exp/linspace/det/conj
    # without importing them; use ``wf`` and the numpy equivalents.
    L = wf.shape[0]
    # Position-phase operator. NOTE(review): linspace includes the 2*pi
    # endpoint, so the last site repeats the phase of site 0 -- confirm the
    # intended lattice convention (2*pi*j/L would use endpoint=False).
    X = np.diag(np.exp(-1.0j * np.linspace(0, 2 * np.pi, L)))
    overlap = np.conj(wf.T) @ X @ wf
    return -np.imag(np.log(np.linalg.det(overlap)))
# Where do these differ. And why does one involve inverting the state.
## Derivitive with respect to potential
# 2D topology
| [
2,
4899,
329,
11090,
278,
1353,
928,
498,
17794,
329,
2972,
3341,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
18908,
4873,
1330,
15094,
11,
288,
2436,
47003,
198,
6738,
629,
541,
88,
1330,
31028,
11,
2604,
11,
3... | 2.483146 | 445 |
import csv
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from dfirtrack_config.models import SystemImporterFileCsvFormbasedConfigModel
from dfirtrack_main.importer.file.csv_check_data import check_config, check_file, check_row
from dfirtrack_main.importer.file.csv_importer_forms import SystemImporterFileCsvFormbasedForm
from dfirtrack_main.importer.file.csv_messages import final_messages
from dfirtrack_main.importer.file.csv_set_system_attributes import case_attributes_form_based, company_attributes_form_based, ip_attributes, tag_attributes_form_based
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import System
from io import TextIOWrapper
@login_required(login_url="/login")
# deprecated, TODO: check for useful stuff regarding tag handling
#
#from dfirtrack.config import TAGLIST
#from dfirtrack.config import TAGPREFIX
#
# """
# - remove all tags for systems beginning with 'TAGPREFIX' (if there are any)
# - evaluate given CSV line by line (without first row)
# - check whether this line has relevant tags (leave loop if not)
# - add relevant tags to this system
# """
#
# # check TAGLIST (from settings.config) for empty list
# if not TAGLIST:
# messages.error(request, "No relevant tags defined. Check `TAGLIST` in `dfirtrack.config`!")
# # call logger
# error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGS_DEFINED.")
# return redirect('/system/')
# else:
# taglist = TAGLIST
#
# # check TAGPREFIX (from settings.config) for empty string
# if TAGPREFIX is "":
# messages.error(request, "No prefix string defined. Check `TAGPREFIX` in `dfirtrack.config`!")
# # call logger
# error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGPREFIX_DEFINED.")
# return redirect('/system/')
# # expand the string by an underscore
# else:
## tagprefix = TAGPREFIX + "_"
# tagprefix = TAGPREFIX + "-"
#
# # create tagaddlist to append for every new system
# tagaddlist = []
# for tag in taglist:
# tagaddlist.append(tagprefix + tag)
#
# """ remove all tags for systems beginning with 'TAGPREFIX' (if there are any) """
#
# # get all systems that have tags beginning with 'TAGPREFIX' | prefixtagsystems -> queryset
# prefixtagsystems=System.objects.filter(tag__tag_name__startswith=tagprefix)
#
# # iterate over systems in queryset | prefixtagsystem -> system object
# for prefixtagsystem in prefixtagsystems:
#
# # get all tags beginning with 'TAGPREFIX' that belong to the actual system | systemprefixtags -> queryset
# systemprefixtags=prefixtagsystem.tag.filter(tag_name__startswith=tagprefix)
#
# # iterate over queryset | systemprefixtag -> tag object
# for systemprefixtag in systemprefixtags:
# # delete all existing tags (the m2m relationship) beginning with 'TAGPREFIX' for this system (so that removed tags from csv will be removed as well)
# systemprefixtag.system_set.remove(prefixtagsystem)
#
# # get tags from csv
# tagcsvstring = row[9]
# if tagcsvstring == '':
# # autoincrement systems_skipped_counter
# systems_skipped_counter += 1
# # autoincrement row_counter
# row_counter += 1
# # leave because systems without tags are not relevant
# continue
# else:
# # convert string (at whitespaces) to list
# tagcsvlist = tagcsvstring.split()
#
# # create empty list for mapping
# tagaddlist = []
# # check for relevant tags and add to list
# for tag in taglist:
# if tag in tagcsvlist:
# tagaddlist.append(tagprefix + tag)
#
# # check if tagaddlist is empty
# if not tagaddlist:
# # autoincrement systems_skipped_counter
# systems_skipped_counter += 1
# # autoincrement row_counter
# row_counter += 1
# # leave because there are no relevant tags
# continue
#
# if not row[10]:
# # continue if there is an empty string
# pass
# else:
# # get object
# tag_error = Tag.objects.get(tag_name=tagprefix + 'Error')
# # add error tag to system
# tag_error.system_set.add(system)
#
# # iterate over tags in tagaddlist
# for tag_name in tagaddlist:
# # get object
# tag = Tag.objects.get(tag_name=tag_name)
# # add tag to system
# tag.system_set.add(system)
# # get tagcolor object
# tagcolor = Tagcolor.objects.get(tagcolor_name='primary')
#
# # create tag if needed
# tag, created = Tag.objects.get_or_create(tag_name=tag_name, tagcolor=tagcolor)
# # call logger if created
# if created == True:
# tag.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_TAG_CREATED")
# messages.success(request, 'Tag "' + tag.tag_name + '" created.')
#
# # add tag to system
# tag.system_set.add(system)
| [
11748,
269,
21370,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
11,
8543,
19... | 2.214895 | 2,578 |
import os
# Absolute path of the directory containing this module; conventionally used
# as an anchor for building paths to application resources/config files.
basedir = os.path.abspath(os.path.dirname(__file__))
| [
11748,
28686,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198
] | 2.423077 | 26 |
import os
from pathlib import Path
import warnings
import pygeos as pg
import geopandas as gp
from pyogrio import read_dataframe, write_dataframe
from analysis.constants import CRS, STATES
# Two-letter state/territory codes already covered by the previous data
# region (SE + R2 / R6 per the comment further below); dams in these states
# were handled by earlier snapping passes and are excluded here.
PREVIOUS_STATES = {
    "AL",
    "AR",
    "AZ",
    "CO",
    "FL",
    "GA",
    "IA",
    "KS",
    "KY",
    "LA",
    "MO",
    "MS",
    "MT",
    "NC",
    "ND",
    "NE",
    "NM",
    "OK",
    "PR",
    "SC",
    "SD",
    "TN",
    "TX",
    "UT",
    "VA",
    "WY",
}
# States to process in this run: the full state list minus the previously
# handled region (sorted for deterministic ordering).
NEW_STATES = sorted(set(STATES) - PREVIOUS_STATES)
# Silence the pandas/pyarrow experimental-Parquet warning raised by feather I/O.
warnings.filterwarnings("ignore", message=".*initial implementation of Parquet.*")
data_dir = Path("data")
src_dir = data_dir / "barriers/source"
# Scratch directory for outputs; created on demand below.
out_dir = Path("/tmp/sarp")
if not out_dir.exists():
    os.makedirs(out_dir)
### Create initial version of snapping dataset for states outside previous region (SE + R2 / R6)
# load NABD dams (drop any that have missing or duplicate NIDID)
# NOTE: the original chained .dropna(subset=["NIDID"]) a second time after
# .to_crs(); reprojection does not touch NIDID, so that call was a no-op and
# has been removed.
nabd = (
    read_dataframe(src_dir / "NABD_V2_beta/NABD_V2_beta.shp", columns=["NIDID"])
    .dropna(subset=["NIDID"])
    .to_crs(CRS)
    .drop_duplicates(subset=["NIDID"], keep=False)
    .set_index("NIDID")
)
# load previously snapped dams
prev = gp.read_feather(src_dir / "manually_snapped_dams.feather",)
# First two characters of SARPID encode the source state.
prev["SourceState"] = prev.SARPID.str[:2]
prev.ManualReview = prev.ManualReview.astype("uint8")
# Keep only dams in the new states with review codes 4, 5 or 13 -- presumably
# the "snapped/reviewed" statuses; confirm against the SARP code book.
prev = prev.loc[
    prev.SourceState.isin(NEW_STATES) & prev.ManualReview.isin([4, 5, 13])
].copy()
# load latest dams downloaded from state-level feature services
# limited to non-SARP states
df = gp.read_feather(src_dir / "sarp_dams.feather")
# keep=False drops ALL records sharing a duplicated NIDID (not just extras).
df = df.loc[df.SourceState.isin(NEW_STATES)].drop_duplicates(
    subset=["NIDID"], keep=False
)
df.ManualReview = df.ManualReview.fillna(0).astype("uint8")
# Bring in previously-reviewed geometry/status and the NABD geometry by NIDID.
df = df.join(prev[["ManualReview", "geometry"]], on="NIDID", rsuffix="_prev",).join(
    nabd.geometry.rename("nabd_geometry"), on="NIDID"
)
# if previously reviewed, use that directly
ix = (df.ManualReview == 0) & df.geometry_prev.notnull()
df.loc[ix, "ManualReview"] = df.loc[ix].ManualReview_prev
df.loc[ix, "geometry"] = df.loc[ix].geometry_prev
# update location from NABD if within 5,000 meters
# (assumes CRS units are meters -- confirm against analysis.constants.CRS)
ix = (df.ManualReview == 0) & (
    pg.distance(df.geometry.values.data, df.nabd_geometry.values.data) <= 5000
)
# Code 2 -- presumably "auto-snapped to NABD"; confirm against the code book.
df.loc[ix, "ManualReview"] = 2
df.loc[ix, "geometry"] = df.loc[ix].nabd_geometry
df = df.drop(columns=["ManualReview_prev", "geometry_prev", "nabd_geometry"])
# drop anything that wasn't snapped
df = df.loc[df.ManualReview > 0].copy()
df.ManualReview = df.ManualReview.astype("uint8")
df.to_feather(src_dir / "snapped_outside_prev_v1.feather")
write_dataframe(df, src_dir / "snapped_outside_prev_v1.fgb")
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
14601,
198,
198,
11748,
12972,
469,
418,
355,
23241,
198,
11748,
30324,
392,
292,
355,
27809,
198,
6738,
12972,
519,
27250,
1330,
1100,
62,
7890,
14535,
11,
3551,
62,
7890,
1... | 2.369912 | 1,130 |
from pathlib import Path
from deliverable_model.serving.remote_model.model_endpoint_base import ModelEndpointBase
# Registry mapping endpoint-type identifiers to ModelEndpointBase classes;
# populated elsewhere (registration helpers not visible in this fragment).
_endpoint_type_registry = {}
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
5203,
540,
62,
19849,
13,
31293,
13,
47960,
62,
19849,
13,
19849,
62,
437,
4122,
62,
8692,
1330,
9104,
12915,
4122,
14881,
198,
198,
62,
437,
4122,
62,
4906,
62,
2301,
4592,
796,
23884,
... | 3.363636 | 44 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-13 23:50
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
18,
319,
2177,
12,
2919,
12,
1485,
2242,
25,
1120,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
from ts_app import run_in_cli
# Script entry point: launch the ts_app command-line interface.
if __name__ == "__main__":
    run_in_cli()
| [
6738,
40379,
62,
1324,
1330,
1057,
62,
259,
62,
44506,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1057,
62,
259,
62,
44506,
3419,
198
] | 2.272727 | 33 |
import subprocess
# NOTE(review): monitor_deamons() is not defined in this fragment (and the
# name looks like a typo for "daemons"); confirm it is defined or imported in
# the full file before running.
monitor_deamons()
11748,
850,
14681,
198,
198,
41143,
62,
2934,
321,
684,
3419
] | 3.272727 | 11 |
# Default configuration dictionary ("em" presumably = end model -- confirm
# against the consuming trainer code). Covers network shape, training loop,
# optimizer/scheduler, logging, TensorBoard writing and checkpointing; user
# config is expected to override these values.
em_default_config = {
    # GENERAL
    "seed": None,
    "verbose": True,
    "show_plots": True,
    # Network
    # The first value is the output dim of the input module (or the sum of
    # the output dims of all the input modules if multitask=True and
    # multiple input modules are provided). The last value is the
    # output dim of the head layer (i.e., the cardinality of the
    # classification task). The remaining values are the output dims of
    # middle layers (if any). The number of middle layers will be inferred
    # from this list.
    "layer_out_dims": [10, 2],
    # Input layer configs
    "input_layer_config": {
        "input_relu": True,
        "input_batchnorm": False,
        "input_dropout": 0.0,
    },
    # Middle layer configs
    "middle_layer_config": {
        "middle_relu": True,
        "middle_batchnorm": False,
        "middle_dropout": 0.0,
    },
    # Can optionally skip the head layer completely, for e.g. running baseline
    # models...
    "skip_head": False,
    # Device
    "device": "cpu",
    # TRAINING
    "train_config": {
        # Loss function config
        "loss_fn_reduction": "mean",
        # Display
        "progress_bar": False,
        # Dataloader
        "data_loader_config": {"batch_size": 32, "num_workers": 1, "shuffle": True},
        # Loss weights
        "loss_weights": None,
        # Train Loop
        "n_epochs": 10,
        # 'grad_clip': 0.0,
        "l2": 0.0,
        "validation_metric": "accuracy",
        "validation_freq": 1,
        "validation_scoring_kwargs": {},
        # Evaluate dev for during training every this many epochs
        # Optimizer
        "optimizer_config": {
            "optimizer": "adam",
            "optimizer_common": {"lr": 0.01},
            # Optimizer - SGD
            "sgd_config": {"momentum": 0.9},
            # Optimizer - Adam
            "adam_config": {"betas": (0.9, 0.999)},
            # Optimizer - RMSProp
            "rmsprop_config": {},  # Use defaults
        },
        # LR Scheduler (for learning rate)
        "lr_scheduler": "reduce_on_plateau",
        # [None, 'exponential', 'reduce_on_plateau']
        # 'reduce_on_plateau' uses checkpoint_metric to assess plateaus
        "lr_scheduler_config": {
            # Freeze learning rate initially this many epochs
            "lr_freeze": 0,
            # Scheduler - exponential
            "exponential_config": {"gamma": 0.9},  # decay rate
            # Scheduler - reduce_on_plateau
            "plateau_config": {
                "factor": 0.5,
                "patience": 10,
                "threshold": 0.0001,
                "min_lr": 1e-4,
            },
        },
        # Logger (see metal/logging/logger.py for descriptions)
        "logger": True,
        "logger_config": {
            "log_unit": "epochs",  # ['seconds', 'examples', 'batches', 'epochs']
            "log_train_every": 1,  # How often train metrics are calculated (optionally logged to TB)
            "log_train_metrics": [
                "loss"
            ],  # Metrics to calculate and report every `log_train_every` units. This can include built-in and user-defined metrics.
            "log_train_metrics_func": None,  # A function or list of functions that map a model + train_loader to a dictionary of custom metrics
            "log_valid_every": 1,  # How frequently to evaluate on valid set (must be multiple of log_freq)
            "log_valid_metrics": [
                "accuracy"
            ],  # Metrics to calculate and report every `log_valid_every` units; this can include built-in and user-defined metrics
            "log_valid_metrics_func": None,  # A function or list of functions that maps a model + valid_loader to a dictionary of custom metrics
        },
        # LogWriter/Tensorboard (see metal/logging/writer.py for descriptions)
        "writer": None,  # [None, "json", "tensorboard"]
        "writer_config": {  # Log (or event) file stored at log_dir/run_dir/run_name
            "log_dir": None,
            "run_dir": None,
            "run_name": None,
            "writer_metrics": None,  # May specify a subset of metrics in metrics_dict to be written
            "include_config": True,  # If True, include model config in log
        },
        # Checkpointer (see metal/logging/checkpointer.py for descriptions)
        "checkpoint": True,  # If True, checkpoint models when certain conditions are met
        "checkpoint_config": {
            "checkpoint_best": True,
            "checkpoint_every": None,  # uses log_valid_unit for units; if not None, checkpoint this often regardless of performance
            "checkpoint_metric": "accuracy",  # Must be in metrics dict; assumes valid split unless appended with "train/"
            "checkpoint_metric_mode": "max",  # ['max', 'min']
            "checkpoint_dir": "checkpoints",
            "checkpoint_runway": 0,
        },
    },
}
| [
368,
62,
12286,
62,
11250,
796,
1391,
198,
220,
220,
220,
1303,
41877,
198,
220,
220,
220,
366,
28826,
1298,
6045,
11,
198,
220,
220,
220,
366,
19011,
577,
1298,
6407,
11,
198,
220,
220,
220,
366,
12860,
62,
489,
1747,
1298,
6407,
... | 2.271639 | 2,172 |
# Emit log output to stdout -- presumably consumed by the adsmutils logging
# setup; confirm against the service bootstrap code.
LOG_STDOUT = True
# ADS bigquery search endpoint used by the author-affiliation service.
AUTHOR_AFFILIATION_SOLRQUERY_URL = "http://api.adsabs.harvard.edu/v1/search/bigquery"
# Maximum number of records requested from Solr per query.
# NOTE(review): "AFFILATION" is a typo for "AFFILIATION", but this is public
# configuration overridable via env vars -- renaming would break consumers.
AUTHOR_AFFILATION_SERVICE_MAX_RECORDS_SOLR = 1000
# must be here for adsmutils to override it using env vars
# but if left empty (resolving to False) it won't be used
SERVICE_TOKEN = None
| [
198,
25294,
62,
36886,
796,
6407,
198,
198,
32,
24318,
1581,
62,
32,
5777,
4146,
40,
6234,
62,
50,
3535,
49,
10917,
19664,
62,
21886,
796,
366,
4023,
1378,
15042,
13,
5643,
8937,
13,
9869,
10187,
13,
15532,
14,
85,
16,
14,
12947,
... | 2.573913 | 115 |
# Generated by Django 2.0.3 on 2018-04-22 21:04
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
18,
319,
2864,
12,
3023,
12,
1828,
2310,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# A framework to run regression tests on mesh modifiers and operators based on howardt's mesh_ops_test.py
#
# General idea:
# A test is:
# Object mode
# Select <test_object>
# Duplicate the object
# Select the object
# Apply operation for each operation in <operations_stack> with given parameters
# (an operation is either a modifier or an operator)
# test_mesh = <test_object>.data
# run test_mesh.unit_test_compare(<expected object>.data)
# delete the duplicate object
#
# The words in angle brackets are parameters of the test, and are specified in
# the main class MeshTest.
#
# If the environment variable BLENDER_TEST_UPDATE is set to 1, the <expected_object>
# is updated with the new test result.
# Tests are verbose when the environment variable BLENDER_VERBOSE is set.
import bpy
import functools
import inspect
import os
# Output from this module and from blender itself will occur during tests.
# We need to flush python so that the output is properly interleaved, otherwise
# blender's output for one test will end up showing in the middle of another test...
# Rebind print so every call flushes immediately, keeping this module's
# output correctly interleaved with Blender's own stdout during test runs.
print = functools.partial(print, flush=True)
class ModifierSpec:
    """Specification of a single Generate/Deform/Physics object modifier.

    Captures the modifier's name, its Blender type identifier, the parameter
    values to set on it, and an optional end frame for simulations.
    """

    def __init__(self, modifier_name: str, modifier_type: str, modifier_parameters: dict, frame_end=0):
        """Create a modifier specification.

        :param modifier_name: name given to the object modifier, e.g. "myFirstSubsurfModif"
        :param modifier_type: Blender modifier type identifier, e.g. "SUBSURF"
        :param modifier_parameters: mapping {parameter name: value} applied to the modifier, e.g. {"quality": 4}
        :param frame_end: frame at which the simulation is baked or the modifier is applied
        """
        self.frame_end = frame_end
        self.modifier_parameters = modifier_parameters
        self.modifier_type = modifier_type
        self.modifier_name = modifier_name
class ParticleSystemSpec:
    """Specification of a particle-system modifier and its parameters."""

    def __init__(self, modifier_name: str, modifier_type: str, modifier_parameters: dict, frame_end: int):
        """Create a particle-system specification.

        :param modifier_name: name given to the object modifier, e.g. "Particles"
        :param modifier_type: Blender modifier type identifier, e.g. "PARTICLE_SYSTEM"
        :param modifier_parameters: mapping {parameter name: value} applied to the particle system, e.g. {"seed": 1}
        :param frame_end: last frame of the simulation at which the modifier is applied
        """
        self.frame_end = frame_end
        self.modifier_parameters = modifier_parameters
        self.modifier_type = modifier_type
        self.modifier_name = modifier_name
class OperatorSpecEditMode:
    """Specification of one edit-mode mesh operator invocation.

    Bundles the bpy.ops.mesh operator name, its keyword arguments, the mesh
    selection mode to switch to, and the element indices to select before
    the operator runs.
    """

    def __init__(self, operator_name: str, operator_parameters: dict, select_mode: str, selection: set):
        """Create an operator specification; rejects invalid selection modes.

        :param operator_name: name of a mesh operator from bpy.ops.mesh, e.g. "bevel" or "fill"
        :param operator_parameters: mapping {parameter name: value} of operator arguments
        :param select_mode: mesh selection mode; one of 'VERT', 'EDGE' or 'FACE'
        :param selection: indices of vertices/edges/faces to select, e.g. [0, 9, 10]
        :raises ValueError: if ``select_mode`` is not a valid selection mode
        """
        if select_mode not in ('VERT', 'EDGE', 'FACE'):
            raise ValueError("select_mode must be either {}, {} or {}".format('VERT', 'EDGE', 'FACE'))
        self.select_mode = select_mode
        self.selection = selection
        self.operator_parameters = operator_parameters
        self.operator_name = operator_name
class OperatorSpecObjectMode:
    """
    Value object holding an object-mode operator and its parameters.
    Helper class for DeformModifierSpec: it covers operations that run in
    Object Mode, whereas OperatorSpecEditMode covers Edit Mode.
    """
    def __init__(self, operator_name: str, operator_parameters: dict):
        """
        :param operator_name: str - name of the object operator from bpy.ops.object, e.g. "shade_smooth" or "shape_keys"
        :param operator_parameters: dict - contains operator parameters.
        """
        # Simple record; consumed later by MeshTest._apply_operator_object_mode.
        self.operator_name, self.operator_parameters = operator_name, operator_parameters
class DeformModifierSpec:
    """
    Bundles a list of deform modifiers with an optional object-mode operator,
    for deform modifiers which additionally need an object operator applied.
    """
    def __init__(self, frame_number: int, modifier_list: list, object_operator_spec: OperatorSpecObjectMode = None):
        """
        Construct a Deform Modifier spec (for user input).
        :param frame_number: int - the frame at which animated keyframe is inserted
        :param modifier_list: ModifierSpec - contains modifiers
        :param object_operator_spec: OperatorSpecObjectMode - contains object operators
        """
        # Store all three pieces verbatim for MeshTest._apply_deform_modifier.
        self.frame_number, self.modifier_list, self.object_operator_spec = (
            frame_number, modifier_list, object_operator_spec)
class MeshTest:
    """
    A mesh testing class targeted at testing modifiers and operators on a single object.
    It holds a stack of mesh operations, i.e. modifiers or operators. The test is executed using
    the public method run_test().
    """
    def __init__(
            self,
            test_name: str,
            test_object_name: str,
            expected_object_name: str,
            operations_stack=None,
            apply_modifiers=False,
            do_compare=False,
            threshold=None
    ):
        """
        Constructs a MeshTest object. Raises a KeyError if objects with names expected_object_name
        or test_object_name don't exist.
        :param test_name: str - unique test name identifier.
        :param test_object_name: str - Name of object of mesh type to run the operations on.
        :param expected_object_name: str - Name of object of mesh type that has the expected
                geometry after running the operations.
        :param operations_stack: list - stack holding operations to perform on the test_object.
        :param apply_modifiers: bool - True if we want to apply the modifiers right after adding them to the object.
               - True if we want to apply the modifier to a list of modifiers, after some operation.
               This affects operations of type ModifierSpec and DeformModifierSpec.
        :param do_compare: bool - True if we want to compare the test and expected objects, False otherwise.
        :param threshold : exponent: To allow variations and accept difference to a certain degree.
        """
        if operations_stack is None:
            operations_stack = []
        # Validate the whole stack up front so a badly-typed spec fails at
        # construction time rather than mid-run.
        for operation in operations_stack:
            if not (isinstance(operation, ModifierSpec) or isinstance(operation, OperatorSpecEditMode)
                    or isinstance(operation, OperatorSpecObjectMode) or isinstance(operation, DeformModifierSpec)
                    or isinstance(operation, ParticleSystemSpec)):
                raise ValueError("Expected operation of type {} or {} or {} or {}. Got {}".
                                 format(type(ModifierSpec), type(OperatorSpecEditMode),
                                        type(DeformModifierSpec), type(ParticleSystemSpec),
                                        type(operation)))
        self.operations_stack = operations_stack
        self.apply_modifier = apply_modifiers
        self.do_compare = do_compare
        self.threshold = threshold
        self.test_name = test_name
        # Both flags are driven purely by environment variables.
        self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
        self.update = os.getenv('BLENDER_TEST_UPDATE') is not None
        # Initialize test objects.
        objects = bpy.data.objects
        self.test_object = objects[test_object_name]
        self.expected_object = objects[expected_object_name]
        if self.verbose:
            print("Found test object {}".format(test_object_name))
            print("Found test object {}".format(expected_object_name))
        # Private flag to indicate whether the blend file was updated after the test.
        self._test_updated = False
    def set_test_object(self, test_object_name):
        """
        Set test object for the test. Raises a KeyError if object with given name does not exist.
        :param test_object_name: name of test object to run operations on.
        """
        objects = bpy.data.objects
        self.test_object = objects[test_object_name]
    def set_expected_object(self, expected_object_name):
        """
        Set expected object for the test. Raises a KeyError if object with given name does not exist
        :param expected_object_name: Name of expected object.
        """
        objects = bpy.data.objects
        self.expected_object = objects[expected_object_name]
    def is_test_updated(self):
        """
        Check whether running the test with BLENDER_TEST_UPDATE actually modified the .blend test file.
        :return: Bool - True if blend file has been updated. False otherwise.
        """
        # NOTE(review): only run_test() resets this to False here; the code that
        # sets it True is not in this excerpt (presumably _on_failed_test) — confirm.
        return self._test_updated
    def _set_parameters_impl(self, modifier, modifier_parameters, nested_settings_path, modifier_name):
        """
        Doing a depth first traversal of the modifier parameters and setting their values.
        :param: modifier: Of type modifier, its altered to become a setting in recursion.
        :param: modifier_parameters : dict or sequence, a simple/nested dictionary of modifier parameters.
        :param: nested_settings_path : list(stack): helps in tracing path to each node.
        """
        # nested_settings_path acts as an explicit stack of attribute names from
        # the modifier down to the leaf value currently being assigned.
        if not isinstance(modifier_parameters, dict):
            param_setting = None
            for i, setting in enumerate(nested_settings_path):
                # We want to set the attribute only when we have reached the last setting.
                # Applying of intermediate settings is meaningless.
                if i == len(nested_settings_path) - 1:
                    setattr(modifier, setting, modifier_parameters)
                elif hasattr(modifier, setting):
                    param_setting = getattr(modifier, setting)
                    # getattr doesn't accept canvas_surfaces["Surface"], but we need to pass it to setattr.
                    if setting == "canvas_surfaces":
                        modifier = param_setting.active
                    else:
                        modifier = param_setting
                else:
                    # Clean up first
                    bpy.ops.object.delete()
                    raise Exception("Modifier '{}' has no parameter named '{}'".
                                    format(modifier_name, setting))
            # It pops the current node before moving on to its sibling.
            nested_settings_path.pop()
            return
        for key in modifier_parameters:
            nested_settings_path.append(key)
            self._set_parameters_impl(modifier, modifier_parameters[key], nested_settings_path, modifier_name)
        if nested_settings_path:
            nested_settings_path.pop()
    def set_parameters(self, modifier, modifier_parameters):
        """
        Wrapper for _set_parameters_impl: starts the recursive traversal with an
        empty settings path.
        """
        settings = []
        modifier_name = modifier.name
        self._set_parameters_impl(modifier, modifier_parameters, settings, modifier_name)
    def _add_modifier(self, test_object, modifier_spec: ModifierSpec):
        """
        Add modifier to object.
        :param test_object: bpy.types.Object - Blender object to apply modifier on.
        :param modifier_spec: ModifierSpec - ModifierSpec object with parameters
        """
        # Modifier types whose effect must be baked before it is visible.
        bakers_list = ['CLOTH', 'SOFT_BODY', 'DYNAMIC_PAINT', 'FLUID']
        scene = bpy.context.scene
        scene.frame_set(1)
        modifier = test_object.modifiers.new(modifier_spec.modifier_name,
                                             modifier_spec.modifier_type)
        if modifier is None:
            raise Exception("This modifier type is already added on the Test Object, please remove it and try again.")
        if self.verbose:
            print("Created modifier '{}' of type '{}'.".
                  format(modifier_spec.modifier_name, modifier_spec.modifier_type))
        # Special case for Dynamic Paint, need to toggle Canvas on.
        if modifier.type == "DYNAMIC_PAINT":
            bpy.ops.dpaint.type_toggle(type='CANVAS')
        self.set_parameters(modifier, modifier_spec.modifier_parameters)
        if modifier.type in bakers_list:
            self._bake_current_simulation(test_object, modifier.name, modifier_spec.frame_end)
        scene.frame_set(modifier_spec.frame_end)
    def _bake_current_simulation(self, test_object, test_modifier_name, frame_end):
        """
        FLUID: Bakes the simulation
        SOFT BODY, CLOTH, DYNAMIC PAINT: Overrides the point_cache context and then bakes.
        """
        # NOTE(review): passing the override dict positionally to bpy.ops relies
        # on the legacy context-override API — confirm against the Blender
        # version this file targets.
        for scene in bpy.data.scenes:
            for modifier in test_object.modifiers:
                if modifier.type == 'FLUID':
                    bpy.ops.fluid.bake_all()
                    break
                elif modifier.type == 'CLOTH' or modifier.type == 'SOFT_BODY':
                    test_object.modifiers[test_modifier_name].point_cache.frame_end = frame_end
                    override_setting = modifier.point_cache
                    override = {'scene': scene, 'active_object': test_object, 'point_cache': override_setting}
                    bpy.ops.ptcache.bake(override, bake=True)
                    break
                elif modifier.type == 'DYNAMIC_PAINT':
                    dynamic_paint_setting = modifier.canvas_settings.canvas_surfaces.active
                    override_setting = dynamic_paint_setting.point_cache
                    override = {'scene': scene, 'active_object': test_object, 'point_cache': override_setting}
                    bpy.ops.ptcache.bake(override, bake=True)
                    break
    def _apply_particle_system(self, test_object, particle_sys_spec: ParticleSystemSpec):
        """
        Applies Particle System settings to test objects
        """
        bpy.context.scene.frame_set(1)
        bpy.ops.object.select_all(action='DESELECT')
        test_object.modifiers.new(particle_sys_spec.modifier_name, particle_sys_spec.modifier_type)
        settings_name = test_object.particle_systems.active.settings.name
        particle_setting = bpy.data.particles[settings_name]
        if self.verbose:
            print("Created modifier '{}' of type '{}'.".
                  format(particle_sys_spec.modifier_name, particle_sys_spec.modifier_type))
        for param_name in particle_sys_spec.modifier_parameters:
            try:
                if param_name == "seed":
                    # "seed" lives on the particle system, not on the settings datablock.
                    system_setting = test_object.particle_systems[particle_sys_spec.modifier_name]
                    setattr(system_setting, param_name, particle_sys_spec.modifier_parameters[param_name])
                else:
                    setattr(particle_setting, param_name, particle_sys_spec.modifier_parameters[param_name])
                if self.verbose:
                    print("\t set parameter '{}' with value '{}'".
                          format(param_name, particle_sys_spec.modifier_parameters[param_name]))
            except AttributeError:
                # Clean up first
                bpy.ops.object.delete()
                raise AttributeError("Modifier '{}' has no parameter named '{}'".
                                     format(particle_sys_spec.modifier_type, param_name))
        bpy.context.scene.frame_set(particle_sys_spec.frame_end)
        # Convert the emitted particles into real geometry and merge it into the object.
        test_object.select_set(True)
        bpy.ops.object.duplicates_make_real()
        test_object.select_set(True)
        bpy.ops.object.join()
        if self.apply_modifier:
            self._apply_modifier(test_object, particle_sys_spec.modifier_name)
    def _do_selection(self, mesh: bpy.types.Mesh, select_mode: str, selection: set):
        """
        Do selection on a mesh
        :param mesh: bpy.types.Mesh - input mesh
        :param: select_mode: str - selection mode. Must be 'VERT', 'EDGE' or 'FACE'
        :param: selection: set - indices of selection.
        Example: select_mode='VERT' and selection={1,2,3} selects vertices 1, 2 and 3 of input mesh
        """
        # deselect all
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.context.tool_settings.mesh_select_mode = (select_mode == 'VERT',
                                                      select_mode == 'EDGE',
                                                      select_mode == 'FACE')
        # Pick the element collection that matches the selection mode.
        items = (mesh.vertices if select_mode == 'VERT'
                 else mesh.edges if select_mode == 'EDGE'
                 else mesh.polygons if select_mode == 'FACE'
                 else None)
        if items is None:
            raise ValueError("Invalid selection mode")
        for index in selection:
            items[index].select = True
    def _apply_operator_edit_mode(self, test_object, operator: OperatorSpecEditMode):
        """
        Apply operator on test object.
        :param test_object: bpy.types.Object - Blender object to apply operator on.
        :param operator: OperatorSpecEditMode - OperatorSpecEditMode object with parameters.
        """
        self._do_selection(test_object.data, operator.select_mode, operator.selection)
        # Apply operator in edit mode.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_mode(type=operator.select_mode)
        mesh_operator = getattr(bpy.ops.mesh, operator.operator_name)
        try:
            retval = mesh_operator(**operator.operator_parameters)
        except AttributeError:
            raise AttributeError("bpy.ops.mesh has no attribute {}".format(operator.operator_name))
        except TypeError as ex:
            raise TypeError("Incorrect operator parameters {!r} raised {!r}".format(operator.operator_parameters, ex))
        if retval != {'FINISHED'}:
            raise RuntimeError("Unexpected operator return value: {}".format(retval))
        if self.verbose:
            print("Applied {}".format(operator))
        bpy.ops.object.mode_set(mode='OBJECT')
    def _apply_operator_object_mode(self, operator: OperatorSpecObjectMode):
        """
        Applies the object operator.
        """
        bpy.ops.object.mode_set(mode='OBJECT')
        object_operator = getattr(bpy.ops.object, operator.operator_name)
        try:
            retval = object_operator(**operator.operator_parameters)
        except AttributeError:
            raise AttributeError("bpy.ops.object has no attribute {}".format(operator.operator_name))
        except TypeError as ex:
            raise TypeError("Incorrect operator parameters {!r} raised {!r}".format(operator.operator_parameters, ex))
        if retval != {'FINISHED'}:
            raise RuntimeError("Unexpected operator return value: {}".format(retval))
        if self.verbose:
            print("Applied operator {}".format(operator))
    def _apply_deform_modifier(self, test_object, operation: list):
        """
        Apply a DeformModifierSpec: add its modifiers, optionally run its object
        operator, advance to the spec's frame, then apply the modifiers if requested.
        param: operation: list: List of modifiers or combination of modifier and object operator.
        """
        scene = bpy.context.scene
        scene.frame_set(1)
        bpy.ops.object.mode_set(mode='OBJECT')
        modifier_operations_list = operation.modifier_list
        modifier_names = []
        object_operations = operation.object_operator_spec
        for modifier_operations in modifier_operations_list:
            if isinstance(modifier_operations, ModifierSpec):
                self._add_modifier(test_object, modifier_operations)
                modifier_names.append(modifier_operations.modifier_name)
        if isinstance(object_operations, OperatorSpecObjectMode):
            self._apply_operator_object_mode(object_operations)
        scene.frame_set(operation.frame_number)
        if self.apply_modifier:
            for mod_name in modifier_names:
                self._apply_modifier(test_object, mod_name)
    def run_test(self):
        """
        Apply operations in self.operations_stack on self.test_object and compare the
        resulting mesh with self.expected_object.data
        :return: bool - True if the test passed, False otherwise.
        """
        # NOTE(review): _apply_modifier and _on_failed_test are called below but
        # are not defined in this excerpt — confirm they exist elsewhere in the file.
        self._test_updated = False
        bpy.context.view_layer.objects.active = self.test_object
        # Duplicate test object.
        bpy.ops.object.mode_set(mode="OBJECT")
        bpy.ops.object.select_all(action="DESELECT")
        bpy.context.view_layer.objects.active = self.test_object
        self.test_object.select_set(True)
        bpy.ops.object.duplicate()
        evaluated_test_object = bpy.context.active_object
        evaluated_test_object.name = "evaluated_object"
        if self.verbose:
            print()
            print(evaluated_test_object.name, "is set to active")
        # Add modifiers and operators.
        for operation in self.operations_stack:
            if isinstance(operation, ModifierSpec):
                self._add_modifier(evaluated_test_object, operation)
                if self.apply_modifier:
                    self._apply_modifier(evaluated_test_object, operation.modifier_name)
            elif isinstance(operation, OperatorSpecEditMode):
                self._apply_operator_edit_mode(evaluated_test_object, operation)
            elif isinstance(operation, OperatorSpecObjectMode):
                self._apply_operator_object_mode(operation)
            elif isinstance(operation, DeformModifierSpec):
                self._apply_deform_modifier(evaluated_test_object, operation)
            elif isinstance(operation, ParticleSystemSpec):
                self._apply_particle_system(evaluated_test_object, operation)
            else:
                raise ValueError("Expected operation of type {} or {} or {} or {}. Got {}".
                                 format(type(ModifierSpec), type(OperatorSpecEditMode),
                                        type(OperatorSpecObjectMode), type(ParticleSystemSpec), type(operation)))
        # Compare resulting mesh with expected one.
        # Compare only when self.do_compare is set to True, it is set to False for run-test and returns.
        if not self.do_compare:
            print("Meshes/objects are not compared, compare evaluated and expected object in Blender for "
                  "visualization only.")
            return False
        if self.verbose:
            print("Comparing expected mesh with resulting mesh...")
        evaluated_test_mesh = evaluated_test_object.data
        expected_mesh = self.expected_object.data
        if self.threshold:
            compare_result = evaluated_test_mesh.unit_test_compare(mesh=expected_mesh, threshold=self.threshold)
        else:
            compare_result = evaluated_test_mesh.unit_test_compare(mesh=expected_mesh)
        compare_success = (compare_result == 'Same')
        # Geometry matching is not enough: the selection state must match too.
        selected_evaluatated_verts = [v.index for v in evaluated_test_mesh.vertices if v.select]
        selected_expected_verts = [v.index for v in expected_mesh.vertices if v.select]
        if selected_evaluatated_verts != selected_expected_verts:
            compare_result = "Selection doesn't match"
            compare_success = False
        # Also check if invalid geometry (which is never expected) had to be corrected...
        validation_success = not evaluated_test_mesh.validate(verbose=True)
        if compare_success and validation_success:
            if self.verbose:
                print("Success!")
            # Clean up.
            if self.verbose:
                print("Cleaning up...")
            # Delete evaluated_test_object.
            bpy.ops.object.delete()
            return True
        else:
            return self._on_failed_test(compare_result, validation_success, evaluated_test_object)
class RunTest:
    """
    Helper class that stores and executes modifier tests.
    Example usage:
    >>> modifier_list = [
    >>> ModifierSpec("firstSUBSURF", "SUBSURF", {"quality": 5}),
    >>> ModifierSpec("firstSOLIDIFY", "SOLIDIFY", {"thickness_clamp": 0.9, "thickness": 1})
    >>> ]
    >>> operator_list = [
    >>> OperatorSpecEditMode("delete_edgeloop", {}, "EDGE", MONKEY_LOOP_EDGE),
    >>> ]
    >>> tests = [
    >>> MeshTest("Test1", "testCube", "expectedCube", modifier_list),
    >>> MeshTest("Test2", "testCube_2", "expectedCube_2", modifier_list),
    >>> MeshTest("MonkeyDeleteEdge", "testMonkey","expectedMonkey", operator_list)
    >>> ]
    >>> modifiers_test = RunTest(tests)
    >>> modifiers_test.run_all_tests()
    """
    def __init__(self, tests, apply_modifiers=False, do_compare=False):
        """
        Construct a modifier test runner.
        :param tests: list - list of MeshTest cases (modifier or operator tests). Each element in the list must contain the
        following
        in the correct order:
             0) test_name: str - unique test name
             1) test_object_name: bpy.Types.Object - test object
             2) expected_object_name: bpy.Types.Object - expected object
             3) modifiers or operators: list - list of mesh_test.ModifierSpec objects or
             mesh_test.OperatorSpecEditMode objects
        :param apply_modifiers: bool - if True, force each executed test to apply its modifiers.
        :param do_compare: bool - if True, force each executed test to compare against the expected object.
        """
        self.tests = tests
        # Fail fast on duplicate test names before anything is executed.
        self._ensure_unique_test_name_or_raise_error()
        self.apply_modifiers = apply_modifiers
        self.do_compare = do_compare
        self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
        self._failed_tests_list = []
    def _ensure_unique_test_name_or_raise_error(self):
        """
        Check if the test name is unique else raise an error.
        """
        all_test_names = []
        for each_test in self.tests:
            test_name = each_test.test_name
            all_test_names.append(test_name)
        seen_name = set()
        for ele in all_test_names:
            if ele in seen_name:
                raise ValueError("{} is a duplicate, write a new unique name.".format(ele))
            else:
                seen_name.add(ele)
    def run_all_tests(self):
        """
        Run all tests in self.tests list. Raises an exception if one the tests fails.
        Failures are collected first, then reported together with a command line
        that reproduces the failing test inside Blender.
        """
        for test_number, each_test in enumerate(self.tests):
            test_name = each_test.test_name
            if self.verbose:
                print()
                print("Running test {}/{}: {}...".format(test_number+1, len(self.tests), test_name))
            success = self.run_test(test_name)
            if not success:
                self._failed_tests_list.append(test_name)
        if len(self._failed_tests_list) != 0:
            print("\nFollowing tests failed: {}".format(self._failed_tests_list))
            blender_path = bpy.app.binary_path
            blend_path = bpy.data.filepath
            # Inspect the caller's frame to locate the test script on disk, so the
            # reproduction command points at the right Python file.
            frame = inspect.stack()[1]
            module = inspect.getmodule(frame[0])
            python_path = module.__file__
            print("Run following command to open Blender and run the failing test:")
            print("{} {} --python {} -- {} {}"
                  .format(blender_path, blend_path, python_path, "--run-test", "<test_name>"))
            raise Exception("Tests {} failed".format(self._failed_tests_list))
    def run_test(self, test_name: str):
        """
        Run a single test from self.tests list
        :param test_name: str - name of test
        :return: bool - True if test passed, False otherwise.
        """
        case = None
        for index, each_test in enumerate(self.tests):
            if test_name == each_test.test_name:
                case = self.tests[index]
                break
        if case is None:
            raise Exception('No test called {} found!'.format(test_name))
        test = case
        if self.apply_modifiers:
            test.apply_modifier = True
        if self.do_compare:
            test.do_compare = True
        success = test.run_test()
        if test.is_test_updated():
            # Run the test again if the blend file has been updated.
            success = test.run_test()
        return success
| [
2,
46424,
347,
43312,
38644,
38559,
24290,
9878,
11290,
46424,
198,
2,
198,
2,
220,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
220,
13096,
340,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
1378... | 2.418417 | 12,141 |
import copy
from functools import reduce
from operator import mul
from polyaxon.automl.matrix.utils import get_length, sample
from polyaxon.automl.search_managers.base import BaseManager
from polyaxon.automl.search_managers.spec import SuggestionSpec
from polyaxon.automl.search_managers.utils import get_random_generator
from polyaxon.schemas.polyflow.workflows import RandomSearchConfig
class RandomSearchManager(BaseManager):
    """Random search strategy manager for hyperparameter optimization."""
    # Schema consumed by this manager (polyflow RandomSearchConfig); BaseManager
    # presumably dispatches on CONFIG — confirm against base class.
    CONFIG = RandomSearchConfig
| [
11748,
4866,
198,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
10088,
1330,
35971,
198,
198,
6738,
7514,
897,
261,
13,
2306,
296,
75,
13,
6759,
8609,
13,
26791,
1330,
651,
62,
13664,
11,
6291,
198,
6738,
7514,
897,
261,
13,
2... | 3.529412 | 153 |
# Generated by Django 3.0.7 on 2020-07-18 18:17
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
2998,
12,
1507,
1248,
25,
1558,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#! /usr/bin/env python
"""
Początkowy moduł
"""
import argparse
import numpy
from bitmap_mapper.min_max_difference_coordinates_bitmap_mapper import MinMaxDifferenceCoordinatesBitmapMapper
from data_parsers.classify_data import ClassifyData
from data_parsers.learning_data import LearningData
from feature.simple_features.avg_size_of_hole_feature import AvgSizeOfHoleFeature
from feature.simple_features.avg_size_of_island_feature import AvgSizeOfIslandFeature
from feature.simple_features.first_quart_feature import FirstQuartFeature
from feature.simple_features.first_raw_moment_horizontal import FirstRawMomentHorizontalFeature
from feature.simple_features.first_raw_moment_vertical import FirstRawMomentVerticalFeature
from feature.simple_features.fourth_quart_feature import FourthQuartFeature
from feature.simple_features.longest_non_empty_antidiagonal_feature import LongestNonEmptyAntidiagonalFeature
from feature.simple_features.longest_non_empty_column_feature import LongestNonEmptyColumnFeature
from feature.simple_features.longest_non_empty_diagonal_feature import LongestNonEmptyDiagonalFeature
from feature.simple_features.longest_non_empty_row_feature import LongestNonEmptyRowFeature
from feature.simple_features.max_feature import MaxFeature
from feature.simple_features.max_histogram_feature import MaxHistogramFeature
from feature.simple_features.max_projection_horizontal_feature import MaxProjectionHorizontalFeature
from feature.simple_features.max_projection_horizontal_value_feature import MaxProjectionHorizontalValueFeature
from feature.simple_features.max_projection_vertical_feature import MaxProjectionVerticalFeature
from feature.simple_features.max_projection_vertical_value_feature import MaxProjectionVerticalValueFeature
from feature.simple_features.max_value_histogram_feature import MaxValueHistogramFeature
from feature.simple_features.mean_feature import MeanFeature
from feature.simple_features.median_feature import MedianFeature
from feature.simple_features.min_feature import MinFeature
from feature.simple_features.min_projection_horizontal_feature import MinProjectionHorizontalFeature
from feature.simple_features.min_projection_horizontal_value_feature import MinProjectionHorizontalValueFeature
from feature.simple_features.min_projection_vertical_feature import MinProjectionVerticalFeature
from feature.simple_features.min_projection_vertical_value_feature import MinProjectionVerticalValueFeature
from feature.simple_features.non_empty_columns_feature import NonEmptyColumnsFeature
from feature.simple_features.non_empty_rows_feature import NonEmptyRowsFeature
from feature.simple_features.number_of_holes_feature import NumberOfHolesFeature
from feature.simple_features.number_of_islands_feature import NumberOfIslandsFeature
from feature.simple_features.second_central_moment_horizontal import SecondCentralMomentHorizontalFeature
from feature.simple_features.second_central_moment_vertical import SecondCentralMomentVerticalFeature
from feature.simple_features.second_quart_feature import SecondQuartFeature
from feature.simple_features.third_quart_feature import ThirdQuartFeature
from feature_extractor.feature_extractor import FeatureExtractor
from learning import Learning, LearningClassify
def define_features() -> FeatureExtractor:
    """
    Build the FeatureExtractor initialized with every feature we currently have.
    Threshold convention used below: 0.05 targets near-black pixels,
    0.95 targets near-white pixels.
    :return: a fully populated FeatureExtractor
    """
    near_black = 0.05
    near_white = 0.95
    extractor = FeatureExtractor()
    # Order matters: features are registered exactly in the historical order
    # (numbered milestones kept from the original listing).
    for feature in [
        MaxFeature(),                                       # 1
        MinFeature(),
        MeanFeature(),
        MedianFeature(),
        NonEmptyColumnsFeature(near_black),                 # 5
        NonEmptyRowsFeature(near_black),
        ThirdQuartFeature(),
        SecondQuartFeature(),
        SecondCentralMomentVerticalFeature(),
        SecondCentralMomentHorizontalFeature(),             # 10
        NumberOfIslandsFeature(near_black),
        NumberOfHolesFeature(near_white),
        FirstRawMomentVerticalFeature(),
        FirstRawMomentHorizontalFeature(),
        AvgSizeOfIslandFeature(near_black),                 # 15
        AvgSizeOfHoleFeature(near_white),
        FourthQuartFeature(),
        LongestNonEmptyRowFeature(near_black),
        LongestNonEmptyDiagonalFeature(near_black),
        LongestNonEmptyColumnFeature(near_black),           # 20
        LongestNonEmptyAntidiagonalFeature(near_black),
        FirstQuartFeature(),                                # 22
        MaxProjectionHorizontalFeature(),
        MaxProjectionHorizontalValueFeature(),
        MaxProjectionVerticalFeature(),
        MaxProjectionVerticalValueFeature(),
        MinProjectionHorizontalFeature(),
        MinProjectionHorizontalValueFeature(),
        MinProjectionVerticalFeature(),
        MinProjectionVerticalValueFeature(),
        MaxHistogramFeature(),
        MaxValueHistogramFeature(),
    ]:
        extractor.add_feature(feature)
    return extractor
if __name__ == "__main__":
    # CLI with two sub-commands: "training" (fit a model) and "classify"
    # (apply a saved model to new objects).
    parser = argparse.ArgumentParser(description='TAIO obrazki w skali szarosci')
    subparser = parser.add_subparsers(dest='mode')
    parser_training = subparser.add_parser('training', help="Training mode")
    parser_training.add_argument("train_path", help="path to training dataset")
    parser_training.add_argument("test_path", help="path to testing dataset")
    parser_training.add_argument("-o", "--output", help="Output path for model from learning process",
                                 default="model.keras")
    parser_classify = subparser.add_parser('classify', help="Classification mode")
    parser_classify.add_argument("model_path", help="path to model file")
    parser_classify.add_argument("classification_path", help="path to objects to classify")
    parser_classify.add_argument("-o", "--output", help="Output path for classification result",
                                 default="output.txt")
    args = parser.parse_args()
    # NOTE(review): classify_main/train_main are not defined in this excerpt —
    # confirm they are provided elsewhere in the module.
    if args.mode == "classify":
        classify_main(args.model_path, args.classification_path, args.output)
    elif args.mode == "training":
        train_main(args.train_path, args.test_path, args.output)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
47,
420,
89,
128,
227,
30488,
322,
88,
953,
84,
41615,
198,
37811,
198,
11748,
1822,
29572,
198,
198,
11748,
299,
32152,
198,
198,
6738,
1643,
8899,
62,
76,
11463,... | 3.239726 | 2,044 |
def truncate(number, decimals=0):
    """
    Return *number* truncated (rounded toward zero) to ``decimals`` decimal places.

    :param number: the numeric value to truncate.
    :param decimals: non-negative int - number of decimal places to keep.
    :return: int when ``decimals == 0`` (via math.trunc), float otherwise.
    :raises TypeError: if ``decimals`` is not an integer.
    :raises ValueError: if ``decimals`` is negative.
    """
    from decimal import Decimal, ROUND_DOWN  # local: keeps this snippet self-contained
    if not isinstance(decimals, int):
        raise TypeError("decimal places must be an integer.")
    elif decimals < 0:
        raise ValueError("decimal places has to be 0 or more.")
    elif decimals == 0:
        return math.trunc(number)
    # Bug fix: the old implementation scaled by a binary float
    # (math.trunc(number * 10.0 ** decimals) / factor), which reintroduces
    # representation error — e.g. 29.99 * 100 == 2998.9999999999995, so
    # truncate(29.99, 2) returned 29.98. Going through Decimal(str(number))
    # truncates the number as it prints, which is what callers expect.
    quantum = Decimal(1).scaleb(-decimals)
    return float(Decimal(str(number)).quantize(quantum, rounding=ROUND_DOWN))
| [
198,
4299,
40122,
378,
7,
17618,
11,
875,
320,
874,
28,
15,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16409,
257,
1988,
40122,
515,
284,
257,
2176,
1271,
286,
32465,
4113,
13,
198,
220,
220,
220,
37227,
198,
220,
220,
2... | 2.656805 | 169 |
#!/usr/bin/env python3
import sys
import os
import subprocess
import hashlib
import typing
import dataclasses
# Base Dockerfile template: start from Arch Linux and update the system.
# Package-install commands are appended later (see `construct_dockerfile()`,
# referenced inside the template but not part of this excerpt — confirm).
DOCKERFILE_BASIC = """
FROM archlinux
# Update the system
RUN pacman --noconfirm -Syu
# We will append commands to install packages later. See the function `construct_dockerfile()`
"""
def calc_image_hash(config: "Config") -> str:
    """
    Calculate the image hash for the given config.

    The hash is the MD5 hex digest of the config's ``packages`` string, so two
    configs with identical package lists map to the same image.

    :param config: Config - must expose a ``packages`` string attribute.
    :return: str - 32-character hexadecimal MD5 digest.
    """
    # Bug fix: the original had `@dataclasses.dataclass` stacked directly on
    # this function; that decorator is only meaningful on classes (it most
    # likely belonged to the `Config` class) and has been removed. The
    # annotation is quoted so the def no longer eagerly evaluates `Config`.
    return hashlib.md5(config.packages.encode()).hexdigest()
def docker_images(config: "Config") -> typing.Iterator[str]:
    """
    Yield the tags ("repository:tag") of all docker images currently present.

    :param config: Config - must expose ``docker_cmd``, the docker binary to run.
    :raises subprocess.CalledProcessError: if the docker command exits non-zero
        (raised lazily, on first iteration of the generator).
    """
    command = [config.docker_cmd, "image", "ls",
               "--format", "{{.Repository}}:{{.Tag}}"]
    # check=True raises CalledProcessError on failure — same exception the old
    # manual p.check_returncode() produced, one call instead of two.
    result = subprocess.run(command, capture_output=True, text=True, check=True)
    yield from result.stdout.splitlines()
# Script entry point. NOTE(review): there is no `if __name__ == "__main__":`
# guard, so importing this module runs main() (defined elsewhere in the file).
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
12234,
8019,
198,
11748,
19720,
198,
11748,
4818,
330,
28958,
198,
198,
35,
11290,
1137,
25664,
62,
33,
1921... | 2.77377 | 305 |
import requests
from .models import TestResponse
from typing import Union,Dict
| [
11748,
7007,
198,
6738,
764,
27530,
1330,
6208,
31077,
198,
6738,
19720,
1330,
4479,
11,
35,
713,
628,
628,
628,
628,
628,
628
] | 3.913043 | 23 |
import os
import xarray as xr
# Resolve this script's directory and its parent so paths can be built
# relative to the repository layout.
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_path = os.path.dirname(dir_path)
import numpy as np
# Fixed seed for reproducible runs.
np.random.seed(10)
import matplotlib.pyplot as plt
from utils import plot_averaged_errors, plot_windowed_errors, plot_contours, plot_bars
if __name__ == '__main__':
print('Analysis module') | [
11748,
28686,
198,
11748,
2124,
18747,
355,
2124,
81,
220,
198,
15908,
62,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
4008,
198,
8000,
62,
6978,
796,
28686,
13,
6978,
13,
15908,... | 2.738095 | 126 |
import os
import subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.functional import interpolate
from loguru import logger
from tqdm import tqdm
import numpy as np
import wandb
from draw_concat import draw_concat
from generate_noise import generate_spatial_noise
from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world
from minecraft.level_renderer import render_minecraft
from models import calc_gradient_penalty, save_networks
from utils import interpolate3D
def update_noise_amplitude(z_prev, real, opt):
    """ Update the amplitude of the noise for the current scale according to the previous noise map. """
    # Root-mean-square error between the real level and the previous noise map,
    # scaled by the configured update factor.
    rmse = F.mse_loss(real, z_prev).sqrt()
    return opt.noise_update * rmse
def train_single_scale(D, G, reals, generators, noise_maps, input_from_prev_scale, noise_amplitudes, opt):
""" Train one scale. D and G are the current discriminator and generator, reals are the scaled versions of the
original level, generators and noise_maps contain information from previous scales and will receive information in
this scale, input_from_previous_scale holds the noise map and images from the previous scale, noise_amplitudes hold
the amplitudes for the noise in all the scales. opt is a namespace that holds all necessary parameters. """
current_scale = len(generators)
clear_empty_world(opt.output_dir, 'Curr_Empty_World') # reset tmp world
if opt.use_multiple_inputs:
real_group = []
nzx_group = []
nzy_group = []
nz_group = []
for scale_group in reals:
real_group.append(scale_group[current_scale])
nzx_group.append(scale_group[current_scale].shape[2])
nzy_group.append(scale_group[current_scale].shape[3])
nz_group.append((scale_group[current_scale].shape[2], scale_group[current_scale].shape[3]))
curr_noises = [0 for _ in range(len(real_group))]
curr_prevs = [0 for _ in range(len(real_group))]
curr_z_prevs = [0 for _ in range(len(real_group))]
else:
real = reals[current_scale]
nz = real.shape[2:]
padsize = int(1 * opt.num_layer) # As kernel size is always 3 currently, padsize goes up by one per layer
if not opt.pad_with_noise:
# pad_noise = nn.ConstantPad3d(padsize, 0)
# pad_image = nn.ConstantPad3d(padsize, 0)
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
else:
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
# setup optimizer
optimizerD = optim.Adam(D.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(G.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[1600, 2500], gamma=opt.gamma)
schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[1600, 2500], gamma=opt.gamma)
if current_scale == 0: # Generate new noise
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
z_opt = pad_noise(z_opt)
else: # Add noise to previous output
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = torch.zeros([1, opt.nc_current, nzx, nzy]).to(opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_opt = pad_noise(z_opt)
logger.info("Training at scale {}", current_scale)
grad_d_real = []
grad_d_fake = []
grad_g = []
for p in D.parameters():
grad_d_real.append(torch.zeros(p.shape).to(opt.device))
grad_d_fake.append(torch.zeros(p.shape).to(opt.device))
for p in G.parameters():
grad_g.append(torch.zeros(p.shape).to(opt.device))
for epoch in tqdm(range(opt.niter)):
step = current_scale * opt.niter + epoch
if opt.use_multiple_inputs:
group_steps = len(real_group)
noise_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
noise_ = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
noise_ = pad_noise(noise_)
noise_group.append(noise_)
else:
group_steps = 1
noise_ = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
noise_ = pad_noise(noise_)
for curr_inp in range(group_steps):
if opt.use_multiple_inputs:
real = real_group[curr_inp]
nz = nz_group[curr_inp]
z_opt = z_opt_group[curr_inp]
noise_ = noise_group[curr_inp]
prev_scale_results = input_from_prev_scale[curr_inp]
opt.curr_inp = curr_inp
else:
prev_scale_results = input_from_prev_scale
############################
# (1) Update D network: maximize D(x) + D(G(z))
###########################
for j in range(opt.Dsteps):
# train with real
D.zero_grad()
output = D(real).to(opt.device)
errD_real = -output.mean()
errD_real.backward(retain_graph=True)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_real[i], p.grad).mean().item())
diff_d_real = np.mean(cos_sim)
grad_d_real = grads_after
# train with fake
if (j == 0) & (epoch == 0):
if current_scale == 0: # If we are in the lowest scale, noise is generated from scratch
prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
prev_scale_results = prev
prev = pad_image(prev)
z_prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_prev = pad_noise(z_prev)
opt.noise_amp = 1
else: # First step in NOT the lowest scale
# We need to adapt our inputs from the previous scale and add noise to it
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=True)
prev = pad_image(prev)
z_prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rec", pad_noise, pad_image, opt)
z_prev = interpolate3D(z_prev, real.shape[-3:], mode="bilinear", align_corners=True)
opt.noise_amp = update_noise_amplitude(z_prev, real, opt)
z_prev = pad_image(z_prev)
else: # Any other step
if opt.use_multiple_inputs:
z_prev = curr_z_prevs[curr_inp]
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=False)
prev = pad_image(prev)
# After creating our correct noise input, we feed it to the generator:
noise = opt.noise_amp * noise_ + prev
fake = G(noise.detach(), prev)
# Then run the result through the discriminator
output = D(fake.detach())
errD_fake = output.mean()
# Backpropagation
errD_fake.backward(retain_graph=False)
# Gradient Penalty
gradient_penalty = calc_gradient_penalty(D, real, fake, opt.lambda_grad, opt.device)
gradient_penalty.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_fake[i], p.grad).mean().item())
diff_d_fake = np.mean(cos_sim)
grad_d_fake = grads_after
# Logging:
if step % 10 == 0:
wandb.log({f"D(G(z))@{current_scale}": errD_fake.item(),
f"D(x)@{current_scale}": -errD_real.item(),
f"gradient_penalty@{current_scale}": gradient_penalty.item(),
f"D_real_grad@{current_scale}": diff_d_real,
f"D_fake_grad@{current_scale}": diff_d_fake,
},
step=step, sync=False)
optimizerD.step()
if opt.use_multiple_inputs:
z_opt_group[curr_inp] = z_opt
input_from_prev_scale[curr_inp] = prev_scale_results
curr_noises[curr_inp] = noise
curr_prevs[curr_inp] = prev
curr_z_prevs[curr_inp] = z_prev
############################
# (2) Update G network: maximize D(G(z))
###########################
for j in range(opt.Gsteps):
G.zero_grad()
fake = G(noise.detach(), prev.detach(), temperature=1)
output = D(fake)
errG = -output.mean()
errG.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(G.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_g[i], p.grad).mean().item())
diff_g = np.mean(cos_sim)
grad_g = grads_after
if opt.alpha != 0: # i. e. we are trying to find an exact recreation of our input in the lat space
Z_opt = opt.noise_amp * z_opt + z_prev
G_rec = G(Z_opt.detach(), z_prev, temperature=1)
rec_loss = opt.alpha * F.mse_loss(G_rec, real)
rec_loss.backward(retain_graph=False) # TODO: Check for unexpected argument retain_graph=True
rec_loss = rec_loss.detach()
else: # We are not trying to find an exact recreation
rec_loss = torch.zeros([])
Z_opt = z_opt
optimizerG.step()
# More Logging:
if step % 10 == 0:
wandb.log({f"noise_amplitude@{current_scale}": opt.noise_amp,
f"rec_loss@{current_scale}": rec_loss.item(),
f"G_grad@{current_scale}": diff_g},
step=step, sync=False, commit=True)
# Rendering and logging images of levels
if epoch % 500 == 0 or epoch == (opt.niter - 1):
token_list = opt.token_list
to_level = one_hot_to_blockdata_level
try:
subprocess.call(["wine", '--version'])
real_scaled = to_level(real.detach(), token_list, opt.block2repr, opt.repr_type)
# Minecraft World
worldname = 'Curr_Empty_World'
clear_empty_world(opt.output_dir, worldname) # reset tmp world
to_render = [real_scaled, to_level(fake.detach(), token_list, opt.block2repr, opt.repr_type),
to_level(G(Z_opt.detach(), z_prev), token_list, opt.block2repr, opt.repr_type)]
render_names = [f"real@{current_scale}", f"G(z)@{current_scale}", f"G(z_opt)@{current_scale}"]
obj_pth = os.path.join(opt.out_, f"objects/{current_scale}")
os.makedirs(obj_pth, exist_ok=True)
for n, level in enumerate(to_render):
pos = n * (level.shape[0] + 5)
save_level_to_world(opt.output_dir, worldname, (pos, 0, 0), level, token_list, opt.props)
curr_coords = [[pos, pos + real_scaled.shape[0]],
[0, real_scaled.shape[1]],
[0, real_scaled.shape[2]]]
render_pth = render_minecraft(worldname, curr_coords, obj_pth, render_names[n])
wandb.log({render_names[n]: wandb.Object3D(open(render_pth))}, commit=False)
except OSError:
pass
# Learning Rate scheduler step
schedulerD.step()
schedulerG.step()
# Save networks
if opt.use_multiple_inputs:
z_opt = z_opt_group
torch.save(z_opt, "%s/z_opt.pth" % opt.outf)
save_networks(G, D, z_opt, opt)
wandb.save(opt.outf)
return z_opt, input_from_prev_scale, G
| [
11748,
28686,
198,
11748,
850,
14681,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,
13,
20471,
13,
45124... | 1.927007 | 7,124 |
import shutil
import subprocess
from pathlib import Path
from appimagelint.models import TestResult
from ..models import AppImage
from . import CheckBase
| [
11748,
4423,
346,
198,
11748,
850,
14681,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
598,
48466,
417,
600,
13,
27530,
1330,
6208,
23004,
198,
6738,
11485,
27530,
1330,
2034,
5159,
198,
6738,
764,
1330,
6822,
14881,
628
] | 4 | 39 |
#!/usr/bin/env python
"""Plot all graph metrics as histograms"""
import itertools
import sys
import operator
import numpy as np
import argparse
from general_seq import conv
from general_seq import seq_IO
from plot import conv as pconv
from plot import hist
from collections import Counter
import matplotlib.pyplot as plt
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument ('--list_nodes', '-d', nargs=2, action='append', help="text file which contains sequences and the label you want to use for the set")
parser.add_argument ('--output_prefix', help='output file prefix')
parser.add_argument ('--metric', default="metrics", help='name of metric to plot. To plot all metrics, input metrics')
args = parser.parse_args()
main(args.list_nodes, args.output_prefix, args.metric)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
43328,
477,
4823,
20731,
355,
1554,
26836,
37811,
198,
11748,
340,
861,
10141,
198,
11748,
25064,
198,
11748,
10088,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572... | 3.33463 | 257 |
# Left side navigator
import __global__,os
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets
NAVIGATION_STYLE_SHEET="""
QWidget
{
color: #1f1f1f;
background-color: #1f1f1f;
}
QPushButton
{
font-size: 32px;
border: none;
padding: 0px;
font-size: 32px;
padding-left: 0px;
padding-right: 0px;
background-color: #2f2f2f;
}
QPushButton:hover
{
background-color: #5f5f5f;
}
QLabel
{
background-color: transparent;
}
"""
NAVIGATION_STYLE_SHEET_ACTIVATED="""
QPushButton
{
font-size: 32px;
border: none;
padding: 0px;
font-size: 32px;
padding-left: 0px;
padding-right: 0px;
background-color: #5f5f5f;
}
QPushButton:hover
{
background-color: #5f5f5f;
}
QLabel
{
background-color: transparent;
}
"""
# clicked = QtCore.pyqtSignal()
# def paintEvent(self,event):
# painter = QtGui.QPainter(self)
# width = self.width()
# height = self.height()
# if self.__highlight: painter.fillRect(0,0,width,height,self.__highlightbrush)
# else : painter.fillRect(0,0,width,height,self.__brush)
# Internal Functions | [
2,
9578,
1735,
20436,
1352,
220,
198,
11748,
11593,
20541,
834,
11,
418,
198,
198,
11748,
9485,
48,
83,
20,
13,
48,
83,
14055,
220,
220,
220,
355,
33734,
14055,
198,
11748,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
220,
220,
220,
... | 2.099476 | 573 |
# -*- coding: utf-8 -*-
""" Exceptions that are used by SoCo """
class SoCoException(Exception):
""" base exception raised by SoCo, containing the UPnP error code """
class UnknownSoCoException(SoCoException):
""" raised if reason of the error can not be extracted
The exception object will contain the raw response sent back from the
speaker """
class SoCoUPnPException(SoCoException):
""" encapsulates UPnP Fault Codes raised in response to actions sent over
the network """
class CannotCreateDIDLMetadata(SoCoException):
""" Raised if a data container class cannot create the DIDL metadata due to
missing information
"""
class UnknownXMLStructure(SoCoException):
"""Raised if XML with and unknown or unexpected structure is returned"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
1475,
11755,
326,
389,
973,
416,
1406,
7222,
37227,
628,
198,
4871,
1406,
7222,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
2779,
6631,
4376,
416,
1406... | 3.58371 | 221 |
from chia_rs import Spend, SpendBundleConditions
__all__ = ["Spend", "SpendBundleConditions"]
| [
6738,
442,
544,
62,
3808,
1330,
48293,
11,
48293,
33,
31249,
25559,
1756,
198,
198,
834,
439,
834,
796,
14631,
4561,
437,
1600,
366,
4561,
437,
33,
31249,
25559,
1756,
8973,
198
] | 2.96875 | 32 |
# Copyright 2002 by Andrew Dalke. All rights reserved.
# Revisions 2007-2008 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Note that BioSQL (including the database schema and scripts) is
# available and licensed separately. Please consult www.biosql.org
_dbutils = {}
# Disabled: better safe than sorry
## def next_id(self, cursor, table):
## # XXX brain-dead! Hopefully, the database will enforce PK unicity..
## table = self.tname(table)
## sql = r"select 1+max(%s_id) from %s" % (table, table)
## cursor.execute(sql)
## rv = cursor.fetchone()
## return rv[0]
_dbutils["MySQLdb"] = Mysql_dbutils
_dbutils["psycopg"] = Psycopg_dbutils
_dbutils["psycopg2"] = Psycopg2_dbutils
class Pgdb_dbutils(Generic_dbutils):
"""Add support for pgdb in the PyGreSQL database connectivity package.
"""
_dbutils["pgdb"] = Pgdb_dbutils
| [
2,
15069,
6244,
416,
6858,
12348,
365,
13,
220,
1439,
2489,
10395,
13,
198,
2,
5416,
3279,
4343,
12,
11528,
416,
5613,
23769,
13,
198,
2,
770,
2438,
318,
636,
286,
262,
8436,
404,
7535,
6082,
290,
21825,
416,
663,
198,
2,
5964,
13... | 2.698953 | 382 |
from selenium.webdriver.common.by import By
| [
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
628
] | 3.214286 | 14 |
from .ConfigManager import ConfigManager
from .ConfigManagerCorona import ConfigManagerCorona
from .JsonFunction import JsonFunction | [
6738,
764,
16934,
13511,
1330,
17056,
13511,
198,
6738,
764,
16934,
13511,
10606,
4450,
1330,
17056,
13511,
10606,
4450,
198,
6738,
764,
41,
1559,
22203,
1330,
449,
1559,
22203
] | 4.551724 | 29 |
"""General helper classes and functions for all products."""
import netCDF4
import cloudnetpy.utils as utils
class CategorizeBits:
"""Class holding information about category and quality bits.
Args:
categorize_file (str): Categorize file name.
Attributes:
category_bits (dict): Dictionary containing boolean fields for `droplet`,
`falling`, `cold`, `melting`, `aerosol`, `insect`.
quality_bits (dict): Dictionary containing boolean fields for `radar`,
`lidar`, `clutter`, `molecular`, `attenuated`, `corrected`.
"""
category_keys = ('droplet', 'falling', 'cold', 'melting', 'aerosol',
'insect')
quality_keys = ('radar', 'lidar', 'clutter', 'molecular', 'attenuated',
'corrected')
def _read_bits(self, bit_type):
""" Converts bitfield into dictionary."""
nc = netCDF4.Dataset(self._categorize_file)
bitfield = nc.variables[f"{bit_type}_bits"][:]
keys = getattr(CategorizeBits, f"{bit_type}_keys")
bits = {key: utils.isbit(bitfield, i) for i, key in enumerate(keys)}
nc.close()
return bits
class ProductClassification(CategorizeBits):
"""Base class for creating different classifications in the child classes
of various Cloudnet products. Child of CategorizeBits class.
Args:
categorize_file (str): Categorize file name.
Attributes:
is_rain (ndarray): 1D array denoting rainy profiles.
is_undetected_melting (ndarray): 1D array denoting profiles which should
contain melting layer but was not detected from the data.
"""
def read_nc_fields(nc_file, names):
"""Reads selected variables from a netCDF file.
Args:
nc_file (str): netCDF file name.
names (str/list): Variables to be read, e.g. 'temperature' or
['ldr', 'lwp'].
Returns:
ndarray/list: Array in case of one variable passed as a string.
List of arrays otherwise.
"""
names = [names] if isinstance(names, str) else names
nc = netCDF4.Dataset(nc_file)
data = [nc.variables[name][:] for name in names]
nc.close()
return data[0] if len(data) == 1 else data
def interpolate_model(cat_file, names):
"""Interpolates 2D model field into dense Cloudnet grid.
Args:
cat_file (str): Categorize file name.
names (str/list): Model variable to be interpolated, e.g.
'temperature' or ['temperature', 'pressure'].
Returns:
ndarray/list: Array in case of one variable passed as a string.
List of arrays otherwise.
"""
names = [names] if isinstance(names, str) else names
data = [_interp_field(name) for name in names]
return data[0] if len(data) == 1 else data
| [
37811,
12218,
31904,
6097,
290,
5499,
329,
477,
3186,
526,
15931,
198,
11748,
2010,
34,
8068,
19,
198,
11748,
6279,
3262,
9078,
13,
26791,
355,
3384,
4487,
628,
198,
4871,
327,
47467,
1096,
33,
896,
25,
198,
220,
220,
220,
37227,
9487... | 2.539367 | 1,105 |
from typing import Optional, Dict, List
#: The redlock caches
INIESTA_CACHES: Dict[str, dict] = {
"iniesta1": {"HOST": "localhost", "PORT": 6379, "DATABASE": 1},
"iniesta2": {"HOST": "localhost", "PORT": 6379, "DATABASE": 2},
"iniesta3": {"HOST": "localhost", "PORT": 6379, "DATABASE": 3},
}
#: The initialization type Iniesta will be initialized with.
INIESTA_INITIALIZATION_TYPE: tuple = tuple()
# ["SNS_PRODUCER", "EVENT_POLLING", "QUEUE_POLLING", "CUSTOM"]
#: The topic arn for the SNS that will receive messages.
INIESTA_SNS_PRODUCER_GLOBAL_TOPIC_ARN: str = None
#: The number of messages to receive while polling. Value between 0-10
INIESTA_SQS_RECEIVE_MESSAGE_MAX_NUMBER_OF_MESSAGES: int = 10
#: The time to wait between receiving SQS messages. A value between 0-20 (0 for short polling).
INIESTA_SQS_RECEIVE_MESSAGE_WAIT_TIME_SECONDS: int = 20
# possible filters:
# if ends with ".*" then filter is concerted to prefix
# reference: https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html
#: The filters you would like for your application's queue to filter for.
INIESTA_SQS_CONSUMER_FILTERS: List[str] = []
#: If you would like to verify the filter policies on AWS match the filter policies declared in your application.
INIESTA_ASSERT_FILTER_POLICIES: bool = True
#: The event key that will be filtered.
INIESTA_SNS_EVENT_KEY: str = "iniesta_pass"
#: The default sqs queue name
INIESTA_SQS_QUEUE_NAME: Optional[str] = None
#: The SQS queue name template, if you have a normalized queue naming scheme.
INIESTA_SQS_QUEUE_NAME_TEMPLATE: str = "iniesta-{env}-{service_name}"
#: The retry count for attempting to acquire a lock.
INIESTA_LOCK_RETRY_COUNT: int = 1
#: The lock timeout for the message. Will release after defined value.
INIESTA_LOCK_TIMEOUT: int = 10
# mainly used for tests
# INIESTA_SQS_REGION_NAME: Optional[str] = None
INIESTA_SQS_ENDPOINT_URL: Optional[str] = None
#
# INIESTA_SNS_REGION_NAME: Optional[str] = None
INIESTA_SNS_ENDPOINT_URL: Optional[str] = None
INIESTA_DRY_RUN: bool = False
#: Your AWS Access Key if it is different from other access keys.
INIESTA_AWS_ACCESS_KEY_ID = None
#: Your AWS Secret Access Key if it is different from other access keys.
INIESTA_AWS_SECRET_ACCESS_KEY = None
#: Your AWS Default Region if it is iniesta specific
INIESTA_AWS_DEFAULT_REGION: Optional[str] = None
| [
6738,
19720,
1330,
32233,
11,
360,
713,
11,
7343,
198,
198,
2,
25,
383,
2266,
5354,
50177,
198,
1268,
11015,
5603,
62,
34,
16219,
1546,
25,
360,
713,
58,
2536,
11,
8633,
60,
796,
1391,
198,
220,
220,
220,
366,
5362,
18059,
16,
129... | 2.732491 | 871 |
"""The main API module. """
from __future__ import absolute_import, print_function
from .compiler import State, SplitState, Match, compile
from .tokenizer import to_postfix
def match(pattern, s):
"""Apply a pattern to a string and return the result of the match.
:param pattern: A POSIX-like regular expression.
:type pattern: str
:s: A string to match.
:type s: str
:returns: True if matches, False otherwise.
:rtype: bool
:raises: :py:class:`~MalformedRegex` if the regular expression is
malformed.
"""
postfix = to_postfix(pattern)
state = compile(postfix)
current_states = set()
update_states(current_states, state)
for c in s:
current_states = make_step(current_states, c)
return Match in current_states
| [
37811,
464,
1388,
7824,
8265,
13,
37227,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
198,
6738,
764,
5589,
5329,
1330,
1812,
11,
27758,
9012,
11,
13225,
11,
17632,
198,
6738,
764,
30001,
7509,
1330,
284,
6... | 2.908425 | 273 |
from typing import List, Union
from ornitho import APIRequester
| [
6738,
19720,
1330,
7343,
11,
4479,
198,
198,
6738,
25322,
342,
78,
1330,
7824,
16844,
7834,
628
] | 3.882353 | 17 |
#!/usr/bin/python3
# coding=utf-8
__version__ = "3.0.0-dev"
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
834,
9641,
834,
796,
366,
18,
13,
15,
13,
15,
12,
7959,
1,
198
] | 1.967742 | 31 |
import time
from distask import util
from distask.tiggers.base import Tigger
class IntervalTigger(Tigger):
'''每隔相同的时间会触发'''
def __getstate__(self):
"""Return state values to be pickled."""
return (self.microseconds)
def __setstate__(self, state):
"""Restore state from the unpickled state values."""
self.microseconds = state
| [
11748,
640,
198,
198,
6738,
1233,
2093,
1330,
7736,
198,
6738,
1233,
2093,
13,
83,
328,
5355,
13,
8692,
1330,
14189,
1362,
628,
198,
4871,
4225,
2100,
51,
15249,
7,
51,
15249,
2599,
198,
220,
220,
220,
705,
7061,
162,
107,
237,
4969... | 2.354037 | 161 |
import gym
from monte_carlo import Agent
if __name__=='__main__':
env=gym.make("Blackjack-v0")
agent=Agent()
n_episodes=500000
for i in range(n_episodes):
if i%50000==0:
print(f'starting episode {i}')
observation=env.reset()
done=False
while not done:
#choose an action based on the policy
action=agent.policy(observation)
# take the action
observation_, reward, done, info= env.step(action)
agent.memory.append((observation, reward))
observation=observation_
agent.update_V()
print(agent.V[(21, 3, True)])
print(agent.V[(4, 1, False)])
| [
11748,
11550,
201,
198,
6738,
937,
660,
62,
7718,
5439,
1330,
15906,
201,
198,
201,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
17365,
28,
1360,
76,
13,
15883,
7203,
9915,
19650,
12,
85,
15,
... | 2.011331 | 353 |
#!/bin/python3
# Copyright (C) 2020 Matheus Fernandes Bigolin <mfrdrbigolin@disroot.org>
# SPDX-License-Identifier: MIT
"""Day Seven, Handy Haversacks."""
# I had to postpone this day because I was not aware of the techniques of
# graph theory to solve this problem.
from sys import argv
from re import findall
from utils import open_file, arrange, usage_and_exit, transfiged, dictf, \
merge
def edges(graph):
"""Return the edges of a <graph>."""
edge = []
for vertex in graph:
for neighbour in graph[vertex]:
edge.append((vertex, neighbour))
return edge
def solve1(bags, elem):
"""Return a set of ancestors of <elem> in the graph <bags>."""
have = set()
for edge in edges(bags):
if edge[1] == elem:
have |= solve1(bags, edge[0]) | {edge[0]}
return have
def solve2(bags, elem):
"""Return the cumulative weight of elements from <elem> in <bags>."""
count = 0
for edge in edges(bags):
if edge[0] == elem:
count += edge[1][0] * solve2(bags, edge[1][1]) + edge[1][0]
return count
# Capture the bag name and its contents (ignoring weights).
UNWEIGHTED_REG = r"(?:^|\d+ ?)(.+?) bags?"
# Capture the bag's contents and its weights.
WEIGHTED_REG = r"(\d+) (.+?) bags?"
# Capture the bag name.
VERTEX_REG = r"^(.+?) bags"
if __name__ == "__main__":
usage_and_exit(len(argv) != 2)
arranged_data = arrange(open_file(argv[1]))
unweighted_data = merge([dictf(findall(UNWEIGHTED_REG, f))
for f in arranged_data])
weighted_data = merge([dictf(findall(VERTEX_REG, f) + transfiged
(findall(WEIGHTED_REG, f), (int, str)))
for f in arranged_data])
print(len(solve1(unweighted_data, "shiny gold")))
print(solve2(weighted_data, "shiny gold"))
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
2,
15069,
357,
34,
8,
12131,
6550,
258,
385,
26366,
274,
4403,
24910,
1279,
76,
69,
4372,
81,
14261,
24910,
31,
6381,
15763,
13,
2398,
29,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,... | 2.389527 | 783 |
import signal
from time import sleep
from nerddiary.asynctools.delayedsignal import DelayedKeyboardInterrupt
| [
11748,
6737,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
17156,
1860,
8042,
13,
292,
2047,
310,
10141,
13,
40850,
5379,
570,
282,
1330,
4216,
16548,
9218,
3526,
9492,
3622,
628,
198
] | 3.5 | 32 |
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import os
import torch
from torch.utils.data import DataLoader
import datasets
import util.misc as utils
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch, viz
from models import build_model
from models.backbone import build_swav_backbone, build_swav_backbone_old
from util.default_args import set_model_defaults, get_args_parser
PRETRAINING_DATASETS = ['imagenet', 'imagenet100', 'coco_pretrain', 'airbus_pretrain']
if __name__ == '__main__':
parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
set_dataset_path(args)
set_model_defaults(args)
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| [
2,
16529,
982,
198,
2,
1024,
687,
540,
38267,
49,
198,
2,
15069,
357,
66,
8,
12131,
24956,
7575,
13,
1439,
6923,
33876,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
685,
3826,
38559,
24290,
329,
3307,
60,
198... | 3.554217 | 415 |
from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
results_with_elif = results_formatter({
(2, 0), (13, 0)
})
results_with_else = results_formatter({
(2, 0), (11, 5)
})
results_is_ifexp = results_formatter({
(11, 5)
})
results_in_comprehensions = results_formatter({
(18, 10), (20, 11), (23, 4), (23, 16)
})
misc_results = results_formatter({
(30, 4)
})
all_results = (misc_results | results_with_elif | results_with_else |
results_is_ifexp | results_in_comprehensions)
@pytest.fixture
@pytest.mark.parametrize(('elif_'), [True, False, None])
@pytest.mark.parametrize(('else_'), [True, False, None])
@pytest.mark.parametrize(('ifexp'), [True, False, None])
@pytest.mark.parametrize(('consideration'), [True, None])
| [
6738,
11485,
26791,
1330,
2223,
11,
2482,
62,
687,
1436,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
11748,
389,
79,
198,
11748,
12972,
9288,
198,
11748,
28686,
198,
198,
43420,
62,
687,
1436,
796,
13027,
7,
43420,
62,
687,
1436,
1... | 2.512535 | 359 |
## 1. Sample Space ##
coin_toss_omega = {'TH','HT','HH','TT'}
## 2. Probability of Events ##
p_sum_6 = 5/ 36
p_lower_15 = 36/ 36
p_greater_13 = 0/ 36
## 3. Certain and Impossible Events ##
p_2_or_4 = 4/ 36
p_12_or_13 = 1/ 36
## 4. The Addition Rule ##
p_5_or_9 = (4/36) + (4/36)
p_even_or_less_2 = (18/36) + (0/36)
p_4_or_3_multiple = 3/36 + 12/36
## 5. Venn Diagrams ##
p_c = 3/6
p_d = 3/6
p_c_d_addition = p_c + p_d
p_c_d_formula = 4/6
print(p_c_d_addition)
print(p_c_d_formula)
## 6. Exceptions to the Addition Rule ##
p_f_or_t = 0.26 + 0.11 - 0.03
## 7. Mutually Exclusive Events ##
p_h_and_c = 0.08 + 0.11 - 0.17
## 8. Set Notation ##
operation_1 = False
operation_2 = True
operation_3 = False
operation_4 = True | [
2235,
352,
13,
27565,
4687,
22492,
198,
198,
3630,
62,
83,
793,
62,
462,
4908,
796,
1391,
6,
4221,
41707,
6535,
41707,
16768,
41707,
15751,
6,
92,
198,
198,
2235,
362,
13,
30873,
1799,
286,
18715,
22492,
198,
198,
79,
62,
16345,
62,... | 2.032967 | 364 |
Funcao(1,2,3, 'fabio')
'''
a utilização do args serve para poder passa mais de um parametro cqunado declaramos a função ou uma classe
sempre utilizamos * ou ** para transformar em tupla ou dicionario
'''
F1(nome ='Fabio',idade=25)
| [
198,
37,
19524,
5488,
7,
16,
11,
17,
11,
18,
11,
705,
36434,
952,
11537,
198,
7061,
6,
198,
64,
7736,
23638,
16175,
28749,
466,
26498,
4691,
31215,
279,
12342,
1208,
64,
285,
15152,
390,
23781,
5772,
316,
305,
269,
80,
403,
4533,
... | 2.306931 | 101 |
# A simple logger to export events to omnisci
from __future__ import absolute_import, division
import geoip2.database
import pymapd as pmd
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
"""
OmniSciDB Output
"""
| [
2,
317,
2829,
49706,
284,
10784,
2995,
284,
22284,
271,
979,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
198,
198,
11748,
40087,
541,
17,
13,
48806,
198,
198,
11748,
279,
4948,
499,
67,
355,
9114,
67,
198,
19... | 3.104762 | 105 |
import datetime
import sys
import pprint
date_start = datetime.date(2015, 10, 5)
date_end = datetime.date(2016, 2, 8)
date_delta = datetime.timedelta(days=1)
whatsapp_file = sys.argv[1]
with open(whatsapp_file, 'r') as fp:
lines = fp.readlines()
day_count = {}
while date_start <= date_end:
date_start += date_delta
day_count[date_start.strftime("%d/%m/%Y")] = 0
line_count = 0
for line in lines:
line_count += 1
try:
day_count[line[0:10]] += 1
except:
pass
max_count = max(day_count.values())
max_key = max(day_count, key=lambda k: day_count[k])
print("Maximum messages: ", max_count, " on date: ", max_key)
print("Total line count = ", line_count)
print("Total days = ", len(day_count))
| [
11748,
4818,
8079,
198,
11748,
25064,
198,
11748,
279,
4798,
198,
198,
4475,
62,
9688,
796,
4818,
8079,
13,
4475,
7,
4626,
11,
838,
11,
642,
8,
198,
4475,
62,
437,
796,
4818,
8079,
13,
4475,
7,
5304,
11,
362,
11,
807,
8,
198,
44... | 2.396104 | 308 |
test = {
'name': 'question 3.4',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> type(my_factorial) == types.FunctionType
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> len(param) # wrong number of argument
1
""",
'hidden': False,
'locked': False
}
],
'scored': False,
'setup': 'import types; import inspect; param = inspect.signature(my_factorial).parameters',
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> my_factorial(0)==math.factorial(0)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> my_factorial(1)==math.factorial(1)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> my_factorial(42)==math.factorial(42)
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': 'import math',
'teardown': '',
'type': 'doctest'
}
]
}
| [
9288,
796,
1391,
198,
220,
705,
3672,
10354,
705,
25652,
513,
13,
19,
3256,
198,
220,
705,
13033,
10354,
352,
11,
198,
220,
705,
2385,
2737,
10354,
685,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
705,
33964,
10354,
685,... | 1.719577 | 756 |
from django.urls import path
from .api.views import link_search_view, link_view
app_name = "links"
urlpatterns = [
path("", view=link_view),
path("search", link_search_view),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
15042,
13,
33571,
1330,
2792,
62,
12947,
62,
1177,
11,
2792,
62,
1177,
198,
198,
1324,
62,
3672,
796,
366,
28751,
1,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
... | 2.671429 | 70 |
import os
import stat
import sys
from .base import TempAppDirTestCase
from http_prompt import xdg
| [
11748,
28686,
198,
11748,
1185,
198,
11748,
25064,
198,
198,
6738,
764,
8692,
1330,
24189,
4677,
35277,
14402,
20448,
198,
6738,
2638,
62,
16963,
457,
1330,
2124,
67,
70,
628
] | 3.333333 | 30 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms.functional import to_tensor, to_pil_image
from captcha.image import ImageCaptcha
from tqdm import tqdm
import random
import numpy as np
from collections import OrderedDict
| [
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
11,
6060,
17401,
201,
198,
6738,
28034,
10178,
1... | 3.237624 | 101 |
import discord, os
from discord.ext import commands
from utils import checks, output, parsing
from aiohttp import ClientSession
import urllib.request
import json
| [
11748,
36446,
11,
28686,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
3384,
4487,
1330,
8794,
11,
5072,
11,
32096,
198,
6738,
257,
952,
4023,
1330,
20985,
36044,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
33918,
628
] | 4.075 | 40 |
# Author : Andreas Mussgiller
# Date : July 1st, 2010
# last update: $Date: 2010/03/17 18:17:34 $ by $Author: mussgill $
import FWCore.ParameterSet.Config as cms
#_________________________________HLT bits___________________________________________
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOTkAlCosmicsInCollisionsHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, ## choose logical OR between Triggerbits
eventSetupPathsKey = 'TkAlCosmicsInCollisions',
throw = False # tolerate triggers not available
)
# DCS partitions
# "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC"
# "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm"
# "BPIX","FPIX","ESp","ESm"
import DPGAnalysis.Skims.skim_detstatus_cfi
ALCARECOTkAlCosmicsInCollisionsDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(
DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'),
ApplyFilter = cms.bool(True),
AndOr = cms.bool(True),
DebugOn = cms.untracked.bool(False)
)
#_________________________ Cosmic During Collisions__________________________________
from RecoTracker.SpecialSeedGenerators.cosmicDC_cff import *
#________________________________Track selection____________________________________
# AlCaReco for track based alignment using Cosmic muons reconstructed by Cosmics during collisions
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOTkAlCosmicsInCollisions = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone(
src = 'cosmicDCTracks',
filter = True,
applyBasicCuts = True,
ptMin = 0., ##10
ptMax = 99999.,
pMin = 4., ##10
pMax = 99999.,
etaMin = -99., ##-2.4 keep also what is going through...
etaMax = 99., ## 2.4 ...both TEC with flat slope
nHitMin = 7,
nHitMin2D = 2,
chi2nMax = 999999.,
applyMultiplicityFilter = False,
applyNHighestPt = True, ## select only highest pT track
nHighestPt = 1
)
#________________________________Sequences____________________________________
seqALCARECOTkAlCosmicsInCollisions = cms.Sequence(cosmicDCTracksSeq*ALCARECOTkAlCosmicsInCollisionsHLT+ALCARECOTkAlCosmicsInCollisionsDCSFilter+ALCARECOTkAlCosmicsInCollisions)
| [
2,
6434,
220,
220,
220,
220,
1058,
33728,
43309,
70,
4665,
198,
2,
7536,
220,
220,
220,
220,
220,
220,
1058,
2901,
352,
301,
11,
3050,
198,
2,
938,
4296,
25,
720,
10430,
25,
3050,
14,
3070,
14,
1558,
1248,
25,
1558,
25,
2682,
72... | 2.77354 | 839 |
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
import argparse
import glob
import json
import logging
import os
import pathlib
from typing import Any, Dict, List, TextIO, Union
import motmetrics as mm
import numpy as np
from argoverse.evaluation.eval_utils import get_pc_inside_bbox, label_to_bbox, leave_only_roi_region
from argoverse.utils.json_utils import read_json_file
from argoverse.utils.ply_loader import load_ply
from argoverse.utils.se3 import SE3
from argoverse.utils.transform import quat2rotmat
min_point_num = 0
mh = mm.metrics.create()
logger = logging.getLogger(__name__)
_PathLike = Union[str, "os.PathLike[str]"]
def in_distance_range_pose(ego_center: np.ndarray, pose: np.ndarray, d_min: float, d_max: float) -> bool:
"""Determine if a pose is within distance range or not.
Args:
ego_center: ego center pose (zero if bbox is in ego frame).
pose: pose to test.
d_min: minimum distance range
d_max: maximum distance range
Returns:
A boolean saying if input pose is with specified distance range.
"""
dist = float(np.linalg.norm(pose[0:2] - ego_center[0:2]))
return dist > d_min and dist < d_max
def get_distance(x1: np.ndarray, x2: np.ndarray, name: str) -> float:
"""Get the distance between two poses, returns nan if distance is larger than detection threshold.
Args:
x1: first pose
x2: second pose
name: name of the field to test
Returns:
A distance value or NaN
"""
dist = float(np.linalg.norm(x1[name][0:2] - x2[name][0:2]))
return dist if dist < 2.25 else float(np.nan)
def eval_tracks(
path_tracker_output: str,
path_dataset: _PathLike,
d_min: float,
d_max: float,
out_file: TextIO,
centroid_method: str,
) -> None:
"""Evaluate tracking output.
Args:
path_tracker_output: path to tracker output
path_dataset: path to dataset
d_min: minimum distance range
d_max: maximum distance range
out_file: output file object
centroid_method: method for ground truth centroid estimation
"""
acc = mm.MOTAccumulator(auto_id=True)
path_track_data = sorted(glob.glob(os.fspath(path_tracker_output) + "/*"))
log_id = pathlib.Path(path_dataset).name
logger.info("log_id = %s", log_id)
city_info_fpath = f"{path_dataset}/city_info.json"
city_info = read_json_file(city_info_fpath)
city_name = city_info["city_name"]
logger.info("city name = %s", city_name)
ID_gt_all: List[str] = []
for ind_frame in range(len(path_track_data)):
if ind_frame % 50 == 0:
logger.info("%d/%d" % (ind_frame, len(path_track_data)))
timestamp_lidar = int(path_track_data[ind_frame].split("/")[-1].split("_")[-1].split(".")[0])
path_gt = os.path.join(
path_dataset, "per_sweep_annotations_amodal", f"tracked_object_labels_{timestamp_lidar}.json"
)
if not os.path.exists(path_gt):
logger.warning("Missing ", path_gt)
continue
gt_data = read_json_file(path_gt)
pose_data = read_json_file(f"{path_dataset}/poses/city_SE3_egovehicle_{timestamp_lidar}.json")
rotation = np.array(pose_data["rotation"])
translation = np.array(pose_data["translation"])
ego_R = quat2rotmat(rotation)
ego_t = translation
egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)
pc_raw0 = load_ply(os.path.join(path_dataset, f"lidar/PC_{timestamp_lidar}.ply"))
pc_raw_roi = leave_only_roi_region(
pc_raw0, egovehicle_to_city_se3, ground_removal_method="no", city_name=city_name
)
gt: Dict[str, Dict[str, Any]] = {}
id_gts = []
for i in range(len(gt_data)):
if gt_data[i]["label_class"] != "VEHICLE":
continue
bbox, orientation = label_to_bbox(gt_data[i])
pc_segment = get_pc_inside_bbox(pc_raw_roi, bbox)
center = np.array([gt_data[i]["center"]["x"], gt_data[i]["center"]["y"], gt_data[i]["center"]["z"]])
if (
len(pc_segment) >= min_point_num
and bbox[3] > 0
and in_distance_range_pose(np.zeros(3), center, d_min, d_max)
):
track_label_uuid = gt_data[i]["track_label_uuid"]
gt[track_label_uuid] = {}
if centroid_method == "average":
gt[track_label_uuid]["centroid"] = pc_segment.sum(axis=0) / len(pc_segment)
elif centroid_method == "label_center":
gt[track_label_uuid]["centroid"] = center
else:
logger.warning("Not implemented")
gt[track_label_uuid]["bbox"] = bbox
gt[track_label_uuid]["orientation"] = orientation
if track_label_uuid not in ID_gt_all:
ID_gt_all.append(track_label_uuid)
id_gts.append(track_label_uuid)
tracks: Dict[str, Dict[str, Any]] = {}
id_tracks: List[str] = []
track_data = read_json_file(path_track_data[ind_frame])
for track in track_data:
key = track["track_label_uuid"]
if track["label_class"] != "VEHICLE" or track["height"] == 0:
continue
center = np.array([track["center"]["x"], track["center"]["y"], track["center"]["z"]])
if in_distance_range_pose(np.zeros(3), center, d_min, d_max):
tracks[key] = {}
tracks[key]["centroid"] = center
id_tracks.append(key)
dists: List[List[float]] = []
for gt_key, gt_value in gt.items():
gt_track_data: List[float] = []
dists.append(gt_track_data)
for track_key, track_value in tracks.items():
gt_track_data.append(get_distance(gt_value, track_value, "centroid"))
acc.update(id_gts, id_tracks, dists)
mh = mm.metrics.create()
summary = mh.compute(
acc,
metrics=[
"num_frames",
"mota",
"motp",
"idf1",
"mostly_tracked",
"mostly_lost",
"num_false_positives",
"num_misses",
"num_switches",
"num_fragmentations",
],
name="acc",
)
logger.info("summary = %s", summary)
num_tracks = len(ID_gt_all)
fn = os.path.basename(path_tracker_output)
num_frames = summary["num_frames"][0]
mota = summary["mota"][0] * 100
motp = summary["motp"][0]
idf1 = summary["idf1"][0]
most_track = summary["mostly_tracked"][0] / num_tracks
most_lost = summary["mostly_lost"][0] / num_tracks
num_fp = summary["num_false_positives"][0]
num_miss = summary["num_misses"][0]
num_switch = summary["num_switches"][0]
num_flag = summary["num_fragmentations"][0]
out_string = (
f"{fn} {num_frames} {mota:.2f} {motp:.2f} {idf1:.2f} {most_track:.2f} "
f"{most_lost:.2f} {num_fp} {num_miss} {num_switch} {num_flag} \n"
)
out_file.write(out_string)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--path_tracker_output",
type=str,
default="../../argodataset_30Hz/test_label/028d5cb1-f74d-366c-85ad-84fde69b0fd3",
)
parser.add_argument(
"--path_labels", type=str, default="../../argodataset_30Hz/labels_v32/028d5cb1-f74d-366c-85ad-84fde69b0fd3"
)
parser.add_argument("--path_dataset", type=str, default="../../argodataset_30Hz/cvpr_test_set")
parser.add_argument("--centroid_method", type=str, default="average", choices=["label_center", "average"])
parser.add_argument("--flag", type=str, default="")
parser.add_argument("--d_min", type=float, default=0)
parser.add_argument("--d_max", type=float, default=100, required=True)
args = parser.parse_args()
logger.info("args = %s", args)
tracker_basename = os.path.basename(args.path_tracker_output)
out_filename = f"{tracker_basename}_{args.flag}_{int(args.d_min)}_{int(args.d_max)}_{args.centroid_method}.txt"
logger.info("output file name = %s", out_filename)
with open(out_filename, "w") as out_file:
eval_tracks(args.path_tracker_output, args.path_dataset, args.d_min, args.d_max, out_file, args.centroid_method)
| [
2,
1279,
15269,
13130,
11,
943,
2188,
9552,
11,
11419,
13,
28728,
739,
262,
17168,
5964,
13,
29,
198,
11748,
1822,
29572,
198,
11748,
15095,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
6738,
197... | 2.120802 | 3,990 |
from scapy.all import sniff, sendp
import struct
import sys
if __name__ == '__main__':
main()
| [
6738,
629,
12826,
13,
439,
1330,
26300,
11,
3758,
79,
198,
11748,
2878,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.828571 | 35 |
"""Derivation of variable ``xco2``."""
from iris import Constraint
from ._baseclass import DerivedVariableBase
from ._shared import column_average
class DerivedVariable(DerivedVariableBase):
"""Derivation of variable ``xco2``."""
@staticmethod
def required(project):
"""Declare the variables needed for derivation."""
required = [
{'short_name': 'co2'},
{'short_name': 'hus'},
{'short_name': 'zg'},
{'short_name': 'ps'},
]
return required
@staticmethod
def calculate(cubes):
"""Calculate the column-averaged atmospheric CO2 [1e-6]."""
co2_cube = cubes.extract_cube(
Constraint(name='mole_fraction_of_carbon_dioxide_in_air'))
print(co2_cube)
hus_cube = cubes.extract_cube(Constraint(name='specific_humidity'))
zg_cube = cubes.extract_cube(Constraint(name='geopotential_height'))
ps_cube = cubes.extract_cube(Constraint(name='surface_air_pressure'))
# Column-averaged CO2
xco2_cube = column_average(co2_cube, hus_cube, zg_cube, ps_cube)
xco2_cube.convert_units('1')
return xco2_cube
| [
37811,
28532,
26939,
286,
7885,
7559,
87,
1073,
17,
15506,
526,
15931,
198,
198,
6738,
4173,
271,
1330,
1482,
2536,
2913,
198,
198,
6738,
47540,
8692,
4871,
1330,
9626,
1572,
43015,
14881,
198,
6738,
47540,
28710,
1330,
5721,
62,
23913,
... | 2.266795 | 521 |
# -*- coding: utf-8 -*-
import yaml
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
331,
43695,
628,
198
] | 1.95 | 20 |
from datetime import timedelta
import simplekv
import simplekv.memory
from flask import Flask, request, jsonify
from flask_jwt_extended import JWTManager, jwt_required, \
get_jwt_identity, revoke_token, unrevoke_token, \
get_stored_tokens, get_all_stored_tokens, create_access_token, \
create_refresh_token, jwt_refresh_token_required, get_stored_token
# Setup flask
app = Flask(__name__)
app.secret_key = 'super-secret'
# Configure access token expires time
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=5)
# Enable and configure the JWT blacklist / token revoke. We are using an in
# memory store for this example. In production, you should use something
# persistant (such as redis, memcached, sqlalchemy). See here for options:
# http://pythonhosted.org/simplekv/
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_STORE'] = simplekv.memory.DictStore()
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = 'refresh'
jwt = JWTManager(app)
@app.route('/login', methods=['POST'])
@app.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
# Endpoint for listing tokens that have the same identity as you
@app.route('/auth/tokens', methods=['GET'])
@jwt_required
# Endpoint for listing all tokens. In your app, you should either not expose
# this endpoint, or put some addition security on top of it so only trusted users,
# (administrators, etc) can access it
@app.route('/auth/all-tokens')
# Endpoint for allowing users to revoke their tokens
@app.route('/auth/tokens/revoke/<string:jti>', methods=['PUT'])
@jwt_required
@app.route('/auth/tokens/unrevoke/<string:jti>', methods=['PUT'])
@jwt_required
@app.route('/protected', methods=['GET'])
@jwt_required
if __name__ == '__main__':
app.run()
| [
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
11748,
2829,
74,
85,
198,
11748,
2829,
74,
85,
13,
31673,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
198,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
449,
39386... | 2.858995 | 617 |
n = input( "Name: ")
print(f"My name is {n}") | [
77,
796,
5128,
7,
366,
5376,
25,
366,
8,
198,
4798,
7,
69,
1,
3666,
1438,
318,
1391,
77,
92,
4943
] | 2.142857 | 21 |
"""Testing TcEx Input module field types."""
# standard library
from typing import TYPE_CHECKING, Dict, List, Optional, Union
# third-party
import pytest
from pydantic import BaseModel, validator
# first-party
from tcex.input.field_types import String, always_array, conditional_required, string
from tcex.pleb.scoped_property import scoped_property
from tests.input.field_types.utils import InputTest
if TYPE_CHECKING:
# first-party
from tests.mock_app import MockApp
# pylint: disable=no-self-argument, no-self-use
class TestInputsFieldTypes(InputTest):
"""Test TcEx String Field Model Tests."""
def setup_method(self):
"""Configure setup before all tests."""
scoped_property._reset()
@pytest.mark.parametrize(
'input_value,expected,optional,fail_test',
[
#
# Pass Testing
#
# required, normal input
('string', 'string', False, False),
# required, empty input
('', '', False, False),
# optional, empty input
('', '', True, False),
# optional, null input
(None, None, True, False),
#
# Fail Testing
#
# required, null input
(None, None, False, True),
],
)
def test_field_model_string_input(
self,
input_value: str,
expected: str,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: String
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[String]
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='String',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
(
'input_value,expected,allow_empty,conditional_required_rules,'
'max_length,min_length,regex,optional,fail_test'
),
[
#
# Pass Testing
#
# required, normal input
('string', 'string', True, None, None, None, None, False, False),
# required, empty input
('', '', True, None, None, None, None, False, False),
# optional, empty input
('', '', True, None, None, None, None, True, False),
# optional, null input
(None, None, True, None, None, None, None, True, False),
# required, normal input, max_length=10
('string', 'string', True, None, 10, None, None, False, False),
# optional, normal input, max_length=10
('string', 'string', True, None, 10, None, None, True, False),
# required, normal input, min_length=2
('string', 'string', True, None, None, 2, None, False, False),
# optional, normal input, min_length=2
('string', 'string', True, None, None, 2, None, True, False),
# required, normal input, regex=string
('string', 'string', True, None, None, None, r'^string$', True, False),
# optional, null input, conditional_required=True
(
None,
None,
True,
[{'field': 'conditional', 'op': 'eq', 'value': 'optional'}],
None,
None,
None,
True,
False,
),
#
# Fail Testing
#
# required, null input
(None, None, True, None, None, None, None, False, True),
# required, empty input, allow_empty=False
('', None, False, None, None, None, None, False, True),
# required, empty input, conditional_required=True
(
'',
'string',
True,
[{'field': 'conditional', 'op': 'eq', 'value': 'required'}],
None,
None,
None,
False,
True,
),
# required, null input, conditional_required=True
(
None,
'string',
True,
[{'field': 'conditional', 'op': 'eq', 'value': 'required'}],
None,
None,
None,
False,
True,
),
# required, normal input, max_length=2
('string', 'string', True, None, 2, None, None, False, True),
# optional, normal input, max_length=2
('string', 'string', True, None, 2, None, None, True, True),
# required, normal input, min_length=10
('string', 'string', True, None, None, 10, None, False, True),
# optional, normal input, min_length=10
('string', 'string', True, None, None, 10, None, True, True),
# required, normal input, regex=string
('string', 'string', True, None, None, None, r'^string-extra$', True, True),
],
)
def test_field_model_string_custom_input(
self,
input_value: str,
expected: str,
allow_empty: bool,
conditional_required_rules: Optional[List[Dict[str, str]]],
max_length: int,
min_length: int,
regex: Optional[str],
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
conditional: str = 'required'
my_data: string(
allow_empty=allow_empty,
max_length=max_length,
min_length=min_length,
regex=regex,
)
_conditional_required = validator(
'my_data', allow_reuse=True, always=True, pre=True
)(conditional_required(rules=conditional_required_rules))
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
conditional: str = 'required'
my_data: Optional[
string(
allow_empty=allow_empty,
max_length=max_length,
min_length=min_length,
regex=regex,
)
]
_conditional_required = validator(
'my_data', allow_reuse=True, always=True, pre=True
)(conditional_required(rules=conditional_required_rules))
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='String',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
'input_value,expected,optional,fail_test',
[
#
# Pass Testing
#
# required, normal input
(['string'], ['string'], False, False),
# required, empty input
([], [], False, False),
# optional, empty input
([], [], True, False),
# optional, null input
(None, None, True, False),
#
# Fail Testing
#
# required, null input
(None, None, False, True),
],
)
def test_field_model_string_array_input(
self,
input_value: str,
expected: str,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: List[String]
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[List[String]]
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='StringArray',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
'input_value,expected,input_type,optional,fail_test',
[
#
# Pass Testing
#
# required, string input
('string', ['string'], 'String', False, False),
# required, array input
(['string'], ['string'], 'StringArray', False, False),
# required, empty string input
('', [], 'String', False, False),
# required, empty array input
([], [], 'StringArray', False, False),
# optional, empty string input
('', [], 'String', True, False),
# optional, empty array input
([], [], 'StringArray', True, False),
# optional, null input
(None, [], 'String', True, False),
#
# Fail Testing
#
# required, null input
(None, None, 'String', False, True),
],
)
def test_field_model_string_union_input(
self,
input_value: str,
expected: str,
input_type: str,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Union[String, List[String]]
_always_array = validator('my_data', allow_reuse=True)(always_array())
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[Union[String, List[String]]]
_always_array = validator('my_data', allow_reuse=True)(always_array())
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type=input_type,
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
('nested_reference,nested_value,value,expected_value'),
[
(
'#App:1234:my_ref!String',
'nested string',
'string with nested string: #App:1234:my_ref!String',
'string with nested string: nested string',
),
(
'#App:1234:my_ref!StringArray',
['nested string'],
'string with nested value: #App:1234:my_ref!StringArray',
'string with nested value: ["nested string"]',
),
(
'#App:1234:my_ref!Binary',
b'nested string',
'string with nested string: #App:1234:my_ref!Binary',
'string with nested string: <binary>',
),
(
'#App:1234:my_ref!BinaryArray',
[b'nested string'],
'string with nested string: #App:1234:my_ref!BinaryArray',
'string with nested string: <binary>',
),
(
'#App:1234:my_ref!KeyValue',
{'key': 'key', 'value': 'value', 'type': 'any'},
'string with nested string: #App:1234:my_ref!KeyValue',
'string with nested string: {"key": "key", "value": "value", "type": "any"}',
),
(
'#App:1234:my_ref!KeyValueArray',
[{'key': 'key', 'value': 'value', 'type': 'any'}],
'string with nested string: #App:1234:my_ref!KeyValueArray',
'string with nested string: [{"key": "key", "value": "value", "type": "any"}]',
),
(
'#App:1234:my_ref!TCEntity',
{'id': '1', 'value': '1.1.1.1', 'type': 'Address'},
'string with nested string: #App:1234:my_ref!TCEntity',
'string with nested string: {"id": "1", "value": "1.1.1.1", "type": "Address"}',
),
(
'#App:1234:my_ref!TCEntityArray',
[{'id': '1', 'value': '1.1.1.1', 'type': 'Address'}],
'string with nested string: #App:1234:my_ref!TCEntityArray',
'string with nested string: [{"id": "1", "value": "1.1.1.1", "type": "Address"}]',
),
(
'#App:1234:my_ref!String',
None,
'string with nested string: #App:1234:my_ref!String',
'string with nested string: <null>',
),
],
)
def test_field_type_string_with_nested_reference(
self,
nested_reference,
nested_value,
value,
expected_value,
playbook_app: 'MockApp',
):
"""Test String field type with nested reference.
Args:
nested_reference: nested variable reference found within string
nested_value: the value that nested_reference should resolve to
value: the String value exactly as passed in from the UI
expected_value: The String value as passed in from the UI after nested reference
is resolved
playbook_app (fixture): An instance of MockApp.
"""
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: String
config_data = {'my_data': '#App:1234:my_data!String'}
app = playbook_app(config_data=config_data)
tcex = app.tcex
self._stage_key_value('my_ref', nested_reference, nested_value, tcex)
self._stage_key_value(
'my_data',
'#App:1234:my_data!String',
value,
tcex,
)
tcex.inputs.add_model(PytestModel)
assert tcex.inputs.model.my_data == expected_value
| [
37811,
44154,
309,
66,
3109,
23412,
8265,
2214,
3858,
526,
15931,
198,
2,
3210,
5888,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
360,
713,
11,
7343,
11,
32233,
11,
4479,
198,
198,
2,
2368,
12,
10608,
198,
11748,
12972,
9288,
... | 1.916925 | 7,740 |
import time
import base64
import hashlib
import functools
from flask import g, request, session, current_app
from flask import flash, url_for, redirect, abort
from flask.ext.babel import lazy_gettext as _
from .models import Asset_model
| [
11748,
640,
198,
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
11748,
1257,
310,
10141,
198,
6738,
42903,
1330,
308,
11,
2581,
11,
6246,
11,
1459,
62,
1324,
198,
6738,
42903,
1330,
7644,
11,
19016,
62,
1640,
11,
18941,
11,
15614,
1... | 3.661538 | 65 |
from __future__ import absolute_import, print_function
from datetime import datetime
import json
import urllib
import time
from pprint import pprint
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from itertools import ifilter
if __name__ == '__main__':
starttime = datetime.now()
api = getTwitterAPIHandle()
myid = api.me().id
#Get Default number of recent tweets
dm = api.direct_messages()
#print (dm)
filters= (filter_for_author,)
for twt in nFilter(filters, dm):
print (twt.text.encode('utf-8'))
print (twt.created_at) | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
11748,
640,
198,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
6738,
4... | 2.869955 | 223 |
from torch.nn.modules.module import Module
from ..functions.hough_voting import HoughVotingFunction
# from functions.hough_voting import HoughVotingFunction
| [
6738,
28034,
13,
20471,
13,
18170,
13,
21412,
1330,
19937,
198,
6738,
11485,
12543,
2733,
13,
71,
619,
62,
85,
10720,
1330,
367,
619,
53,
10720,
22203,
198,
2,
422,
5499,
13,
71,
619,
62,
85,
10720,
1330,
367,
619,
53,
10720,
22203,... | 3.568182 | 44 |
import rnn
import csv, random
from functions import *
#get the price and volume data from file
if __name__ == '__main__':
main()
| [
11748,
374,
20471,
198,
198,
11748,
269,
21370,
11,
4738,
198,
198,
6738,
5499,
1330,
1635,
198,
198,
2,
1136,
262,
2756,
290,
6115,
1366,
422,
2393,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
... | 2.978261 | 46 |
"""CelebA Dataset.
Notes:
- `http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html`
"""
# default packages
import logging
import pathlib
import shutil
import typing as t
import zipfile
# third party packages
import pandas as pd
import requests
import tqdm as tqdm_std
# my packages
import src.data.dataset as dataset
import src.data.utils as ut
# logger
_logger = logging.getLogger(__name__)
def _download(filepath: pathlib.Path, chunksize: int = 32768) -> None:
"""Download CelebA Dataset.
Args:
filepath (pathlib.Path): ダウンロードしたファイルを置くファイルパス.
chunksize (int, optional): ダウンロードのチャンクサイズ. Defaults to 32768.
Notes:
- reference:
`https://gist.github.com/charlesreid1/4f3d676b33b95fce83af08e4ec261822`
"""
URL = "https://docs.google.com/uc?export=download"
ID = "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
with requests.Session() as session:
params: t.Dict[str, t.Any] = dict(id=ID)
response = session.get(URL, params=params, stream=True)
params["confirm"] = _get_confirm_token(response)
response = session.get(URL, params=params, stream=True)
_save_response_content(response, filepath, chunksize)
def _get_confirm_token(response: requests.Response) -> t.Optional[str]:
"""トークンを生成する.
Args:
response (requests.Response): 取得する先のレスポンス.
Returns:
t.Optional[str]: トークン.
"""
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def _save_response_content(
response: requests.Response, filepath: pathlib.Path, chunksize: int = 32768,
) -> None:
"""レスポンス内容をファイルとして保存する.
Args:
response (requests.Response): レスポンス.
filepath (pathlib.Path): 保存先のファイルパス.
chunksize (int, optional): ダウンロードするチャンクサイズ. Defaults to 32768.
"""
with open(str(filepath), "wb") as f:
for chunk in tqdm_std.tqdm(response.iter_content(chunksize)):
if chunk:
f.write(chunk)
def main() -> None:
"""Celeba データセットをダウンロードし、学習及びテスト用のファイルリストを生成する."""
celeba = Celeba()
celeba.save()
if __name__ == "__main__":
try:
ut.init_root_logger()
main()
except Exception as e:
_logger.exception(e)
| [
37811,
42741,
65,
32,
16092,
292,
316,
13,
198,
198,
16130,
25,
198,
220,
220,
220,
532,
4600,
4023,
1378,
3020,
23912,
13,
494,
13,
66,
7456,
74,
13,
15532,
13,
71,
74,
14,
42068,
14,
42741,
65,
32,
13,
6494,
63,
198,
37811,
19... | 2.099908 | 1,091 |
# Aula 6 - Desafio 3: Somando dois numeros
num1 = int(input('1º numero: '))
num2 = int(input('2º numero: '))
soma = (num1 + num2)
print(f'A soma entre {num1} e {num2} eh igual a \033[7;33m{soma}\033[m')
| [
2,
317,
4712,
718,
532,
2935,
1878,
952,
513,
25,
9995,
25440,
466,
271,
5470,
418,
198,
198,
22510,
16,
796,
493,
7,
15414,
10786,
16,
36165,
997,
3529,
25,
705,
4008,
198,
22510,
17,
796,
493,
7,
15414,
10786,
17,
36165,
997,
35... | 2.080808 | 99 |
#!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import torch as th
import torchvision
from tqdm import tqdm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=1000)
parser.add_argument("--batch-size", type=int, default=20)
parser.add_argument("--fashion", action="store_true", default=False)
args = parser.parse_args()
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
... | 2.98895 | 181 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test writing to stdout and stderr."""
import os
import sys
import pytest
import colorise
@pytest.mark.skip_on_windows
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
14402,
3597,
284,
14367,
448,
290,
336,
1082,
81,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
25064,
19... | 2.597015 | 67 |
import datetime
import os
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from tqdm import trange
from common.evaluators.bert_evaluator import BertEvaluator
from datasets.bert_processors.abstract_processor import convert_examples_to_features
from datasets.bert_processors.abstract_processor import convert_examples_to_hierarchical_features
from utils.optimization import warmup_linear
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
from pathlib import Path
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
plt.savefig('grads.png')
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
11,
14534,
16305,
20053,
11,
309,
22854,
27354,
292,
316,
198,
67... | 2.574007 | 831 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# noris network AG 2020
# Tim Zöllner
__date__ = '2020-06-22'
__version__ = '0.4.2'
#from docopt import docopt
import argparse
import sys
import ssl
import json
import requests
from requests.auth import HTTPBasicAuth
from datetime import datetime
# check elasticsearch module
try:
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError, \
TransportError, \
ConnectionTimeout, \
NotFoundError, \
RequestError
except ImportError as missing:
print (
'Error - could not import all required Python modules\n"%s"'
% missing + '\nDependency installation with pip:\n'
'"# pip install docopt elasticsearch"'
'or use your prefered package manage, i.e. APT or YUM.\n Example: yum install python-docopt python-elasticsearch')
sys.exit(2)
#ssl._create_default_https_context = ssl._create_unverified_context
if __name__ == '__main__':
args = parser_command_line()
if args.subparser_name == 'cluster':
API_CLUSTER_HEALTH = 'https://{}:9200/_cluster/health'.format(
args.client_node
)
if args.cluster_health:
result = getAPI(API_CLUSTER_HEALTH)
check_cluster_health(
result['status'],
args.perf_data,
args.only_graph,
)
if args.subparser_name == 'node':
API_NODES_STATS = 'https://{}:9200/_nodes/{}/stats'.format(
args.client_node,
args.node_name,
)
if args.heap_used_percent:
result = getAPI(API_NODES_STATS)
node = result["nodes"].values()[0]
check_heap_used_percent(
node['jvm']['mem']['heap_used_percent'],
args.perf_data,
args.only_graph,
)
if args.documents_count:
result = getAPI(API_NODES_STATS)
node = result["nodes"].values()[0]
check_documents_count(
node['indices']['docs']['count'],
args.perf_data,
args.only_graph,
)
if args.ratio_search_query_time:
result = getAPI(API_NODES_STATS)
node = result["nodes"].values()[0]
query_time_in_millis = float(
node['indices']['search']['query_time_in_millis']
)
query_total = float(
node['indices']['search']['query_total']
)
ratio = round(
query_time_in_millis/query_total,
2
)
check_ratio_search_query_time(
ratio,
args.perf_data,
args.only_graph,
)
if args.subparser_name == 'indices':
es = Elasticsearch(host=args.client_node)
if args.last_entry:
API_ALIASES = 'https://{}:9200/{}/_alias'
if args.index:
pattern = args.index
elif args.prefix:
pattern = args.prefix + "*"
else:
print("Invalid index name or prefix")
sys.exit(1)
index = get_indices(
API_ALIASES.format(
args.client_node,
pattern,
)
)[-1]
last_timestamp = get_last_timestamp(
index=index,
)
timedelta = (datetime.utcnow() - last_timestamp).seconds
check_last_entry(
timedelta,
args.perf_data,
args.only_graph,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
4249,
271,
3127,
13077,
12131,
198,
2,
5045,
1168,
9101,
297,
1008,
198,
834,
4475,
834,
796,
705,
42334,
12,
3312... | 1.891808 | 1,941 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
#--- for matplotlib
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from matplotlib import cm
#import seaborn as sns
#-------------------------
NUM_MULTIPLE = 5
NUM_SECTION = 500
#-------------------------
#--- plot data
#--- plot data with Lorentzian function
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
6329,
329,
2603,
29487,
8019,
198,
11748,
260... | 2.963504 | 137 |
from .base import BaseField
| [
6738,
764,
8692,
1330,
7308,
15878,
628
] | 4.142857 | 7 |
from tests import ScraperTest
from recipe_scrapers.cookpad import CookPad
| [
6738,
5254,
1330,
1446,
38545,
14402,
198,
198,
6738,
8364,
62,
1416,
2416,
364,
13,
27916,
15636,
1330,
8261,
26114,
628
] | 3.619048 | 21 |
from setuptools import setup
setup(
name='qrgen',
version='0.0.1',
entry_points={
'console_scripts': [
'qrgen=qrgen:run'
]
}
) | [
6738,
900,
37623,
10141,
1330,
9058,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
80,
81,
5235,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
3256,
198,
220,
220,
220,
5726,
62,
13033,
34758,
198,
220,
220,
220,
22... | 1.847826 | 92 |
from django.db import models
from django.urls import reverse
from django.template.defaultfilters import slugify
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,
1330,
31065,
1958,
220,
628
] | 3.677419 | 31 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-14 22:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
16,
319,
1584,
12,
2931,
12,
1415,
2534,
25,
2548,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.73913 | 69 |