content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
from ukfm import SO3, SE3
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class PENDULUM:
"""Pendulum example, where the state lives on the 2-sphere.
See a text description of the spherical pendulum dynamics in
:cite:`sjobergAn2019`, Section 7, and :cite:`kotaruVariation2019`.
:arg T: sequence time (s).
:arg model_freq: model frequency (Hz).
"""
g = 9.81
"gravity constant (m/s^2) :math:`g`."
m = 1.0
"mass of payload (kg) :math:`m`."
b = 0.0
"damping :math:`b`."
L = 1.3
"wire length :math:`L`."
e3 = -np.array([0, 0, 1])
"third coordinate vector :math:`\mathbf{e}^b=-[0,0,1]^T`."
H = np.zeros((2, 3))
"observability matrix :math:`\mathbf{H}`."
H[:, 1:3] = np.eye(2)
class STATE:
"""State of the system.
It represents the orientation of the wire and its angular velocity.
.. math::
\\boldsymbol{\\chi} \in \\mathcal{M} = \\left\\{ \\begin{matrix}
\\mathbf{C} \in SO(3),
\\mathbf{u} \in \\mathbb R^3
\\end{matrix} \\right\\}
:ivar Rot: rotation matrix :math:`\mathbf{C}`.
:ivar u: angular velocity vector :math:`\mathbf{u}`.
"""
class INPUT:
"""Input of the propagation model.
The model does not require any input.
"""
@classmethod
def f(cls, state, omega, w, dt):
""" Propagation function.
.. math::
\\mathbf{C}_{n+1} &= \\mathbf{C}_{n} \\exp\\left(\\left(\\mathbf{u}
+ \\mathbf{w}^{(0:3)} \\right) dt\\right), \\\\
\\mathbf{u}_{n+1} &= \\mathbf{u}_{n} + \\dot{\\mathbf{u}} dt,
where
.. math::
\\dot{\\mathbf{u}} = \\begin{bmatrix}
-\\omega_y \\omega_x\\ \\\\ \\omega_x \\omega_z
\\\\ 0 \end{bmatrix} +
\\frac{g}{l} \\left(\\mathbf{e}^b \\right)^\\wedge
\\mathbf{C}^T \\mathbf{e}^b + \\mathbf{w}^{(3:6)}
:var state: state :math:`\\boldsymbol{\\chi}`.
:var omega: input :math:`\\boldsymbol{\\omega}`.
:var w: noise :math:`\\mathbf{w}`.
:var dt: integration step :math:`dt` (s).
"""
e3_i = state.Rot.T.dot(cls.e3)
u = state.u
d_u = np.array([-u[1]*u[2], u[0]*u[2], 0]) + \
cls.g/cls.L*np.cross(cls.e3, e3_i)
new_state = cls.STATE(
Rot=state.Rot.dot(SO3.exp((u+w[:3])*dt)),
u=state.u + (d_u+w[3:6])*dt
)
return new_state
@classmethod
def h(cls, state):
""" Observation function.
.. math::
h\\left(\\boldsymbol{\\chi}\\right) =
\\mathbf{H} \mathbf{x},
where
.. math::
\mathbf{H}&= \\begin{bmatrix} 0 & 1 & 0 \\\\ 0
& 0 & 1 \end{bmatrix} \\\\
\mathbf{x} &= L \\mathbf{C} \mathbf{e}^b
with :math:`\mathbf{x}` the position of the pendulum.
:var state: state :math:`\\boldsymbol{\\chi}`.
"""
x = cls.L*state.Rot.dot(cls.e3)
return cls.H.dot(x)
@classmethod
def phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\left( \\begin{matrix}
\\exp\\left(\\boldsymbol{\\xi}^{(0:3)}\\right) \\mathbf{C} \\\\
\\mathbf{u} + \\boldsymbol{\\xi}^{(3:6)}
\\end{matrix} \\right)
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)
\\times \\mathbb R^3`.
Its corresponding inverse operation is :meth:`~ukfm.PENDULUM.phi_inv`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO3.exp(xi[:3])),
u=state.u + xi[3:6],
)
return new_state
@classmethod
def phi_inv(cls, state, hat_state):
"""Inverse retraction.
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
\\right) = \\left( \\begin{matrix}
\\log\\left(\\mathbf{\\hat{C}}^T \\mathbf{C} \\right)\\\\
\\mathbf{u} - \\mathbf{\\hat{u}}
\\end{matrix} \\right)
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)
\\times \\mathbb R^3`.
Its corresponding retraction is :meth:`~ukfm.PENDULUM.phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
xi = np.hstack([SO3.log(hat_state.Rot.T.dot(state.Rot)),
state.u - hat_state.u])
return xi
@classmethod
@classmethod
| [
11748,
299,
32152,
355,
45941,
198,
6738,
334,
74,
38353,
1330,
12809,
18,
11,
7946,
18,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
1330,
12176,
274,... | 1.900667 | 2,547 |
"""Run the Sample ACE problem from [Breiman85]_."""
import numpy.random
import scipy.special
from ace import ace
def build_sample_ace_problem_breiman85(N=200):
"""Sample problem from Breiman 1985."""
x_cubed = numpy.random.standard_normal(N)
x = scipy.special.cbrt(x_cubed)
noise = numpy.random.standard_normal(N)
y = numpy.exp((x ** 3.0) + noise)
return [x], y
def build_sample_ace_problem_breiman2(N=500):
"""Build sample problem y(x) = exp(sin(x))."""
x = numpy.linspace(0, 1, N)
# x = numpy.random.uniform(0, 1, size=N)
noise = numpy.random.standard_normal(N)
y = numpy.exp(numpy.sin(2 * numpy.pi * x)) + 0.0 * noise
return [x], y
def run_breiman85():
"""Run Breiman 85 sample."""
x, y = build_sample_ace_problem_breiman85(200)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
ace.plot_transforms(ace_solver, 'sample_ace_breiman85.png')
except ImportError:
pass
return ace_solver
def run_breiman2():
"""Run Breiman's other sample problem."""
x, y = build_sample_ace_problem_breiman2(500)
ace_solver = ace.ACESolver()
ace_solver.specify_data_set(x, y)
ace_solver.solve()
try:
plt = ace.plot_transforms(ace_solver, None)
except ImportError:
pass
plt.subplot(1, 2, 1)
phi = numpy.sin(2.0 * numpy.pi * x[0])
plt.plot(x[0], phi, label='analytic')
plt.legend()
plt.subplot(1, 2, 2)
y = numpy.exp(phi)
plt.plot(y, phi, label='analytic')
plt.legend(loc='lower right')
# plt.show()
plt.savefig('no_noise_linear_x.png')
return ace_solver
if __name__ == '__main__':
run_breiman2()
| [
37811,
10987,
262,
27565,
40488,
1917,
422,
685,
12679,
24086,
5332,
60,
62,
526,
15931,
198,
198,
11748,
299,
32152,
13,
25120,
198,
11748,
629,
541,
88,
13,
20887,
198,
198,
6738,
31506,
1330,
31506,
628,
198,
4299,
1382,
62,
39873,
... | 2.182166 | 785 |
"""
Modifications copyright (C) 2020 Michael Strobl
"""
import pprint
import configparser
pp = pprint.PrettyPrinter()
#endinit
if __name__=='__main__':
c = Config("configs/allnew_mentions_config.ini", verbose=True)
| [
37811,
198,
5841,
6637,
6634,
357,
34,
8,
12131,
3899,
30183,
2436,
198,
37811,
198,
198,
11748,
279,
4798,
198,
11748,
4566,
48610,
198,
198,
381,
796,
279,
4798,
13,
35700,
6836,
3849,
3419,
628,
220,
220,
220,
1303,
437,
15003,
198... | 2.8375 | 80 |
import pandas as pd
import dill as pickle
# sklearn
from sklearn.model_selection import train_test_split
import json
import os
import numpy as np
import matplotlib.pyplot as plt
import itertools
from collections import Counter
# sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import roc_auc_score
import scikitplot.metrics as skplt
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.utils.multiclass import unique_labels
# from this project
import utils.common as common
# Function to calculate missing values by column
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
288,
359,
355,
2298,
293,
198,
2,
1341,
35720,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
299,
321... | 3.532934 | 167 |
"""Utility functions for interacting with the console"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Used to determine python version
import sys
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def input(prompt_text):
"""
Prompt the user for input.
The input command will change depending on the version of python
installed. To maintain support for 2 and earlier, we must use
raw_input in that case. Else use input.
Parameters
----------
prompt_text : str
Prompt to display to the user.
"""
# Try to get the python version. This command is only available in
# python 2 and later, so it's important that we catch the exception
# if the command isn't found.
try:
majorversion = sys.version_info[0]
except AttributeError:
majorversion = 1
# Use the correct function to prompt the user for input depending on
# what python version the code is running in.
if majorversion >= 3:
return input(prompt_text)
else:
return raw_input(prompt_text).decode(sys.stdin.encoding)
def prompt_boolean(prompt, default=False):
"""
Prompt the user for a boolean response.
Parameters
----------
prompt : str
prompt to display to the user
default : bool, optional
response to return if none is given by the user
"""
response = input(prompt)
response = response.strip().lower()
#Catch 1, true, yes as True
if len(response) > 0 and (response == "1" or response[0] == "t" or response[0] == "y"):
return True
#Catch 0, false, no as False
elif len(response) > 0 and (response == "0" or response[0] == "f" or response[0] == "n"):
return False
else:
return default
def prompt_dictionary(choices, default_style=1, menu_comments={}):
"""
Prompt the user to chose one of many selections from a menu.
Parameters
----------
choices : dictionary
Keys - choice numbers (int)
Values - choice value (str), this is what the function will return
default_style : int, optional
Choice to select if the user doesn't respond
menu_comments : dictionary, optional
Additional comments to append to the menu as it is displayed
in the console.
Keys - choice numbers (int)
Values - comment (str), what will be appended to the
corresponding choice
"""
# Build the menu that will be displayed to the user with
# all of the options available.
prompt = ""
for key, value in choices.items():
prompt += "%d %s " % (key, value)
if key in menu_comments:
prompt += menu_comments[key]
prompt += "\n"
# Continue to ask the user for a style until an appropriate
# one is specified.
response = -1
while (not response in choices):
try:
text_response = input(prompt)
# Use default option if no input.
if len(text_response.strip()) == 0:
response = default_style
else:
response = int(text_response)
except ValueError:
print("Error: Value is not an available option. 0 selects the default.\n")
return choices[response]
| [
37811,
18274,
879,
5499,
329,
24986,
351,
262,
8624,
37811,
198,
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
2211,
11,
262,
6101,
7535,
7712,
4816,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
10305,
13789,
... | 2.947955 | 1,345 |
# coding=utf-8
from os import sys, path
from logging import getLogger
from items.view import app
sys.path.append(path.dirname(path.abspath(__file__)))
logger = getLogger(__name__)
logger.info(sys.path)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
28686,
1330,
25064,
11,
3108,
198,
6738,
18931,
1330,
651,
11187,
1362,
198,
198,
6738,
3709,
13,
1177,
1330,
598,
198,
198,
17597,
13,
6978,
13,
33295,
7,
6978,
13,
15908,
3672,
7,
6978,... | 2.469027 | 113 |
from lib import action
| [
6738,
9195,
1330,
2223,
628
] | 4.8 | 5 |
"""
********************************************************************************
* Name: gen_commands.py
* Author: Nathan Swain
* Created On: 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
"""
import os
import string
import random
from tethys_apps.utilities import get_tethys_home_dir, get_tethys_src_dir
from distro import linux_distribution
from django.conf import settings
from jinja2 import Template
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tethys_portal.settings")
GEN_SETTINGS_OPTION = 'settings'
GEN_APACHE_OPTION = 'apache'
GEN_ASGI_SERVICE_OPTION = 'asgi_service'
GEN_NGINX_OPTION = 'nginx'
GEN_NGINX_SERVICE_OPTION = 'nginx_service'
GEN_PORTAL_OPTION = 'portal'
GEN_SERVICES_OPTION = 'services'
GEN_INSTALL_OPTION = 'install'
GEN_SITE_YAML_OPTION = 'site_content'
FILE_NAMES = {
GEN_SETTINGS_OPTION: 'settings.py',
GEN_APACHE_OPTION: 'tethys-default.conf',
GEN_ASGI_SERVICE_OPTION: 'asgi_supervisord.conf',
GEN_NGINX_OPTION: 'tethys_nginx.conf',
GEN_NGINX_SERVICE_OPTION: 'nginx_supervisord.conf',
GEN_PORTAL_OPTION: 'portal.yml',
GEN_SERVICES_OPTION: 'services.yml',
GEN_INSTALL_OPTION: 'install.yml',
GEN_SITE_YAML_OPTION: 'site_content.yml',
}
VALID_GEN_OBJECTS = (
GEN_SETTINGS_OPTION,
# GEN_APACHE_OPTION,
GEN_ASGI_SERVICE_OPTION,
GEN_NGINX_OPTION,
GEN_NGINX_SERVICE_OPTION,
GEN_PORTAL_OPTION,
GEN_SERVICES_OPTION,
GEN_INSTALL_OPTION,
GEN_SITE_YAML_OPTION
)
TETHYS_SRC = get_tethys_src_dir()
gen_commands = {
GEN_SETTINGS_OPTION: gen_settings,
GEN_ASGI_SERVICE_OPTION: gen_asgi_service,
GEN_NGINX_OPTION: gen_nginx,
GEN_NGINX_SERVICE_OPTION: gen_nginx_service,
GEN_PORTAL_OPTION: gen_portal_yaml,
GEN_SERVICES_OPTION: gen_services_yaml,
GEN_INSTALL_OPTION: gen_install,
GEN_SITE_YAML_OPTION: gen_site_content_yaml,
}
def generate_command(args):
"""
Generate a settings file for a new installation.
"""
# Setup variables
context = gen_commands[args.type](args)
destination_path = get_destination_path(args)
render_template(args.type, context, destination_path)
| [
37811,
198,
17174,
17174,
8412,
198,
9,
6530,
25,
2429,
62,
9503,
1746,
13,
9078,
198,
9,
6434,
25,
18106,
2451,
391,
198,
9,
15622,
1550,
25,
1853,
198,
9,
15069,
25,
357,
66,
8,
37434,
6960,
2059,
1853,
198,
9,
13789,
25,
347,
... | 2.461287 | 917 |
if __name__ == "__main__":
grid = [[0 for x in range(9)] for y in range(9)]
grid = [[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0]]
if (solve_sudoku(grid)):
print_grid(grid)
else:
print
"No solution exists"
| [
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
201,
198,
220,
220,
220,
10706,
796,
16410,
15,
329,
2124,
287,
2837,
7,
24,
15437,... | 1.477612 | 402 |
# -*- coding: utf-8 -*-
#
# Hymn documentation build configuration file
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.insert(0, PROJECT_DIR)
import hymn
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'
project = u'Hymn'
copyright = u'2014-2018, Philip Xu'
author = u'Philip Xu'
version = '%d.%d' % hymn.__version__
release = hymn.VERSION
language = None
exclude_patterns = ['_build']
pygments_style = 'colorful'
todo_include_todos = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
html_theme = 'bizstyle'
htmlhelp_basename = 'Hymndoc'
latex_documents = [
(master_doc, 'Hymn.tex', u'Hymn Documentation',
u'Philip Xu', 'manual'),
]
man_pages = [
(master_doc, 'hymn', u'Hymn Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'Hymn', u'Hymn Documentation',
author, 'Hymn', hymn.__doc__,
'Miscellaneous'),
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
367,
4948,
77,
10314,
1382,
8398,
2393,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
31190,
23680,
62,
34720,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7... | 2.302428 | 453 |
# coding=utf-8
from services.base import BaseService
from services.service_locator import ServiceLocator
from logger import error
__author__ = 'Glebov Boris'
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
2594,
13,
8692,
1330,
7308,
16177,
198,
6738,
2594,
13,
15271,
62,
17946,
1352,
1330,
4809,
33711,
1352,
198,
6738,
49706,
1330,
4049,
198,
198,
834,
9800,
834,
796,
705,
38,
293,
65,
709... | 3.5 | 46 |
import os
from typing import List
import random
import h5py
import numpy as np
from PIL import Image, ImageFile
import threading
# force pillow to load also truncated images
ImageFile.LOAD_TRUNCATED_IMAGES = True
# number of images to take from the folder
N_EL = int(5e5)
# path/to/folder that contains the images. No particular structure is required and nested folder are accepted.
RES_PATH = os.path.join('E:\\dataset\\images_only')
def square_img(im: Image.Image) -> Image:
"""
:param im:
:return:
"""
w, h = im.size
if w == h:
return im
crop_shift = random.randrange(abs(h-w)) # crops only in the dimension that is bigger!
if w > h:
# left-upper, right-lower
# box dimension must be that way
box = [0, 0, h, h]
# and it may be moved horizontally
box[0] += crop_shift
box[2] += crop_shift
else:
# moving box vertically
box = [0, 0, w, w]
box[1] += crop_shift
box[3] += crop_shift
im = im.crop(box)
return im
class ThreadedImageWriter(threading.Thread):
"""
Threaded version to prepare the dataset. Everything runs smoothly because we have multiple folders that avoid
race conditions
"""
def images_in_paths(folder_path: str) -> List[str]:
"""
Collects all images from one folder and return a list of paths
:param folder_path:
:return:
"""
paths = []
folder_path = os.path.join(os.getcwd(), folder_path)
for root, dirs, files in os.walk(folder_path):
for file in files:
paths.append(os.path.join(root, file))
return paths
def shuffle_dataset(lst: List, seed: int = None) -> None:
"""
Controlled shuffle.
:param lst:
:param seed: if specified the shuffle returns the same shuffled list every time it is invoked
:return:
"""
if seed is not None:
random.seed(seed)
random.shuffle(lst)
def generate_dataset(file_list: List, dataset_folder: str, img_size=256, train_dim: float = 0.70, val_dim: float = 0.25):
"""
Generate and save train, validation and test data. Test data is what is left from train and validation sets
:param file_list:
:param img_size:
:param train_dim:
:param val_dim:
:param hdf5_file_name:
:return:
"""
shuffle_dataset(file_list)
# make train, validation and test partitions
n = len(file_list)
train_i = [0, int(train_dim*n)]
val_i = [int(train_dim*n), int((train_dim+val_dim)*n)]
test_i = [int((train_dim+val_dim)*n), -1]
file_dict = {
'train': file_list[train_i[0]:train_i[1]],
'val': file_list[val_i[0]:val_i[1]],
'test': file_list[test_i[0]:]
}
# it is better to keep validation dataset bigger than test one
assert len(file_dict['train']) > len(file_dict['val']) > len(file_dict['test'])
os.makedirs(dataset_folder, exist_ok=True)
# create h5file to store information about train_mean and train_std that are useful for training later
h5_path = os.path.join(dataset_folder, 'info.h5')
with h5py.File(h5_path, mode='w') as hdf5_out:
hdf5_out.create_dataset('train_mean', (img_size, img_size, 3), np.float32)
hdf5_out.create_dataset('train_std', (img_size, img_size, 3), np.float32)
hdf5_out.create_dataset('train_dim', (), np.int32, data=int(n*train_dim))
hdf5_out.create_dataset('val_dim', (), np.int32, data=int(n*val_dim))
hdf5_out.create_dataset('test_dim', (), np.int32, data=int(n*(1-train_dim-val_dim)))
# make one thread for <set_type>
threaded_types = []
for set_type, img_list in file_dict.items():
threaded_types.append(ThreadedImageWriter(img_list, set_type, hdf5_out, img_size, dataset_folder))
for thread in threaded_types:
thread.start()
for thread in threaded_types:
# wait for the threads to finish the execution
thread.join()
for i, thread in enumerate(threaded_types):
if thread.read_errors:
with open('errors{}.txt'.format(i), 'w') as f:
f.writelines(thread.read_errors)
if thread.set_type == 'train':
# calculate the std using the variace array only for train set
training_std = np.sqrt(thread.M2 / (len(file_dict['train']) - 1))
hdf5_out['train_mean'][...] = thread.mean
hdf5_out['train_std'][...] = training_std
if __name__ == '__main__':
output_path = os.path.join(os.getcwd(), 'resources', 'images')
elements = N_EL
res_path = RES_PATH
images_list = images_in_paths(os.path.join(res_path))
random.shuffle(images_list)
images_list = images_list[0:elements]
generate_dataset(images_list, os.path.join(output_path, 'ILSVRC_' + str(elements)))
| [
11748,
28686,
198,
6738,
19720,
1330,
7343,
198,
11748,
4738,
198,
11748,
289,
20,
9078,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
8979,
198,
11748,
4704,
278,
198,
2,
2700,
28774,
284,
3440,
635,
... | 2.372252 | 2,047 |
import urllib.request
import os
import argparse
from bs4 import BeautifulSoup
parser = argparse.ArgumentParser()
parser.add_argument("url", type=str, nargs=1, help="Main url with list of recipe URLs")
parser.add_argument("cuisine", type=str, nargs=1, help="Type of cuisine on the main url page")
parser.add_argument("pageNum", type=int, nargs=1, help="Page number to pull from")
#parser.add_argument("fileStart", type=int, nargs=1, help="number to start filenames on")
args = parser.parse_args()
cuisine = str(args.cuisine[0]).lower()
page = str(args.pageNum[0])
main_url = str(args.url[0]) + "?sort=Newest&page=" + page
#local_filename, headers = urllib.request.urlretrieve(main_url)
try:local_filename, headers = urllib.request.urlretrieve(main_url)
except:
print("\n### Unable to open webpage " + main_url + " ### \n")
exit(-1)
url_file = open(local_filename)
html = url_file.read()
soup = BeautifulSoup(html, 'html.parser')
div = soup.find_all('article', class_='grid-col--fixed-tiles')
url_list = []
for item in div:
for a in item.find_all('a', href=True):
if "/recipe" in a['href']:
if a['href'] not in url_list:
url_list.append(a['href'])
url_file.close()
filenum = len(os.listdir("html/" + cuisine))
for url in url_list:
if filenum > 160:
break
urlname = "http://allrecipes.com" + url
html_filename = "html/" + cuisine +"/" + cuisine + str(filenum) + ".html"
html_file = open(html_filename, 'w')
print(urlname, filenum)
try:local_filename, headers = urllib.request.urlretrieve(urlname)
except:
print("UNABLE TO OPEN " + urlname)
exit(-1)
file_ = open(local_filename)
data = file_.read()
html_file.write(data)
html_file.close()
file_.close()
filenum += 1
print("Done")
| [
11748,
2956,
297,
571,
13,
25927,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
7203,
6371,
... | 2.442408 | 764 |
"""
Here are declare all the settings of the app.
1. Database configurations.
2. Develop config
3. Prod config
4. Also default config that is develop
"""
import os
# file' path
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
#main class configuration
# Develop configuration
# Production Configuration
# dictionary for selecting the confinguration desired
config = {
"dev": DevMode,
"prod": ProdMode,
"default": DevMode
} | [
37811,
198,
220,
220,
220,
3423,
389,
13627,
477,
262,
6460,
286,
262,
598,
13,
198,
220,
220,
220,
220,
220,
220,
220,
352,
13,
24047,
25412,
13,
198,
220,
220,
220,
220,
220,
220,
220,
362,
13,
6013,
4566,
198,
220,
220,
220,
... | 2.852071 | 169 |
import pytest
from django.urls import reverse
from freezegun import freeze_time
from rest_framework import status
from datahub.company_referral.test.factories import (
ClosedCompanyReferralFactory,
CompanyReferralFactory,
)
from datahub.core.test_utils import format_date_or_datetime, get_attr_or_none
from datahub.dataset.core.test import BaseDatasetViewTest
def get_expected_data_from_company_referral(referral):
"""Returns company referral data as a dictionary"""
return {
'company_id': str(referral.company_id),
'completed_by_id': get_attr_or_none(referral, 'completed_by_id'),
'completed_on': format_date_or_datetime(referral.completed_on),
'contact_id': str(referral.contact_id),
'created_by_id': str(referral.created_by_id),
'created_on': format_date_or_datetime(referral.created_on),
'id': str(referral.id),
'interaction_id': (
str(referral.interaction_id)
if referral.interaction_id is not None
else None
),
'notes': referral.notes,
'recipient_id': str(referral.recipient_id),
'status': str(referral.status),
'subject': referral.subject,
}
@pytest.mark.django_db
class TestCompanyReferralDatasetView(BaseDatasetViewTest):
"""
Tests for CompanyReferralDatasetView
"""
view_url = reverse('api-v4:dataset:company-referrals-dataset')
factory = CompanyReferralFactory
@pytest.mark.parametrize(
'referral_factory', (
CompanyReferralFactory,
ClosedCompanyReferralFactory,
),
)
def test_success(self, data_flow_api_client, referral_factory):
"""Test that endpoint returns with expected data for a single referral"""
referral = referral_factory()
response = data_flow_api_client.get(self.view_url)
assert response.status_code == status.HTTP_200_OK
response_results = response.json()['results']
assert len(response_results) == 1
result = response_results[0]
expected_result = get_expected_data_from_company_referral(referral)
assert result == expected_result
def test_with_multiple_records(self, data_flow_api_client):
"""Test that endpoint returns correct number of records"""
with freeze_time('2019-01-01 12:30:00'):
referral1 = CompanyReferralFactory()
with freeze_time('2019-01-03 12:00:00'):
referral2 = CompanyReferralFactory()
with freeze_time('2019-01-01 12:00:00'):
referral3 = CompanyReferralFactory()
referral4 = CompanyReferralFactory()
response = data_flow_api_client.get(self.view_url)
assert response.status_code == status.HTTP_200_OK
response_results = response.json()['results']
assert len(response_results) == 4
expected_list = sorted([referral3, referral4], key=lambda x: x.pk) + [referral1, referral2]
for index, referral in enumerate(expected_list):
assert str(referral.id) == response_results[index]['id']
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
1479,
89,
1533,
403,
1330,
16611,
62,
2435,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
198,
6738,
4818,
993,
549,
13,
39722,
62,
260,
2232,
1373,
13,
... | 2.444885 | 1,261 |
"""
The AwsIamTester class implements all necessary logic to run validations on an account, role or user.
"""
# pylint: disable=broad-except,C0103,E0401,R0912,R0913,R0914,R0915,R1702,W0603,W1203
from __future__ import annotations
import os
import sys
import errno
import json
import logging
import re
import time
import yaml
import click
import boto3 # type: ignore
import botocore # type: ignore
from tabulate import tabulate
from typing import Any, Dict, List, Optional, Tuple, Union #, Literal # Literal is p3.8 and higher
from termcolor import colored
| [
37811,
198,
464,
5851,
82,
40,
321,
51,
7834,
1398,
23986,
477,
3306,
9156,
284,
1057,
4938,
602,
319,
281,
1848,
11,
2597,
393,
2836,
13,
198,
37811,
198,
198,
2,
279,
2645,
600,
25,
15560,
28,
36654,
12,
16341,
11,
34,
486,
3070... | 3.139665 | 179 |
#!/usr/bin/python -u
import datetime
import calendar
if __name__ == "__main__":
print datetime.datetime.today().weekday() # 3
print calendar.day_name[datetime.datetime.today().weekday()]
# Thursday
| [
2,
48443,
14629,
14,
8800,
14,
29412,
532,
84,
198,
11748,
4818,
8079,
198,
11748,
11845,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
4818,
8079,
13,
19608,
8079,
13,
40838,
22446,
1046... | 2.776316 | 76 |
from rich.console import Console
import subprocess as sp
import time
import click
@click.command()
@click.option('--path','-p',help='Path of file to watch')
@click.option('--arguments','-args',help='Arguments to run when file changes')
@click.option('--delay','-d',default=4,help='Delay in seconds')
def start(path,arguments,delay):
'''FILEWATCH is a file watcher that allows you to watch files if something changes run arguments'''
App(filepath=str(path),arguments=arguments,delay=delay)
if __name__ == '__main__':
try:
start()
except FileNotFoundError:
print("Use --help to see help information") | [
6738,
5527,
13,
41947,
1330,
24371,
201,
198,
11748,
850,
14681,
355,
599,
201,
198,
11748,
640,
201,
198,
11748,
3904,
201,
198,
201,
198,
201,
198,
31,
12976,
13,
21812,
3419,
201,
198,
31,
12976,
13,
18076,
10786,
438,
6978,
3256,
... | 2.864035 | 228 |
import json
import os
from lib.object_documentor import (
documentize_object,
documentize_prop,
documentize_array,
)
file_name = input("Specify json to document: ")
file = open(file_name)
data = json.load(file)
# Iterating through the json
lines = [" Prop | Type | Description | Example \n", "----|----|----|----\n"]
for i in data:
parents = (i,)
if isinstance(data[i], dict):
lines = lines + documentize_object(i, data[i], parents)
elif isinstance(data[i], list):
lines = lines + documentize_array(i, data[i], parents)
else:
lines = lines + documentize_prop(i, data[i])
file.close()
# Write output MD to file
directory = "./output-md"
if not os.path.exists(directory):
os.makedirs(directory)
file1 = open(directory + "/" + file_name + ".md", "w+")
file1.writelines(lines)
file1.close()
| [
11748,
33918,
198,
11748,
28686,
198,
6738,
9195,
13,
15252,
62,
22897,
273,
1330,
357,
198,
220,
220,
220,
3188,
1096,
62,
15252,
11,
198,
220,
220,
220,
3188,
1096,
62,
22930,
11,
198,
220,
220,
220,
3188,
1096,
62,
18747,
11,
198... | 2.64486 | 321 |
# Blackheart Day Damage Skin
success = sm.addDamageSkin(2435313)
if success:
sm.chat("The Blackheart Day Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2435313)
| [
2,
2619,
11499,
3596,
8995,
17847,
198,
13138,
796,
895,
13,
2860,
22022,
42455,
7,
1731,
2327,
25838,
8,
198,
361,
1943,
25,
198,
220,
220,
220,
895,
13,
17006,
7203,
464,
2619,
11499,
3596,
8995,
17847,
468,
587,
2087,
284,
534,
1... | 3.28125 | 64 |
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
class ScheduledDropout(Layer):
"""Applies Scheduled Dropout to the input.
The Dropout layer randomly sets input units to 0 with a frequency of `rate`
scheduled by network layer's depth and training step at each step, which
helps prevent overfitting.
Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over
all inputs is unchanged.
Note that the Dropout layer only applies when `training` is set to True
such that no values are dropped during inference. When using `model.fit`,
`training` will be appropriately set to True automatically, and in other
contexts, you can set the kwarg explicitly to True when calling the layer.
(This is in contrast to setting `trainable=False` for a Dropout layer.
`trainable` does not affect the layer's behavior, as Dropout does
not have any variables/weights that can be frozen during training.)
Arguments:
drop_rate: Float between 0 and 1. Fraction of the input units to drop.
cell_num: Cell number in the network
total_num_cells: Number of cells in the network
total_training_steps: Number of total steps performed during training
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
class ScheduledDroppath(Layer):
"""Applies Scheduled Droppath to the input.
The Droppath layer randomly sets whole input path inside to 0 with a
frequency of `rate` scheduled by network layer's depth and training
step at each step, which helps prevent overfitting.
Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over
all inputs is unchanged.
Note that the Scheduled Droppath layer only applies when `training` is set to True.
When using `model.fit`, `training` will be appropriately set to True
automatically, and in other contexts, you can set the kwarg explicitly
to True when calling the layer. (This is in contrast to setting
`trainable=False` for a Droppath layer. `trainable` does not affect the
layer's behavior, as Droppath does not have any variables/weights that
can be frozen during training.)
Arguments:
drop_rate: Float between 0 and 1. Fraction of the inputs to drop.
cell_num: Cell number in the network
total_num_cells: Number of cells in the network
total_training_steps: Number of total steps performed during training
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
class ConcreteDropout(Layer):
"""Applies Concrete Dropout to the input.
The Concrete Droppath layer randomly sets input path to 0 with a
frequency considered as a weight of the layer optimized during training
time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over
all inputs is unchanged.
Note that the Concrete Dropout layer only applies when `training` is set
to True. When using `model.fit`, `training` will be appropriately set to
True automatically, and in other contexts, you can set the kwarg explicitly
to True when calling the layer. (This is in contrast to setting
`trainable=False` for a Concrete Dropout layer. `trainable` does not affect
the layer's behavior, as Dropout does not have any variables/weights that
can be frozen during training.)
Arguments:
dropout_regularizer: A positive number which satisfies
$dropout_regularizer = 2 / (\tau * N)$ with model precision
$\tau$ (inverse observation noise) and N the number of
instances in the dataset.
init_min: dropout probability initializer min
init_max: dropout probability initializer max
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
@tf.function
def concrete_dropout(self, x):
'''
Concrete dropout - used at training time and testing time (gradients can be propagated)
:param x: input
:return: approx. dropped out input
'''
eps = K.cast_to_floatx(K.epsilon())
temp = 0.1
unif_noise = K.random_uniform(K.shape(x))
drop_prob = (
K.log(self.get_p() + eps)
- K.log(1. - self.get_p() + eps)
+ K.log(unif_noise + eps)
- K.log(1. - unif_noise + eps)
)
drop_prob = K.sigmoid(drop_prob / temp)
random_tensor = 1. - drop_prob
retain_prob = 1. - self.get_p()
x *= random_tensor
x /= retain_prob
return x
class ConcreteDroppath(Layer):
"""Applies Concrete Droppath to the input.
The Concrete Droppath layer randomly sets input path to 0 with a
frequency considered as a weight of the layer optimized during training
time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over
all inputs is unchanged.
Note that the Concrete Droppath layer only applies when `training` is set
to True. When using `model.fit`, `training` will be appropriately set to
True automatically, and in other contexts, you can set the kwarg explicitly
to True when calling the layer. (This is in contrast to setting
`trainable=False` for a Concrete Droppath layer. `trainable` does not affect
the layer's behavior, as Dropout does not have any variables/weights that
can be frozen during training.)
Arguments:
dropout_regularizer: A positive number which satisfies
$dropout_regularizer = 2 / (\tau * N)$ with model precision
$\tau$ (inverse observation noise) and N the number of
instances in the dataset.
init_min: dropout probability initializer min
init_max: dropout probability initializer max
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
@tf.function
def concrete_droppath(self, x):
"""
Concrete droppath - used at training and testing time (gradients can be propagated)
:param x: input
:return: approx. dropped out input
"""
eps = K.cast_to_floatx(K.epsilon())
temp = 0.1
unif_noise = tf.random.uniform(shape=[K.shape(x)[0], 1, 1, 1])
drop_prob = (
K.log(self.get_p() + eps)
- K.log(1. - self.get_p() + eps)
+ K.log(unif_noise + eps)
- K.log(1. - unif_noise + eps)
)
drop_prob = K.sigmoid(drop_prob / temp)
random_tensor = 1. - drop_prob
retain_prob = 1. - self.get_p()
x *= random_tensor
x /= retain_prob
return x
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
30203,
355,
509,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
34398,
628,
198,
4871,
... | 2.604788 | 3,049 |
import re
string = ""
while True:
command = input()
if command == "":
break
else:
string += " "+command
search_patter = r"(www\.([A-Za-z0-9]+((-[A-Za-z0-9]+))*)\.([a-z]+((\.[a-z]+))*))"
for i in re.findall(search_patter, string):
print(i[0]) | [
11748,
302,
198,
198,
8841,
796,
13538,
198,
4514,
6407,
25,
198,
220,
220,
220,
3141,
796,
5128,
3419,
198,
220,
220,
220,
611,
3141,
6624,
366,
1298,
198,
220,
220,
220,
220,
220,
220,
220,
2270,
198,
220,
220,
220,
2073,
25,
19... | 2.014706 | 136 |
"""
Do not modify this file. It is generated from the Swagger specification.
Container module for JSONSchema definitions.
This does not include inlined definitions.
The pretty-printing functionality provided by the json module is superior to
what is provided by pformat, hence the use of json.loads().
"""
import json
# When no schema is provided in the definition, we use an empty schema
__UNSPECIFIED__ = {}
{% for name, definition in schemas|dictsort(true) %}
{{ name }} = json.loads("""
{{ definition }}
""",strict=False)
{% endfor %}
| [
37811,
198,
5211,
407,
13096,
428,
2393,
13,
632,
318,
7560,
422,
262,
2451,
7928,
20855,
13,
198,
198,
29869,
8265,
329,
19449,
27054,
2611,
17336,
13,
198,
1212,
857,
407,
2291,
287,
10837,
17336,
13,
198,
198,
464,
2495,
12,
4798,
... | 3.675676 | 148 |
# VMware vCloud Director Python SDK
# Copyright (c) 2014-2019 VMware, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from pyvcloud.vcd.vapp_firewall import VappFirewall
from vcd_cli.utils import restore_session
from vcd_cli.utils import stderr
from vcd_cli.utils import stdout
from vcd_cli.vapp_network import services
@services.group('firewall',
short_help='manage firewall service of vapp network')
@click.pass_context
def firewall(ctx):
"""Manages firewall service of vapp network.
\b
Examples
vcd vapp network services firewall enable-firewall vapp_name
network_name --enable
Enable firewall service.
\b
vcd vapp network services firewall set-default-action vapp_name
network_name --action allow --log-action False
Set deault action in firewall service.
\b
vcd vapp network services firewall list vapp_name network_name
List firewall rules in firewall service.
\b
vcd vapp network services firewall add vapp_name network_name rule_name
--enable --policy drop --protocols Tcp,Udp --source-ip Any
--source-port-range Any --destination-port-range Any
--destination-ip Any --enable-logging
Add firewall rule in firewall service.
\b
vcd vapp network services firewall update vapp_name network_name
rule_name --name rule_new_name --enable --policy
drop --protocols Tcp,Udp --source-ip Any
--source-port-range Any --destination-port-range Any
--destination-ip Any --enable-logging
Update firewall rule in firewall service.
\b
vcd vapp network services firewall delete vapp_name network_name
--name firewall_rule_name
Delete firewall rule in firewall service.
"""
def get_vapp_network_firewall(ctx, vapp_name, network_name):
"""Get the VappFirewall object.
It will restore sessions if expired. It will reads the client and
creates the VappFirewall object.
"""
restore_session(ctx, vdc_required=True)
client = ctx.obj['client']
vapp_dhcp = VappFirewall(client, vapp_name, network_name)
return vapp_dhcp
@firewall.command('enable-firewall', short_help='Enable firewall service')
@click.pass_context
@click.argument('vapp_name', metavar='<vapp-name>', required=True)
@click.argument('network_name', metavar='<network-name>', required=True)
@click.option('--enable/--disable',
'is_enabled',
default=True,
metavar='<is_enable>',
help='enable firewall service')
@firewall.command('set-default-action',
short_help='set default action of firewall service')
@click.pass_context
@click.argument('vapp_name', metavar='<vapp-name>', required=True)
@click.argument('network_name', metavar='<network-name>', required=True)
@click.option('--action',
'action',
default='drop',
metavar='<action>',
help='deafult action on firewall service')
@click.option('--enable-log-action/--disable-log-action',
'log_action',
default=True,
metavar='<log_action>',
help='default action on firewall service log')
@firewall.command('add', short_help='add firewall rule to firewall service')
@click.pass_context
@click.argument('vapp_name', metavar='<vapp-name>', required=True)
@click.argument('network_name', metavar='<network-name>', required=True)
@click.argument('firewall_rule_name',
metavar='<firewall-rule-name>',
required=True)
@click.option('--enable/--disable',
'is_enable',
default=True,
metavar='<is_enable>',
help='enable firewall rule')
@click.option('--policy',
'policy',
default='drop',
metavar='<policy>',
help='policy on firewall rule')
@click.option('--protocols',
'protocols',
default=None,
metavar='<protocols>',
help='all protocol names in comma separated format')
@click.option('--source-port-range',
'source_port_range',
default='Any',
metavar='<source_port_range>',
help='source port range on firewall rule')
@click.option('--source-ip',
'source_ip',
default='Any',
metavar='<source_ip>',
help='source ip on firewall rule')
@click.option('--destination-port-range',
'destination_port_range',
default='Any',
metavar='<destination_port_range>',
help='destination port range on firewall rule')
@click.option('--destination-ip',
'destination_ip',
default='Any',
metavar='<destination_ip>',
help='destination ip on firewall rule')
@click.option('--enable-logging/--disable-logging',
'is_logging',
default=True,
metavar='<is_logging>',
help='enable logging on firewall rule')
@firewall.command('list', short_help='list firewall rules in firewall service')
@click.pass_context
@click.argument('vapp_name', metavar='<vapp-name>', required=True)
@click.argument('network_name', metavar='<network-name>', required=True)
@firewall.command('update',
short_help='update firewall rule of firewall service')
@click.pass_context
@click.argument('vapp_name', metavar='<vapp-name>', required=True)
@click.argument('network_name', metavar='<network-name>', required=True)
@click.argument('firewall_rule_name',
metavar='<firewall-rule-name>',
required=True)
@click.option('--name',
'new_name',
default=None,
metavar='<new_name>',
help='new name of firewall rule')
@click.option('--enable/--disable',
'is_enable',
default=None,
metavar='<is_enable>',
help='enable firewall rule')
@click.option('--policy',
'policy',
default=None,
metavar='<policy>',
help='policy on firewall rule')
@click.option('--protocols',
'protocols',
default=None,
metavar='<protocols>',
help='all protocol names in comma separated format')
@click.option('--source-port-range',
'source_port_range',
default=None,
metavar='<source_port_range>',
help='source port range on firewall rule')
@click.option('--source-ip',
'source_ip',
default=None,
metavar='<source_ip>',
help='source ip on firewall rule')
@click.option('--destination-port-range',
'destination_port_range',
default=None,
metavar='<destination_port_range>',
help='destination port range on firewall rule')
@click.option('--destination-ip',
'destination_ip',
default=None,
metavar='<destination_ip>',
help='destination ip on firewall rule')
@click.option('--enable-logging/--disable-logging',
'is_logging',
default=None,
metavar='<is_logging>',
help='enable logging on firewall rule')
@firewall.command('delete',
short_help='delete firewall rule in firewall service')
@click.pass_context
@click.argument('vapp_name', metavar='<vapp-name>', required=True)
@click.argument('network_name', metavar='<network-name>', required=True)
@click.argument('firewall_rule_name',
metavar='<firewall-rule-name>',
required=True)
| [
2,
37754,
410,
18839,
5890,
11361,
26144,
198,
2,
15069,
357,
66,
8,
1946,
12,
23344,
37754,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
... | 2.287694 | 3,681 |
import graphene
from graphene import relay
from api.schema.benefit import BenefitQuery
from api.schema.branch import BranchQuery
from api.schema.cultural_fit import CulturalFitQuery
from api.schema.dashboard import DashboardQuery
from api.schema.faq_category import FAQCategoryQuery
from api.schema.company import CompanyProfileMutation, CompanyQuery, UniversityProfileMutation
from api.schema.attachment import AttachmentMutation, AttachmentQuery
from api.schema.employee import EmployeeMutation
from api.schema.job_requirement import JobRequirementQuery
from api.schema.job_type import JobTypeQuery
from api.schema.job_posting import JobPostingMutation, JobPostingQuery
from api.schema.keyword.schema import KeywordQuery
from api.schema.language import LanguageQuery
from api.schema.auth import AuthMutation, LogoutMutation, VerifyPasswordResetToken
from api.schema.language_level import LanguageLevelQuery
from api.schema.match import MatchQuery, MatchMutation
from api.schema.project_posting.schema import ProjectPostingQuery, ProjectPostingMutation
from api.schema.project_type.schema import ProjectTypeQuery
from api.schema.skill import SkillQuery
from api.schema.soft_skill import SoftSkillQuery
from api.schema.student import StudentProfileMutation, StudentQuery
from api.schema.registration import RegistrationMutation
from api.schema.topic.schema import TopicQuery
from api.schema.upload import UploadMutation
from api.schema.upload.schema import UploadConfigurationQuery
from api.schema.user import UserQuery
from api.schema.user_request import UserRequestMutation
from api.schema.zip_city import ZipCityQuery
schema = graphene.Schema(query=Query, mutation=Mutation)
| [
11748,
42463,
198,
198,
6738,
42463,
1330,
24248,
198,
6738,
40391,
13,
15952,
2611,
13,
48649,
1330,
38065,
20746,
198,
6738,
40391,
13,
15952,
2611,
13,
1671,
3702,
1330,
20551,
20746,
198,
6738,
40391,
13,
15952,
2611,
13,
30844,
62,
... | 3.707048 | 454 |
import pandas as pd
import h5py
from sentence_transformers import SentenceTransformer, util
import re
import pickle
class Sample():
"""Samples a relevant paper given an input, using corpus_embeddings
"""
def sample(self, paper_id, abstract, title):
"""Given paper_text ( = paper_abstract+paper_title), samples out the most relevant paper
Args:
paper_id (str): the arxiv id of the paper which is treated as the starting point
abstract (str): abstract of paper
title (str) : title of paper
Returns:
[type]: [description]
"""
paper_text = abstract + ' ' + title
paper_text = self.clean_text(paper_text)
# get the vector for query paper
query_embedding = self.model.encode(paper_text, convert_to_tensor=True)
# retrieve top similar papers
search_hits = util.semantic_search(query_embedding, self.corpus_embeddings)[0]
# do softmax normalization and sampling using random strategy
next_paper_id = self.corpus_ids[search_hits[0]['corpus_id']]
if next_paper_id == paper_id:
next_paper_id = self.corpus_ids[search_hits[1]['corpus_id']]
return str(next_paper_id)
if __name__=='__main__':
paper_id = '0704.0001'
title = "Calculation of prompt diphoton production cross sections at Tevatron and LHC energies"
abstract = '''A fully differential calculation in perturbative quantum chromodynamics is presented for
the production of massive photon pairs at hadron colliders. All next-to-leading order perturbative
contributions from quark-antiquark, gluon-(anti)quark, and gluon-gluon subprocesses are included,
as well as all-orders resummation of initial-state gluon radiation valid at next-to-next-to-leading
logarithmic accuracy. The region of phase space is specified in which the calculation is most reliable.
Good agreement is demonstrated with data from the Fermilab Tevatron, and predictions are made for more
detailed tests with CDF and DO data. Predictions are shown for distributions of diphoton pairs produced
at the energy of the Large Hadron Collider (LHC). Distributions of the diphoton pairs from the decay of
a Higgs boson are contrasted with those produced from QCD processes at the LHC, showing that enhanced
sensitivity to the signal can be obtained with judicious selection of events.'''
sample = Sample()
result = sample.sample(paper_id, abstract, title)
print(result) | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
289,
20,
9078,
198,
6738,
6827,
62,
35636,
364,
1330,
11352,
594,
8291,
16354,
11,
7736,
198,
11748,
302,
198,
11748,
2298,
293,
628,
198,
4871,
27565,
33529,
198,
220,
220,
220,
37227,
50,... | 2.646712 | 1,019 |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.builders.visual_genome.builder import VisualGenomeBuilder
from mmf.datasets.builders.visual_genome.masked_dataset import MaskedVisualGenomeDataset
@registry.register_builder("masked_visual_genome")
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
198,
6738,
8085,
69,
13,
11321,
13,
2301,
4592,
1330,
20478,
198,
6738,
8085,
69,
13,
19608,
292,
1039,
13,
50034,
13,
41464,
62,
5235,
462,
13,
38272,
1330,
1561... | 3.206186 | 97 |
from unittest import TestCase
from core.download import DownloadHelperMulti, DownLoadUrl, DownLoadUrlAdvance
# def test
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
4755,
13,
15002,
1330,
10472,
47429,
29800,
11,
5588,
8912,
28165,
11,
5588,
8912,
28165,
2782,
19259,
628,
198,
220,
220,
220,
1303,
825,
1332,
628,
198
] | 3.486486 | 37 |
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2017 allancth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
fn = lambda a: a + 1
arg = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
r = map(fn, arg)
for e in r:
print("{0}".format(e))
r = map(fn_map, arg)
for e in r:
print(">> {0}".format(e))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
477,
272,
310,
71,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
... | 3.241463 | 410 |
from pathlib import Path
EXP_NAME = 'Transformer32'
EPOCH = 20
EMBEDDING_DIM = 64
ENCODER_STACK = 6
ATTENTION_HEAD = 1
DROPOUT = 0.1
LR = 0.0001
BATCH_SIZE = 32
AUGMENTATION = None
MAX_FEATURE = 32
SMOTE_SEED = 23904
PYTORCH_SEED = 321295675063
PYTHON_SEED = 123146427
ML_SEED = 32129
MODEL_DIR = Path.cwd() / "models" / EXP_NAME
if not MODEL_DIR.exists():
MODEL_DIR.mkdir(parents=True)
FEATURES = ['Baseline Features', 'Intensity Parameters', 'Formant Frequencies', 'Bandwidth Parameters',
'Vocal Fold', 'MFCC', 'Wavelet Features', 'TQWT Features']
FEATURE_GROUPS = ['Basic Info', 'Baseline Features', 'Intensity Parameters', 'Formant Frequencies',
'Bandwidth Parameters', 'Vocal Fold', 'MFCC', 'Wavelet Features', 'TQWT Features']
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
49864,
62,
20608,
796,
705,
8291,
16354,
2624,
6,
198,
8905,
46,
3398,
796,
1160,
198,
3620,
33,
1961,
35,
2751,
62,
35,
3955,
796,
5598,
198,
24181,
3727,
1137,
62,
2257,
8120,
796,
718,
198... | 2.473016 | 315 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 11:55:42 2020
@author: esteban
"""
# Este script necesita que instales
# conda install geopandas
#conda install -c conda-forge descartes
fechaAAnalizar='2020-05-04'
alFecha=" al 04/05"
cuarentena_total=['Arica',
'Estación Central',
'Independencia',
'El Bosque',
'Quinta Normal',
'Pedro Aguirre Cerda',
'Angol','Victoria',
'Punta Arenas']
cuarentena_parcial=['San Ramón',
'La Pintana',
'Ñuñoa',
'Santiago',
'Puente Alto',
'San Bernardo']
import geopandas as gp
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
import unicodedata
def strip_accents(text):
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
s = strip_accents('àéêöhello')
#print(s)
#reload(sys)
#sys.setdefaultencoding('utf8')
## Primero necesitamos cargar los polígonos de las comunas.
# poligonos descargados desde https://www.bcn.cl/siit/mapas_vectoriales/index_html
shp_path = "../../fuentes/geometrias_comunas/comunas.shp"
comunasChile = gp.read_file(shp_path)
#aprovechamos al toque de calcular la superficie de cada comuna en km2
comunasChile['superficie']=comunasChile.to_crs({'init': 'epsg:3035'}).area/10**6
## Luego cargamos los datos del COVID19
datos_path="../../Consolidado_COVID19_Chile_Comunas.CSV"
#datos_path="../../COVID19_Chile_Comunas-casos_totales.CSV"
datosComunas = pd.read_csv(datos_path)
df=datosComunas
#################################### Aumento porcentual
############ Idea 1
fechas=df.fecha.unique()
i=1
df_old=df
while i<len(fechas):
old=df[df.fecha==fechas[i-1]][['id_comuna','casos_totales']]
old=old.rename(columns={'casos_totales':'casos_totales_old'})
# Si mantenemos la fecha del new, donde vamos a calcular los casos nuevos
new=df[df.fecha==fechas[i]][['fecha','id_comuna','casos_totales']]
new=new.rename(columns={'casos_totales':'casos_totales_new'})
old_new=pd.merge(old,new,on=['id_comuna'])
old_new['var%1periodo']=(old_new.casos_totales_new-old_new.casos_totales_old)*100/old_new.casos_totales_old
old_new=old_new[['fecha','id_comuna','var%1periodo']]
if (i==1):
#para el primero hacemos merge, porque la columna casos_nuevos no existe en df
df=pd.merge(df,old_new,how='left',on=['fecha','id_comuna'])
else:
df_aporte=pd.merge(df_old,old_new,how='left',on=['fecha','id_comuna'])
#para todo el resto tenemos que sobreescribir los datos
df[df.fecha==fechas[i]]=df_aporte[df_aporte.fecha==fechas[i]]
i=i+1
df['var%1periodo']=df['var%1periodo'].fillna(0)
df['var%1periodo']=df['var%1periodo'].replace([np.inf, -np.inf], np.nan).fillna(0)
########### Idea 2
df=df[df.fecha==fechaAAnalizar]
'''
comunasChile.columns =
Index(['objectid', 'shape_leng', 'dis_elec', 'cir_sena', 'cod_comuna',
'codregion', 'st_area_sh', 'st_length_', 'Region', 'Comuna',
'Provincia', 'geometry'],
dtype='object')
'''
## Necesitamos que las columnas tengan el mismo nombre:
comunasChile['nombre_comuna']=comunasChile.Comuna
############################################################
df=comunasChile.merge(df, on='nombre_comuna')
'''
df.columns=
Index(['id_region', 'nombre_region', 'id_comuna', 'nombre_comuna', 'poblacion',
'casos_totales', 'tasa', 'objectid', 'shape_leng', 'dis_elec',
'cir_sena', 'cod_comuna', 'codregion', 'st_area_sh', 'st_length_',
'Region', 'Comuna', 'Provincia', 'geometry'],
dtype='object')
### Los datos por Comuna tienen que ser arreglados.
# Primero, a partir de la columna de tasa y la de población, hay que
# reconstruir los datos de los casos (porque sólo informan cuando hay más
# de 4 casos)
df['casos_totales']=df.casos_totales.replace('-',0)
df['casos_totales']=df.casos_totales.fillna(0)
df['casos_totales']=df.casos_totales.astype(int)
df['tasa']=df.tasa.fillna(0)
df['tasa']=df.tasa.astype(float)
df['poblacion']=df.poblacion.fillna(0)
##Ahora corregimos los datos de los casos totales.
df['casos_totales']=(df.tasa*df.poblacion/100000).round(0).astype(int)
'''
df['nombre_comuna']=df.nombre_comuna.replace('San Juan de la Costa','S.J. de la Costa')
######################################
######################################
######################################
######################################
# CALCULO DE RIESGO = casos*poblacion/superficie
######################################
######################################
######################################
######################################
df['riesgo']=df['casos_totales']*df['poblacion']/df['superficie']
# Lo normalizamos!
df['riesgo']=df['riesgo']/df['riesgo'].max()
df['casos_pp']=df['casos_totales']/df['poblacion']*100000
df['casos_totales']=df['casos_totales'].astype(int)
df['casos_activos']=df['casos_activos'].astype(int)
df['riesgo_activos']=df['casos_activos']*df['poblacion']/df['superficie']
# Lo normalizamos!
df['riesgo_activos']=df['riesgo_activos']/df['riesgo_activos'].max()
df['casos_activos_pp']=df['casos_activos']/df['poblacion']*100000
import seaborn as sns
casos=[['casos_totales','Casos Totales','%i'],
['riesgo','Indice de Riesgo','%.2f'],
['casos_pp','Casos por 100.000 habitantes','%i'],
['riesgo_activos','Índice de Riesgo Activo','%.2f'],
['casos_activos','Casos Activos','%i'],
['casos_activos_pp','Casos Activos por 100.000 hbs.','%i'],
['var%1periodo','Variacion % 1 periodo','%i'],
]
#Datos al 18 de Abril
for caso in casos:
caracteristica=caso[0]
titulo=caso[1]
t=caso[2]
#top10=df[df.nombre_region!='Metropolitana'][['nombre_comuna',caracteristica]].sort_values(caracteristica,ascending=False).head(10)
top10=df[['nombre_comuna',caracteristica]].sort_values(caracteristica,ascending=False).head(10)
top10=top10.reset_index(drop=True)
print(top10)
paleta_rojos=['red']*10#sns.color_palette("Reds",10)#sns.color_palette("bwr",50)[40:50]
paleta_verdes=['lime']*10#sns.color_palette("Greens_r",20)[0:10]
yellow=[(255/255, 198/255, 0/255)]*10
paleta_naranjos=yellow#['yellow']*10#sns.color_palette("Oranges_r",20)[0:10]
paleta=paleta_verdes#['green']*10 #sns.color_palette("winter",10)
i=0
for bool in top10.nombre_comuna.isin(cuarentena_total):
if bool:
paleta[i]=paleta_rojos[i]#'tomato'
i+=1
i=0
for bool in top10.nombre_comuna.isin(cuarentena_parcial):
if bool:
paleta[i]=paleta_naranjos[i]#'lightyellow'
i+=1
sns.set(font_scale=2)
# sns.set_style("ticks")
sns.set_style("whitegrid")
alto=11
ancho=8
f, ax = plt.subplots(figsize=(ancho, alto))
sns.barplot(x=caracteristica, y='nombre_comuna',data=top10,palette=paleta)
sns.despine(left=True, bottom=True)
#ax.set_xticklabels(top10[caracteristica])
for p in ax.patches:
ax.annotate(t % p.get_width(), (p.get_x() + p.get_width(), p.get_y() + 1.2),
xytext=(5, 40), textcoords='offset points')
plt.xlabel(titulo)
plt.title("Top 10 Comunas según "+titulo + alFecha)
plt.ylabel('')
#plt.yticks(rotation=45)
plt.show()
plt.tight_layout()
plt.savefig('indice_comunas'+caracteristica+'.png')
#plt.figure(figsize=(12,8))
# plot barh chart with index as x values
#ax = sns.barplot(top15.index, top10.casos_totales)
#ax.get_yaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
#ax.set(xlabel="Dim", ylabel='Count')
# add proper Dim values as x labels
#ax.set_xticklabels(top15.nombre_comuna)
#for item in ax.get_xticklabels(): item.set_rotation(90)
#for i, v in enumerate(top15["nombre_comuna"].iteritems()):
# ax.text(i ,v[1], "{:,}".format(v[1]), color='m', va ='bottom', rotation=45)
#plt.tight_layout()
#plt.show()
rm= df[df.Region=='Región Metropolitana de Santiago']
gran_stgo_path="../../fuentes/gran_stgo/gran_stgo.csv"
#datos_path="../../COVID19_Chile_Comunas-casos_totales.CSV"
gran_stgo = pd.read_csv(gran_stgo_path)
rm=rm.merge(gran_stgo, left_on='nombre_comuna', right_on='nombre_comuna', sort='False')
stgo= rm[rm.gran_stgo==1]
# Control del tamaño de la figura del mapa
fig, ax = plt.subplots(figsize=(30, 30))
# Control del título y los ejes
ax.set_title(u'Comunas del Gran Santiago por Índice de Riesgo de Contagio',
pad = 20,
fontdict={'fontsize':20, 'color': 'black'})
# Control del título y los ejes
#ax.set_xlabel('Longitud')
#ax.set_ylabel('Latitud')
plt.axis('off')
#ax.legend(fontsize=1000)
# Añadir la leyenda separada del mapa
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
#map_STGO[(map_STGO.NOMBRE!='Santiago')&(map_STGO.NOMBRE!='Providencia')&(map_STGO.NOMBRE!='Ñuñoa')&(map_STGO.NOMBRE!='Las Condes')]
# Mostrar el mapa finalizado
stgo.plot(column='riesgo',
cmap='Reds', ax=ax,
legend=True,
legend_kwds={'label': "Riesgo de Contagio"},
cax=cax, zorder=5,#
missing_kwds={"color": "lightgrey",
"edgecolor": "black",
"hatch": "///"
#"label": "Missing values",
})
fig, ax = plt.subplots(figsize=(30, 30))
'''
stgo.plot(column='riesgo',cmap='Reds', ax=ax,
legend=Truelegend_kwds={'label': "Riesgo de Contagio"},
cax=cax, zorder=5,
missing_kwds={"color": "lightgrey",
"edgecolor": "black",
"hatch": "///" })#,
#"label": "Missing values",})
'''
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
201,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
37811,
201,
41972,
319,
30030,
1526,
3261,
1367,
25,
2816,
25,
3682,
12131,
201,
31,
9800,
25,
1556,
1765,
272,
2... | 2.039841 | 5,045 |
#!/usr/bin/env python
#
# Copyright (c) 2019, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Locate last Snapshot with user provided name for CVP 2018.1.x
#
# Version 0.1 22/01/2019
#
# Written by:
# Hugh Adams, Arista Networks
#
# Revision history:
# 0.1 - 22/01/2019 - initial script
#
# Requires a user with read access to "Snapshots" in CVP
# Requires a snapshot to be created with the following commands
# show inventory | json
# show lldp neighbors | json
#
# Requires CVP user credentials
#
# Import Required Libraries
import json
import re
import os, csv
import argparse
import getpass
import sys
import json
import requests
from requests import packages
from time import sleep
# Global Variables
# CVP manipulation class
# Set up classes to interact with CVP API
# serverCVP exception class
# Create a session to the CVP server
def fileOpen(filePath,fileType):
    """Read a file from disk and return its parsed contents.

    filePath - full directory and filename for file
    fileType - selects how the contents are parsed:
        json - JSON object
        txt  - list of text lines
        csv  - list of rows (Comma Separated Variable)
        xl   - xlrd workbook object
        j2   - Jinja2 Template object
    Returns the parsed object, or False if the file is missing/empty or the
    fileType is not recognised.
    """
    if os.path.exists(filePath) and os.path.getsize(filePath) > 0:
        # Parenthesised print works identically on Python 2 and 3.
        print("Retrieving file:%s" % filePath)
        if fileType.lower() == "xl":
            # NOTE(review): xlrd is not imported at the top of this script -
            # confirm the dependency before using the "xl" option.
            fileObject = xlrd.open_workbook(filePath)
        else:
            with open(filePath, 'r') as FH:
                if fileType.lower() == "json":
                    fileObject = json.load(FH)
                elif fileType.lower() == "txt":
                    fileObject = FH.readlines()
                elif fileType.lower() == "csv":
                    # Materialise the rows while the file handle is still open.
                    fileObject = list(csv.reader(FH))
                elif fileType.lower() == "j2":
                    # NOTE(review): Template (jinja2) is not imported here -
                    # confirm the dependency before using the "j2" option.
                    fileObject = Template(FH.read())
                else:
                    print("Invalid fileType")
                    fileObject = False
        return fileObject
    else:
        print("File does not exist or is empty: %s" % filePath)
        return False
def fileWrite(filePath,data,fileType,option="c"):
    """Write data to a file on disk.

    filePath - full directory and filename for file
    data     - content to write to file
    fileType
        json - JSON object
        txt  - text string / list of lines
        csv  - list of rows (Comma Separated Variable)
    option
        a - append
        w - overwrite
        c - choose automatically: append if the file already has content,
            otherwise create/overwrite
    Returns True if the file was successfully written, False otherwise.
    """
    if option.lower() == "c":
        if os.path.exists(filePath) and os.path.getsize(filePath) > 0:
            print("Appending data to file:%s" % filePath)
            fileOp = "a"
        else:
            print("Creating file %s to write data to" % filePath)
            fileOp = "w"
    else:
        fileOp = option.lower()
    try:
        with open(filePath, fileOp) as FH:
            if fileOp == "a":
                # Ensure appends land at the end of the file.
                FH.seek(0, 2)
            if fileType.lower() == "json":
                json.dump(data, FH, sort_keys = True, indent = 4, ensure_ascii = True)
                result = True
            elif fileType.lower() == "txt":
                FH.writelines(data)
                result = True
            elif fileType.lower() == "csv":
                write_csv = csv.writer(FH)
                write_csv.writerows(data)
                result = True
            else:
                print("Invalid fileType")
                result = False
    except IOError as file_error:
        print("File Write Error: %s" % file_error)
        result = False
    return result
def parseArgs():
    """Define and parse the script's command line options.

    Builds the argument parser, reads the CLI, and hands the parsed
    namespace to checkArgs for validation and interactive back-fill.
    """
    # Configure the option parser for CLI options to the script
    usage = "usage: %prog [options] userName password configlet xlfile"
    cli = argparse.ArgumentParser(description="Excel File to JSON Configlet Builder")
    cli.add_argument("--userName", help='Username to log into CVP')
    cli.add_argument("--password", help='Password for CVP user to login')
    cli.add_argument("--target", nargs="*", metavar='TARGET', default=[],
                     help='List of CVP appliances to get snapshot from URL,URL')
    cli.add_argument("--snapshot", help='CVP Snapshot name containing required data')
    cli.add_argument("--last", default="True", help="True - Only get latest snapshot for each device")
    parsed = cli.parse_args()
    return checkArgs( parsed )
def askPass( user, host ):
    """Interactively prompt for a password that was not received as a CLI option."""
    return getpass.getpass( "Password for user {} on host {}: ".format( user, host ) )
def checkArgs( args ):
    """Validate and normalise the parsed CLI arguments.

    Prompts interactively for any missing values (CVP credentials, target
    appliances, snapshot name), strips matching surrounding quotes from the
    password and snapshot options, and converts the --last flag from a
    string to a boolean.  Returns the updated args namespace.
    """
    # Set initial variables required
    getCvpAccess = False
    # React to the options provided
    # CVP Username for script to use
    if args.userName is None:
        getCvpAccess = True
    # CVP Password for script to use
    if args.password is None:
        getCvpAccess = True
    else:
        if (args.password[0] == args.password[-1]) and args.password.startswith(("'", '"')):
            # BUG FIX: the stripped value was previously assigned to a dead
            # local variable, leaving args.password quoted.
            args.password = args.password[1:-1]
    if getCvpAccess:
        args.userName = raw_input("User Name to Access CVP: ")
        args.password = askPass( args.userName, "CVP" )
    # CVP appliances to get snapshots from
    if not args.target:
        applianceNumber = int(raw_input("Number of CVP Appliance to use: "))
        loop = 0
        while loop < applianceNumber:
            args.target.append(raw_input("CVP Appliance %s: " %(loop+1)))
            loop += 1
    # Target snapshot
    if args.snapshot is None:
        args.snapshot = raw_input("Name of Snapshot to retrieve: ")
    else:
        if (args.snapshot[0] == args.snapshot[-1]) and args.snapshot.startswith(("'", '"')):
            args.snapshot = args.snapshot[1:-1]
    # Get Last Snapshot flag as a boolean
    args.last = (args.last.lower() == "true")
    return args
# Main Script
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this file - running the
    # script as-is raises NameError; confirm where main() should come from.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
13130,
11,
943,
12523,
27862,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
1... | 2.549626 | 3,073 |
# Space: O(1)
# Time: O(n)
| [
198,
2,
4687,
25,
440,
7,
16,
8,
198,
2,
3862,
25,
440,
7,
77,
8,
628,
628,
198
] | 1.684211 | 19 |
#!/usr/bin/env python
import rospy
from mavros_msgs.msg import PositionTarget
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Float32, String, Bool
if __name__ == '__main__':
    # Start the checker node and wire the UAV's pose / raw setpoint topics to
    # the Server instance's callbacks, then hand control to the ROS event loop.
    rospy.init_node('checker_1')
    # NOTE(review): Server is not defined or imported in this file - confirm
    # it is provided elsewhere before running.
    server = Server()
    rospy.Subscriber("/uav1/mavros/local_position/pose", PoseStamped , server.curpos_callback)
    rospy.Subscriber("/uav1/mavros/setpoint_raw/local", PositionTarget, server.targetwp_callback)
    # Block until shutdown so the subscriber callbacks keep firing.
    rospy.spin()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
2777,
88,
198,
6738,
285,
615,
4951,
62,
907,
14542,
13,
19662,
1330,
23158,
21745,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
37557,
1273,
13322,
198,
6738,
14367,
... | 2.606742 | 178 |
"""Models!"""
from pathlib import Path
import re
import glob
from django.db import models
from django.urls import reverse
from gbt_archive.utils import get_archive_path
class History(models.Model):
    """Stores history of CSV exports, intended for AAT consumption"""
    # Explicit auto-increment primary key, mapped to the mixed-case
    # "historyID" column of the existing schema.
    historyid = models.AutoField(db_column="historyID", primary_key=True)
    # Date the export was archived ("archivalDate" column).
    archivaldate = models.DateField(db_column="archivalDate")
    # Name of the exported AAT file ("aatFilename" column), up to 256 chars.
    aatfilename = models.CharField(db_column="aatFilename", max_length=256)
    # Short version string for the export.
    version = models.CharField(max_length=12)
# class TestOfflineOld(models.Model):
# errorid = models.AutoField(db_column="errorID", primary_key=True)
# errormsg = models.CharField(db_column="errorMsg", max_length=64)
# severity = models.IntegerField()
# class Meta:
# managed = False
# db_table = "test_offline_old"
| [
37811,
5841,
1424,
2474,
15931,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
302,
198,
11748,
15095,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
308,
18347,
6... | 2.83557 | 298 |
from talon import ctrl, noise, actions
# On a "pop" noise: click the mouse when speech recognition is enabled,
# otherwise re-enable speech recognition.
noise.register(
    "pop",
    lambda m: ctrl.mouse_click()
    if actions.speech.enabled()
    else actions.speech.enable(),
)
| [
6738,
3305,
261,
1330,
269,
14859,
11,
7838,
11,
4028,
198,
198,
3919,
786,
13,
30238,
7,
198,
220,
220,
220,
366,
12924,
1600,
198,
220,
220,
220,
37456,
285,
25,
269,
14859,
13,
35888,
62,
12976,
3419,
198,
220,
220,
220,
611,
4... | 2.666667 | 63 |
from PIL import Image
from io import BytesIO
from base64 import b64decode
import numpy
import re
import cv2
import random
from collections import namedtuple
from math import hypot
# OpenCV 3's findContours returns (image, contours, hierarchy) while OpenCV 4
# returns (contours, hierarchy); install a shim on major version 3.
# BUG FIX: split('.') - the previous whitespace split() compared the *whole*
# version string (e.g. "3.4.1") against '3', so the shim never activated.
if cv2.__version__.split('.')[0] == '3':
    old_find_contours = cv2.findContours
    # NOTE(review): new_find_contours is not defined in this file - confirm
    # the replacement wrapper is available at import time.
    cv2.findContours = new_find_contours
RotatedRect = namedtuple("RotatedRect", "center, size, angle")
DEFAULT_SIZE = (40, 50)
def read_base64(data_url):
    """Decode a base64 JPEG data URL and return it as a binarised (0/255) image."""
    encoded = re.fullmatch("data:image/jpg;base64,(.+)", data_url).group(1)
    decoded = Image.open(BytesIO(b64decode(encoded)))
    inverted_gray = ~cv2.cvtColor(numpy.array(decoded), cv2.COLOR_RGB2GRAY)
    binarised = cv2.threshold(inverted_gray, 10, 255, cv2.THRESH_BINARY)[1]
    return binarised
def get_angle(rrect):
    """Return the nearest rotation (degrees) that makes the rectangle upright."""
    if rrect.angle > -45:
        return rrect.angle
    return 90 + rrect.angle
def findContours(image):
    """Return only the contour list from cv2.findContours, hiding the
    return-signature difference between OpenCV 3 and OpenCV 4."""
    results = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    return results[-2]
def find_chars(image):
    "Find the characters in image, return them as rotated rectangles."
    # NOTE(review): get_area, get_distance and CharacterTooComplicated are not
    # defined in this file - confirm they are provided elsewhere.
    contours = findContours(image)
    # print(contours)
    rrects = [RotatedRect(*cv2.minAreaRect(c)) for c in contours]
    if len(contours) != 4: # i and j
        if len(contours) < 4: # some lost character
            raise CharacterTooComplicated()
        # Small blobs (< 40 area) are treated as detached dots of i/j.
        # NOTE(review): filter() is lazy in Python 3, and rrects is mutated in
        # the loop below while dot_indices is still being consumed - confirm
        # this ordering is intended.
        dot_indices = filter(
            lambda i: get_area(rrects[i]) < 40, range(len(rrects)))
        body_indices = list(filter(
            lambda i: get_area(rrects[i]) >= 40, range(len(rrects))))
        if len(body_indices) != 4:
            raise CharacterTooComplicated()
        for di in dot_indices:
            # NOTE(review): the key function receives a body index but never
            # references di - verify get_distance really finds the body
            # nearest to this dot.
            nearest = min(body_indices, key=get_distance)
            # print(nearest)
            # Merge the dot with its body so they form one rotated rect.
            joined_contour = numpy.concatenate(
                (contours[di], contours[nearest]))
            rrects[nearest] = RotatedRect(*cv2.minAreaRect(joined_contour))
        rrects = [rrects[i] for i in body_indices]
    return rrects
def crop_rrect(image, rrect, margin):
    """Return an axis-aligned crop of rrect (plus margin on each side) taken
    after rotating the image so the rectangle sits upright."""
    rotation = cv2.getRotationMatrix2D(rrect.center, get_angle(rrect), 1)
    out_size = (int(rrect.size[0] + margin * 2), int(rrect.size[1] + margin * 2))
    if rrect.angle <= -45:
        # Making the rectangle upright in this angle range swaps its axes.
        out_size = (out_size[1], out_size[0])
    for axis in (0, 1):
        # Shift so the rectangle centre maps to the centre of the output.
        rotation[axis, 2] += out_size[axis] / 2 - rrect.center[axis]
    cropped = cv2.warpAffine(image, rotation, out_size, cv2.INTER_LINEAR)
    return cropped
def isolate_chars(image, margin=0):
    """Locate the characters in the image and return them as cropped images,
    ordered left to right."""
    ordered = sorted(find_chars(image), key=lambda rect: rect.center)
    return [crop_rrect(image, rect, margin) for rect in ordered]
def concat_chars(chars, size=DEFAULT_SIZE):
    """Concatenate the four character images, each centred in its own
    size-shaped cell, into one horizontal strip for use in tesseract."""
    cell_w, cell_h = size
    canvas = numpy.zeros((cell_h, cell_w * 4), numpy.uint8)
    for idx in range(4):
        char_h, char_w = chars[idx].shape
        # Centre the character within its cell (floor division, as before).
        top = (cell_h + char_h) // 2 - char_h
        left = (cell_w + char_w) // 2 - char_w
        canvas[top:top + char_h,
               cell_w * idx + left:cell_w * idx + left + char_w] = chars[idx]
    return canvas
if __name__ == "__main__":
    # Demo: decode an embedded captcha, split it into characters, and show
    # the intermediate images in OpenCV windows until a key is pressed.
    image_url = "data:image/jpg;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAABFCAIAAACAFD7PAAACKUlEQVR42u3bwU3EMBBG4ZRDF9sHlVABEjTCgTqohTICp2iVaLP22DP/2HlPuREJKfmYxN5lWYkcWrgEBCwCFgGLCFgELAIWEbBS9/L+w0UAVmdS28HVAFZnUsACliMsbAGLoQUshhawsAUsYmhJYX1/vm7HBdFgKwJWCa85CFYRAZY7rOPJwzmzEcGWI6wJVJmJAMsL1hyqsJUL1kyqWogAqyesyVQxtFLDmmlVyNASwBKq+vh6Y2jNCUs7q/5hbUfyrQftzf693XZHaljyJ+A9LCdeow+tIylvXnZYeV7YA2ANPbTOVTnxaoKVZBmogmUbWjlVdbe12FSdwIr/iyyE1c6ui620qrLAyrNrVQKry1QbC9ZTOq62LgHreEKkrTywIteJ/WElfMfqpcoAS/iOVYgGWEZYHVXV2hJuN5SPopFg5dlu6K6qXIx2E6v2GZcO1vneqRaWk6pHaJ4e2vVg8GOkCVbhT4NheY8rmy359tUwsGpPCIblqqrWVvxKeWZYkd9u2OkJUFVua1U0Dyzh0CqB5ffbs5G6BKwYW4/2PwNUpa12dzQ1LJUtYNlsZfys0LZ4lMBar5rwE+hSWO3/qqqCtV67qWDFv8Wjqt2W7B1r56YWlqstYDXaUq4KbQlh4amEl367IXOoStiCKgIWmwvAUqgCFrAYV8AaBBb3ElhsLgArtzDuH7AIWETAImARsIiARcAiYBEBi4BFwCICFgGLgEVk6g+FZO0jKAvv3AAAAABJRU5ErkJggg=="
    image = read_base64(image_url)
    cv2.imshow("src", image)
    chars = isolate_chars(image)
    for i in range(4):
        cv2.imshow(f"character {i}", chars[i])
    concat = concat_chars(chars)
    cv2.imshow("concat", concat)
    # cv2.imwrite("c.png", ~concat)
    # Block until a key press so the windows stay visible.
    cv2.waitKey()
| [
6738,
350,
4146,
1330,
7412,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
2779,
2414,
1330,
275,
2414,
12501,
1098,
198,
11748,
299,
32152,
198,
11748,
302,
198,
11748,
269,
85,
17,
198,
11748,
4738,
198,
6738,
17268,
1330,
3706... | 2.010254 | 2,243 |
import ubelt as ub
import numpy as np
from . import embeding
from . import util
__all__ = ['InteractiveIter']
INDEXABLE_TYPES = (list, tuple, np.ndarray)
class InteractiveIter(object):
    """
    Choose next value interactively
    iterable should be a list, not a generator. sorry
    """
    def __init__(iiter, iterable=None, enabled=True, startx=0,
                 default_action='next', custom_actions=[], wraparound=False,
                 display_item=False, verbose=True):
        r"""
        Args:
            iterable (None): (default = None)
            enabled (bool): (default = True)
            startx (int): (default = 0)
            default_action (str): (default = 'next')
            custom_actions (list): list of 4-tuple (name, actions, help, func) (default = [])
            wraparound (bool): (default = False)
            display_item (bool): (default = False)
            verbose (bool): verbosity flag(default = True)
        Example:
            >>> # DISABLE_DOCTEST
            >>> from xdev.interactive_iter import *  # NOQA
            >>> iterable = [1, 2, 3]
            >>> enabled = True
            >>> startx = 0
            >>> default_action = 'next'
            >>> custom_actions = []
            >>> wraparound = False
            >>> display_item = True
            >>> verbose = True
            >>> iiter = InteractiveIter(iterable, enabled, startx, default_action, custom_actions, wraparound, display_item, verbose)
            >>> for _ in iiter:
            >>>     pass
        Example:
            >>> # DISABLE_DOCTEST
            >>> # Interactive matplotlib stuff
            >>> from xdev.interactive_iter import *  # NOQA
            >>> import kwimage
            >>> import kwplot
            >>> kwplot.autompl()
            >>> keys = list(kwimage.grab_test_image.keys())
            >>> iterable = [kwimage.grab_test_image(key) for key in keys]
            >>> iiter = InteractiveIter(iterable)
            >>> for img in iiter:
            >>>     kwplot.imshow(img)
            >>>     InteractiveIter.draw()
        """
        iiter.wraparound = wraparound
        iiter.enabled = enabled
        iiter.iterable = iterable
        # NOTE(review): this loop checks nothing - both branches are pass, and
        # the isinstance test inspects the list rather than each tuple.
        # Confirm whether a per-action format validation was intended here.
        for actiontup in custom_actions:
            if isinstance(custom_actions, tuple):
                pass
            else:
                pass
        # Split custom actions into their (name, keys, help) part and the
        # callable part so they can be merged with the built-in actions.
        iiter.custom_actions = util.take_column(custom_actions, [0, 1, 2])
        iiter.custom_funcs = util.take_column(custom_actions, 3)
        iiter.action_tuples = [
            # (name, list, help)
            ('next', ['n'], 'move to the next index'),
            ('prev', ['p'], 'move to the previous index'),
            ('reload', ['r'], 'stay at the same index'),
            ('index', ['x', 'i', 'index'], 'move to that index'),
            ('set', ['set'], 'set current index value'),
            ('ipy', ['ipy', 'ipython', 'cmd'], 'start IPython'),
            ('quit', ['q', 'exit', 'quit'], 'quit'),
        ] + iiter.custom_actions
        # Pressing plain <enter> (empty answer) triggers the default action.
        default_action_index = util.take_column(iiter.action_tuples, 0).index(default_action)
        iiter.action_tuples[default_action_index][1].append('')
        iiter.action_keys = {tup[0]: tup[1] for tup in iiter.action_tuples}
        iiter.index = startx
        iiter.display_item = display_item
        iiter.verbose = verbose
    @classmethod
    def eventloop(cls, custom_actions=[]):
        """
        For use outside of iteration wrapping. Makes an interactive event loop
        custom_actions should be specified in format
        [dispname, keys, desc, func]
        """
        # A single dummy item keeps the loop alive until the user quits.
        iiter = cls([None], custom_actions=custom_actions, verbose=False)
        print('[IITER] Begining interactive main loop')
        for _ in iiter:
            pass
        return iiter
    def handle_ans(iiter, ans_):
        """
        preforms an actionm based on a user answer
        """
        # NOTE(review): chack_if_answer_was and parse_str_value are not
        # defined in this module - confirm where they come from.
        ans = ans_.strip(' ')
        # Handle standard actions
        if ans in iiter.action_keys['quit']:
            raise StopIteration()
        elif ans in iiter.action_keys['prev']:
            iiter.index -= 1
        elif ans in iiter.action_keys['next']:
            iiter.index += 1
        elif ans in iiter.action_keys['reload']:
            iiter.index += 0
        elif chack_if_answer_was(iiter.action_keys['index']):
            try:
                iiter.index = int(parse_str_value(ans))
            except ValueError:
                print('Unknown ans=%r' % (ans,))
        elif chack_if_answer_was(iiter.action_keys['set']):
            try:
                # NOTE(review): eval() on raw user input - acceptable for an
                # interactive debugging tool, but never expose to untrusted input.
                iiter.iterable[iiter.index] = eval(parse_str_value(ans))
            except ValueError:
                print('Unknown ans=%r' % (ans,))
        elif ans in iiter.action_keys['ipy']:
            # Signal the caller to drop into an IPython shell.
            return 'IPython'
        else:
            # Custom interactions
            for func, tup in zip(iiter.custom_funcs, iiter.custom_actions):
                key = tup[0]
                if chack_if_answer_was(iiter.action_keys[key]):
                    value = parse_str_value(ans)
                    # cal custom function
                    print('Calling custom action func')
                    import inspect
                    argspec = inspect.getfullargspec(func)
                    if len(argspec.args) == 3:
                        # Forgot why I had custom functions take args in the first place
                        func(iiter, key, value)
                    else:
                        func()
                    # Custom funcs dont cause iteration
                    return False
            print('Unknown ans=%r' % (ans,))
            return False
        return True
    @classmethod
    def draw(iiter):
        """
        in the common case where InteractiveIter is used to view matplotlib
        figures, you will have to draw the figure manually. This is a helper
        for that task.
        """
        # NOTE(review): decorated @classmethod but the first parameter is
        # named iiter (it receives the class) - confirm naming is intentional.
        from matplotlib import pyplot as plt
        fig = plt.gcf()
        fig.canvas.draw()
| [
11748,
20967,
2120,
355,
20967,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
1330,
11525,
278,
198,
6738,
764,
1330,
7736,
198,
198,
834,
439,
834,
796,
37250,
9492,
5275,
29993,
20520,
628,
198,
12115,
6369,
17534,
62,
9936,
47,... | 2.043153 | 2,943 |
import cv2 as cv
import numpy as np
import kociemba
# Toggle extra debug windows and overlays.
DEBUG = False
# Tiny offset added to coordinates to avoid exact-zero denominators in the
# line-intersection maths.
eps = 0.00001
# Sticker colours sampled from the first visible corner (3 faces).
firstRead = []
# Sticker colours sampled from the opposite corner (remaining 3 faces).
secondRead = []
firstDone = False
cam = cv.VideoCapture(0)
cam.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
W, H = int(cam.get(cv.CAP_PROP_FRAME_WIDTH)), int(cam.get(cv.CAP_PROP_FRAME_HEIGHT))
if W != 1280 or H != 720:
    print("WARNING!!! This software was prepared according to 1280x720 camera resolution, but your resolution is %dx%d, this may or may not cause problems" % (W, H))
# Face colours in OpenCV's BGR channel order.
color_white = (255, 255, 255)
color_yellow = (0, 255, 255)
color_red = (0, 0, 255)
color_orange = (0, 162, 255)
color_green = (0, 255, 0)
color_blue = (255, 0, 0)
while True:
    # NOTE(review): several helpers used below (area, distance, intersection,
    # plus, minus, times, length, turnHSV, fill, getcolor) are not defined in
    # this file - confirm they are provided elsewhere before running.
    isTrue, raw = cam.read()
    if isTrue == False:
        break
    # Mirror the frame so on-screen movement matches the user's movement.
    raw = cv.flip(raw, 1)
    if firstDone == False:
        raw = cv.rectangle(raw, (0, H-40), (W, H), (55, 55, 55), -1)
        raw = cv.putText(raw, "Show one corner of the cube to the camera, Q to exit", (10, H-12), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    else:
        # Badge in the corner indicates the first corner was captured.
        raw = cv.circle(raw, (40, H-50), 100, (255, 255, 255), -1)
        raw = cv.line(raw, (40, H-50), (100, H-110), (0, 150, 0), 10)
        raw = cv.line(raw, (10, H-80), (40, H-50), (0, 150, 0), 10)
        raw = cv.rectangle(raw, (0, H-40), (W, H), (55, 55, 55), -1)
        raw = cv.putText(raw, "Now show the opposite corner to the camera, Q to exit", (10, H-12), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    #* canny edge detection
    blur = cv.medianBlur(raw, 7)
    canny = cv.Canny(blur, 50, 150)
    scanning_areas = canny.copy()
    canny_gray = canny.copy()
    canny = cv.cvtColor(canny, cv.COLOR_GRAY2BGR)
    #* Draw cube skeleton
    pts = np.array([
        [665, 115],
        [885, 200],
        [855, 428],
        [675, 571],
        [490, 434],
        [454, 211]
    ], np.int32)
    pts.reshape((-1, 1, 2))
    if DEBUG:
        canny = cv.polylines(canny, [pts], True, (0, 255, 0), 3)
        canny = cv.circle(canny, (675, 342), 30, (0, 0, 255))
        canny = cv.circle(canny, (675, 342), 50, (0, 0, 255))
    else:
        raw = cv.polylines(raw, [pts], True, (0, 255, 0), 3)
        raw = cv.line(raw, (885, 200), (675, 342), (0, 255, 0), 3)
        raw = cv.line(raw, (675, 571), (675, 342), (0, 255, 0), 3)
        raw = cv.line(raw, (454, 211), (675, 342), (0, 255, 0), 3)
    cube_area = area(pts)
    #* Draw two circles centered at the corner, find intersection points with edges
    little_circle_points = []
    big_circle_points = []
    points = cv.ellipse2Poly((675, 342), (30, 30), 0, 0, 360, 1)
    for (x, y) in points:
        if canny_gray[y, x] == 255:
            if len(little_circle_points) == 0 or distance((x, y), little_circle_points[-1]) > 30:
                little_circle_points.append((x, y))
    points = cv.ellipse2Poly((675, 342), (50, 50), 0, 0, 360, 1)
    for (x, y) in points:
        if canny_gray[y, x] == 255:
            if len(big_circle_points) == 0 or distance((x, y), big_circle_points[-1]) > 30:
                big_circle_points.append((x, y))
    all_edges_found = False
    if len(little_circle_points) > 0 and len(big_circle_points) > 0 and distance(little_circle_points[0], big_circle_points[0]) < 22:
        canny = cv.line(canny, little_circle_points[0], big_circle_points[0], (0, 255, 0), 2)
    if len(little_circle_points) > 1 and len(big_circle_points) > 1 and distance(little_circle_points[1], big_circle_points[1]) < 22:
        canny = cv.line(canny, little_circle_points[1], big_circle_points[1], (0, 255, 0), 2)
    if len(little_circle_points) > 2 and len(big_circle_points) > 2 and distance(little_circle_points[2], big_circle_points[2]) < 22:
        canny = cv.line(canny, little_circle_points[2], big_circle_points[2], (0, 255, 0), 2)
        all_edges_found = True
    if all_edges_found:
        #* All found points
        x1, y1 = little_circle_points[0][0] + eps, little_circle_points[0][1] + eps
        x2, y2 = big_circle_points[0][0], big_circle_points[0][1]
        x3, y3 = little_circle_points[1][0] + eps, little_circle_points[1][1] + eps
        x4, y4 = big_circle_points[1][0], big_circle_points[1][1]
        x5, y5 = little_circle_points[2][0] + eps, little_circle_points[2][1] + eps
        x6, y6 = big_circle_points[2][0], big_circle_points[2][1]
        #* Find middle corner
        axis1, center_y1 = intersection(x1, y1, x2, y2, x3, y3, x4, y4)
        axis2, center_y2 = intersection(x3, y3, x4, y4, x5, y5, x6, y6)
        center_x3, center_y3 = intersection(x5, y5, x6, y6, x1, y1, x2, y2)
        center_x, center_y = (axis1 + axis2 + center_x3)/3, (center_y1 + center_y2 + center_y3)/3
        center = int(center_x), int(center_y)
        if center_x > 100000 or center_y > 100000:
            continue
        if DEBUG and 0 < center_x < 1000 and 0 < center_y < 1000:
            canny = cv.circle(canny, center, 5, (255, 255, 0), -1)
        #* Find corners near middle corner
        dilated_edges = cv.dilate(canny, np.ones((10, 10), np.uint8))
        dx, dy = big_circle_points[0][0] - little_circle_points[0][0], big_circle_points[0][1] - little_circle_points[0][1]
        dx, dy = dx/length((dx, dy)), dy/length((dx, dy))
        corner1_x, corner1_y = center_x + dx*200, center_y + dy*200
        while 0 < corner1_x < W and 0 < corner1_y < H:
            if dilated_edges[int(corner1_y), int(corner1_x)].all() == 0:
                break
            corner1_x += dx
            corner1_y += dy
        corner1_x -= 5*dx
        corner1_y -= 5*dy
        dx, dy = big_circle_points[1][0] - little_circle_points[1][0], big_circle_points[1][1] - little_circle_points[1][1]
        dx, dy = dx/length((dx, dy)), dy/length((dx, dy))
        corner2_x, corner2_y = center_x + dx*200, center_y + dy*200
        canny = cv.circle(canny, (int(corner2_x), int(corner2_y)), 10, (0, 0, 255))
        while 0 < corner2_x < W and 0 < corner2_y < H:
            if dilated_edges[int(corner2_y), int(corner2_x)].all() == 0:
                break
            corner2_x += dx
            corner2_y += dy
        corner2_x -= 5*dx
        corner2_y -= 5*dy
        dx, dy = big_circle_points[2][0] - little_circle_points[2][0], big_circle_points[2][1] - little_circle_points[2][1]
        dx, dy = dx/length((dx, dy)), dy/length((dx, dy))
        corner3_x, corner3_y = center_x + dx*200, center_y + dy*200
        while 0 < corner3_x < W and 0 < corner3_y < H:
            if dilated_edges[int(corner3_y), int(corner3_x)].all() == 0:
                break
            corner3_x += dx
            corner3_y += dy
        corner3_x -= 5*dx
        corner3_y -= 5*dy
        corner1 = (int(corner1_x), int(corner1_y))
        corner2 = (int(corner2_x), int(corner2_y))
        corner3 = (int(corner3_x), int(corner3_y))
        if DEBUG:
            canny = cv.circle(canny, corner1, 10, (0, 0, 255))
            canny = cv.circle(canny, corner2, 10, (0, 0, 255))
            canny = cv.circle(canny, corner3, 10, (0, 0, 255))
        #* Estimate other corners
        far_corner1 = plus(minus(corner1, center), corner2)
        far_corner2 = plus(minus(corner2, center), corner3)
        far_corner3 = plus(minus(corner3, center), corner1)
        far_corner1 = minus(far_corner1, times(0.13, minus(far_corner1, center)))
        far_corner2 = minus(far_corner2, times(0.13, minus(far_corner2, center)))
        far_corner3 = minus(far_corner3, times(0.13, minus(far_corner3, center)))
        far_corner1 = (int(far_corner1[0]), int(far_corner1[1]))
        far_corner2 = (int(far_corner2[0]), int(far_corner2[1]))
        far_corner3 = (int(far_corner3[0]), int(far_corner3[1]))
        #* Check if calculated area and skeleton area matches
        unsuccessful = False
        calculated_area = area([corner1, far_corner1, corner2, far_corner2, corner3, far_corner3])
        error = abs(calculated_area - cube_area)/cube_area
        if error < 0.1:
            if DEBUG:
                canny = cv.circle(canny, far_corner1, 10, (0, 0, 255))
                canny = cv.circle(canny, far_corner2, 10, (0, 0, 255))
                canny = cv.circle(canny, far_corner3, 10, (0, 0, 255))
            scanning_areas = cv.circle(scanning_areas, corner1, 10, (255, 255, 255))
            scanning_areas = cv.circle(scanning_areas, corner2, 10, (255, 255, 255))
            scanning_areas = cv.circle(scanning_areas, corner3, 10, (255, 255, 255))
            scanning_areas = cv.circle(scanning_areas, far_corner1, 10, (255, 255, 255))
            scanning_areas = cv.circle(scanning_areas, far_corner2, 10, (255, 255, 255))
            scanning_areas = cv.circle(scanning_areas, far_corner3, 10, (255, 255, 255))
            #* Divide faces and extract colors
            read = []
            for faces in range(3):
                if faces == 0:
                    axis1 = minus(corner1, center)
                    axis2 = minus(corner2, center)
                elif faces == 1:
                    axis1 = minus(corner2, center)
                    axis2 = minus(corner3, center)
                else:
                    axis1 = minus(corner3, center)
                    axis2 = minus(corner1, center)
                for i in range(3):
                    for j in range(3):
                        piece_corner1 = plus(center, plus(times( i /3, axis1), times( j /3, axis2)))
                        piece_corner2 = plus(center, plus(times((i+1)/3, axis1), times( j /3, axis2)))
                        piece_corner3 = plus(center, plus(times( i /3, axis1), times((j+1)/3, axis2)))
                        piece_corner4 = plus(center, plus(times((i+1)/3, axis1), times((j+1)/3, axis2)))
                        piece_corner1 = minus(piece_corner1, times(0.13*min(i  , j  )/3, minus(piece_corner1, center)))
                        piece_corner2 = minus(piece_corner2, times(0.13*min(i+1, j  )/3, minus(piece_corner2, center)))
                        piece_corner3 = minus(piece_corner3, times(0.13*min(i  , j+1)/3, minus(piece_corner3, center)))
                        piece_corner4 = minus(piece_corner4, times(0.13*min(i+1, j+1)/3, minus(piece_corner4, center)))
                        piece_mask = np.zeros((canny.shape[0], canny.shape[1]), np.uint8)
                        pts = np.array([
                            [piece_corner1[0], piece_corner1[1]],
                            [piece_corner2[0], piece_corner2[1]],
                            [piece_corner4[0], piece_corner4[1]],
                            [piece_corner3[0], piece_corner3[1]]
                        ], np.int32)
                        pts.reshape((-1, 1, 2))
                        piece_mask = cv.fillPoly(piece_mask, [pts], (255, 255, 255))
                        piece_mask = cv.erode(piece_mask, np.ones((35,35), np.uint8)) # erode to prevent little misplacements
                        scanning_areas = cv.bitwise_or(scanning_areas, piece_mask)
                        # uncomment for higher accuracy but hard match
                        # edge_check = cv.mean(canny, piece_mask)
                        # if edge_check[0] > 0:
                        # olmadi = True
                        #* If color picking area is so small, retreat
                        cube_area = cv.mean(piece_mask)
                        if cube_area[0] < 0.005:
                            unsuccessful = True
                        read_color = cv.mean(raw, piece_mask)
                        if DEBUG:
                            canny = cv.fillPoly(canny, [pts], (int(read_color[0]), int(read_color[1]), int(read_color[2])))
                        read.append((read_color[0], read_color[1], read_color[2]))
            if DEBUG:
                cv.imshow('scanning_areas', scanning_areas)
            if unsuccessful:
                continue
            if not firstRead:
                firstRead = read
                firstDone = True
                if DEBUG:
                    cv.imshow('first read', canny)
                    cv.imshow('first read raw', raw)
            else:
                # Only accept the second read if it differs enough from the
                # first (i.e. the user actually turned the cube around).
                difference = 0
                for i in range(len(read)):
                    for j in range(3):
                        difference += (firstRead[i][j] - read[i][j])**2
                if difference > 270000:
                    secondRead = read
                    reads = firstRead + secondRead
                    if DEBUG:
                        cv.imshow('ikinci okuma', canny)
                        cv.imshow('ikinci okuma raw', raw)
                    print(reads)
                    #* Determine which color which
                    color_groups = [[], [], [], [], [], []]
                    reads = turnHSV(reads)
                    # First 9 least saturated color is white
                    reads = reads[reads[:,1].argsort()]
                    for i in range(9):
                        color_groups[0].append(int(reads[i][3]))
                    reads = reads[9:]
                    # Other colors are determined according to their hue value
                    reads = reads[reads[:,0].argsort()]
                    for j in range(1, 6):
                        for i in range(9):
                            color_groups[j].append(int(reads[(j-1)*9 + i][3]))
                    where = []
                    for i in range(54):
                        where.append(-1)
                    for i in range(6):
                        for j in range(9):
                            where[color_groups[i][j]] = i
                    cube_list = []
                    for i in range(54):
                        cube_list.append(-1)
                    #* Find places of pieces and fill
                    fill(cube_list, where[0:9], where[13])
                    fill(cube_list, where[9:18], where[22])
                    fill(cube_list, where[18:27], where[4])
                    fill(cube_list, where[27:36], where[40])
                    fill(cube_list, where[36:45], where[49])
                    fill(cube_list, where[45:54], where[31])
                    #* Create result image
                    result = np.zeros((H, W, 3), np.uint8)
                    seperatorThickness = 2
                    for i in range(3):
                        for j in range(3):
                            px = 10 + 150 + 10
                            py = 10
                            result = cv.rectangle(result, (px + i*50, py + j*50), (px + (i+1)*50, py + (j+1)*50), getcolor(cube_list[i+j*3]), -1)
                    result = cv.rectangle(result, (px, py), (px + 100, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px + 50, py), (px + 150, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px, py + 50), (px + 150, py + 100), (0, 0, 0), seperatorThickness)
                    for i in range(3):
                        for j in range(3):
                            px = 10
                            py = 10 + 150 + 10
                            result = cv.rectangle(result, (px + i*50, py + j*50), (px + (i+1)*50, py + (j+1)*50), getcolor(cube_list[9+i+j*3]), -1)
                    result = cv.rectangle(result, (px, py), (px + 100, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px + 50, py), (px + 150, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px, py + 50), (px + 150, py + 100), (0, 0, 0), seperatorThickness)
                    for i in range(3):
                        for j in range(3):
                            px = 10 + 150 + 10
                            py = 10 + 150 + 10
                            result = cv.rectangle(result, (px + i*50, py + j*50), (px + (i+1)*50, py + (j+1)*50), getcolor(cube_list[18+i+j*3]), -1)
                    result = cv.rectangle(result, (px, py), (px + 100, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px + 50, py), (px + 150, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px, py + 50), (px + 150, py + 100), (0, 0, 0), seperatorThickness)
                    for i in range(3):
                        for j in range(3):
                            px = 10 + 150 + 10 + 150 + 10
                            py = 10 + 150 + 10
                            result = cv.rectangle(result, (px + i*50, py + j*50), (px + (i+1)*50, py + (j+1)*50), getcolor(cube_list[27+i+j*3]), -1)
                    result = cv.rectangle(result, (px, py), (px + 100, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px + 50, py), (px + 150, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px, py + 50), (px + 150, py + 100), (0, 0, 0), seperatorThickness)
                    for i in range(3):
                        for j in range(3):
                            px = 10 + 150 + 10 + 150 + 10 + 150 + 10
                            py = 10 + 150 + 10
                            result = cv.rectangle(result, (px + i*50, py + j*50), (px + (i+1)*50, py + (j+1)*50), getcolor(cube_list[36+i+j*3]), -1)
                    result = cv.rectangle(result, (px, py), (px + 100, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px + 50, py), (px + 150, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px, py + 50), (px + 150, py + 100), (0, 0, 0), seperatorThickness)
                    for i in range(3):
                        for j in range(3):
                            px = 10 + 150 + 10
                            py = 10 + 150 + 10 + 150 + 10
                            result = cv.rectangle(result, (px + i*50, py + j*50), (px + (i+1)*50, py + (j+1)*50), getcolor(cube_list[45+i+j*3]), -1)
                    result = cv.rectangle(result, (px, py), (px + 100, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px + 50, py), (px + 150, py + 150), (0, 0, 0), seperatorThickness)
                    result = cv.rectangle(result, (px, py + 50), (px + 150, py + 100), (0, 0, 0), seperatorThickness)
                    # Reorder faces into kociemba's URFDLB face order and map
                    # the numeric colour ids onto kociemba's face letters.
                    kociemba_input_style = cube_list[0:9] + cube_list[27:36] + cube_list[18:27] + cube_list[45:54] + cube_list[9:18] + cube_list[36:45]
                    kociemba_text = str(kociemba_input_style).replace('[', '').replace(']', '').replace(',', '').replace(' ', '')
                    kociemba_text = kociemba_text.replace('0', 'U')
                    kociemba_text = kociemba_text.replace('1', 'B')
                    kociemba_text = kociemba_text.replace('2', 'F')
                    kociemba_text = kociemba_text.replace('3', 'D')
                    kociemba_text = kociemba_text.replace('4', 'R')
                    kociemba_text = kociemba_text.replace('5', 'L')
                    solution = ""
                    try:
                        solution = kociemba.solve(kociemba_text)
                    except:
                        solution = "Sorry, this cube cannot be solved. Try again"
                    result = cv.putText(result, "White on top, Orange in front", (10, 520), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
                    result = cv.putText(result, solution, (10, 560), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
                    result = cv.putText(result, "R to retry, Q to quit", (10, 600), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
                    dx = (W - 650) // 6
                    dy = 500 // 4
                    if solution[0] != 'S':
                        # Draw one rotation-arrow glyph per solution move.
                        solution_array = solution.split()
                        for j in range(4):
                            for i in range(6):
                                number = j*6 + i
                                if len(solution_array) <= number:
                                    break
                                center = 650 + i*dx + dx//2, j*dy + dy//2
                                minax = min(dx, dy)
                                color = (0, 0, 0)
                                if solution_array[number][0] == 'U':
                                    color = color_white[0], color_white[1], color_white[2]
                                elif solution_array[number][0] == 'L':
                                    color = color_blue[0], color_blue[1], color_blue[2]
                                elif solution_array[number][0] == 'F':
                                    color = color_orange[0], color_orange[1], color_orange[2]
                                elif solution_array[number][0] == 'R':
                                    color = color_green[0], color_green[1], color_green[2]
                                elif solution_array[number][0] == 'B':
                                    color = color_red[0], color_red[1], color_red[2]
                                elif solution_array[number][0] == 'D':
                                    color = color_yellow[0], color_yellow[1], color_yellow[2]
                                result = cv.rectangle(result, (center[0] - int(minax*0.2), center[1] - int(minax*0.2)), (center[0] + int(minax*0.2), center[1] + int(minax*0.2)), color, -1)
                                if len(solution_array[number]) == 1:
                                    result = cv.ellipse(result, center, (int(minax*0.4), int(minax*0.4)), 0, -90, 0, (0, 255, 0), 3)
                                    result = cv.line(result, (center[0] + int(minax*0.4), center[1]), (center[0] + int(minax*0.4) + int(minax*0.05), center[1] - int(minax*0.05)), (0, 255, 0), 3)
                                    result = cv.line(result, (center[0] + int(minax*0.4), center[1]), (center[0] + int(minax*0.4) - int(minax*0.05), center[1] - int(minax*0.05)), (0, 255, 0), 3)
                                elif solution_array[number][1] == "'":
                                    result = cv.ellipse(result, center, (int(minax*0.4), int(minax*0.4)), 0, -90, -180, (0, 255, 0), 3)
                                    result = cv.line(result, (center[0] - int(minax*0.4), center[1]), (center[0] - int(minax*0.4) + int(minax*0.05), center[1] - int(minax*0.05)), (0, 255, 0), 3)
                                    result = cv.line(result, (center[0] - int(minax*0.4), center[1]), (center[0] - int(minax*0.4) - int(minax*0.05), center[1] - int(minax*0.05)), (0, 255, 0), 3)
                                else:
                                    result = cv.ellipse(result, center, (int(minax*0.4), int(minax*0.4)), 0, -90, 90, (0, 255, 0), 3)
                                    result = cv.line(result, (center[0], center[1] + int(minax*0.4)), (center[0] + int(minax*0.05), center[1] + int(minax*0.4) - int(minax*0.05)), (0, 255, 0), 3)
                                    result = cv.line(result, (center[0], center[1] + int(minax*0.4)), (center[0] + int(minax*0.05), center[1] + int(minax*0.4) + int(minax*0.05)), (0, 255, 0), 3)
                    cv.imshow("Rubik's Cube Solver", result)
                    # Hold the result screen until the user retries or quits.
                    while True:
                        option = cv.waitKey() & 0xff
                        if option == ord('r') or option == ord('R'):
                            firstDone = False
                            firstRead = []
                            secondRead = []
                            break
                        elif option == ord('q') or option == ord('Q'):
                            exit(0)
    if DEBUG:
        cv.imshow('canny', canny)
    else:
        cv.imshow("Rubik's Cube Solver", raw)
    key = cv.waitKey(20)
    if key == ord('q') or key == ord('Q'):
        break
| [
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
479,
1733,
368,
7012,
198,
198,
30531,
796,
10352,
198,
25386,
796,
657,
13,
2388,
16,
198,
11085,
5569,
796,
17635,
198,
12227,
5569,
796,
17635,
198,
... | 1.767335 | 13,556 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): "__metaclss__" is a misspelling of "__metaclass__", and even
# the correctly spelled Python 2 module-level form would only affect classes
# defined below - confirm whether this line can simply be removed.
__metaclss__=type
import random
import math
import collections
from PIL import Image, ImageDraw, ImageFont
from utils.font import FontObj
from utils.color import Color
import skimage.util
import numpy as np
import cv2
import matplotlib.pyplot as plt
import sys
import time
if __name__ == '__main__':
pass | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
4164,
37779,
824,
834,
28,
4906,
198,
198,
11748,
4738,
198,
11748,
10688,
198,
11748,
17268,
198,
6738,
350,... | 2.897638 | 127 |
from dimacs import load_file
| [
6738,
5391,
16436,
1330,
3440,
62,
7753,
198
] | 3.625 | 8 |
# Code behind module for DCAL_Custom_Mosaics.ipynb
################################
##
## Import Statments
##
################################
# Import standard Python modules
import sys
import datacube
# Import DCAL utilities containing function definitions used generally across DCAL
sys.path.append('../DCAL_utils')
################################
##
## Function Definitions
##
################################
# None.
| [
2,
6127,
2157,
8265,
329,
6257,
1847,
62,
15022,
62,
44,
8546,
873,
13,
541,
2047,
65,
198,
198,
29113,
198,
2235,
198,
2235,
17267,
5133,
902,
198,
2235,
198,
29113,
198,
198,
2,
17267,
3210,
11361,
13103,
198,
11748,
25064,
198,
1... | 4.623656 | 93 |
import fulfillment
fulfillment.core.api_key = 'YOUR_API_KEY_GOES_HERE'
# set debug to true to get print json
fulfillment.core.Debug = True
fulfillment.Warehouse.retrieveALL()
| [
11748,
32402,
198,
198,
913,
20797,
434,
13,
7295,
13,
15042,
62,
2539,
796,
705,
56,
11698,
62,
17614,
62,
20373,
62,
11230,
1546,
62,
39,
9338,
6,
198,
2,
900,
14257,
284,
2081,
284,
651,
3601,
33918,
198,
913,
20797,
434,
13,
7... | 2.854839 | 62 |
r"""
Solve Poisson equation in 2D with periodic bcs in one direction
and homogeneous Neumann in the other
\nabla^2 u = f,
Use Fourier basis for the periodic direction and Shen's Neumann basis for the
non-periodic direction.
The equation to solve is
(\nabla^2 u, v) = (f, v)
"""
import sys
import os
from sympy import symbols, cos, sin, pi
import numpy as np
from shenfun import inner, div, grad, TestFunction, TrialFunction, \
TensorProductSpace, FunctionSpace, Array, Function, comm, la, dx, \
chebyshev
# Collect basis and solver from either Chebyshev or Legendre submodules
assert len(sys.argv) == 3, "Call with two command-line arguments"
assert sys.argv[-1].lower() in ('legendre', 'chebyshev')
assert isinstance(int(sys.argv[-2]), int)
family = sys.argv[-1].lower()
Solver = chebyshev.la.Helmholtz if family == 'chebyshev' else la.SolverGeneric1ND
# Use sympy to compute a rhs, given an analytical solution
x, y = symbols("x,y", real=True)
#ue = (1-x**3)*cos(2*y)
ue = cos(2*pi*x)
fe = -ue.diff(x, 2)-ue.diff(y, 2)
# Size of discretization
N = int(sys.argv[-2])
N = (N, N)
bc = {'left': ('N', ue.diff(x, 1).subs(x, -1)), 'right': ('N', ue.diff(x, 1).subs(x, 1))}
SN = FunctionSpace(N[0], family=family, bc=bc)
K1 = FunctionSpace(N[1], family='F', dtype='d')
T = TensorProductSpace(comm, (SN, K1), axes=(0, 1))
u = TrialFunction(T)
v = TestFunction(T)
# Get f on quad points
fj = Array(T, buffer=fe)
# Compute right hand side of Poisson equation
f_hat = inner(v, fj)
# Get left hand side of Poisson equation
matrices = inner(v, -div(grad(u)))
# Create Helmholtz linear algebra solver
sol = Solver(matrices)
constraint = ((0, dx(Array(T, buffer=ue), weighted=True)/dx(Array(T, val=1), weighted=True)),)
# Solve and transform to real space
u_hat = Function(T).set_boundary_dofs() # Solution spectral space
u_hat = sol(f_hat, u_hat, constraints=constraint)
uq = T.backward(u_hat).copy()
# Compare with analytical solution
uj = Array(T, buffer=ue)
print(abs(uj-uq).max())
assert np.allclose(uj, uq)
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
plt.figure()
X = T.local_mesh(True) # With broadcasting=True the shape of X is local_shape, even though the number of datapoints are still the same as in 1D
plt.contourf(X[0], X[1], uq)
plt.colorbar()
plt.figure()
plt.contourf(X[0], X[1], uj)
plt.colorbar()
plt.figure()
plt.contourf(X[0], X[1], uq-uj)
plt.colorbar()
plt.title('Error')
plt.show()
| [
81,
37811,
198,
50,
6442,
7695,
30927,
16022,
287,
362,
35,
351,
27458,
275,
6359,
287,
530,
4571,
198,
392,
3488,
32269,
3169,
40062,
287,
262,
584,
628,
220,
220,
220,
3467,
77,
397,
5031,
61,
17,
334,
796,
277,
11,
198,
198,
11... | 2.505988 | 1,002 |
import subprocess
import itertools
command="xrandr --listmonitors"
output = subprocess.run(command.split(), stdout=subprocess.PIPE, check=True, text=True)
output = output.stdout
displays_lines = output.split('\n')[1:-1]
displays = []
for line in displays_lines:
displays.append(line.split()[-1])
options = []
for L in range(1, len(displays)):
for subset in itertools.combinations(displays, L):
options.append(subset)
for item in itertools.permutations(displays):
options.append(item)
#print(options)
for option in options:
if len(option) == 1:
print(option[0] + " ONLY")
else:
to_print = option[0]
for i in range(1, len(option)):
to_print = to_print + " + " + option[i]
print(to_print)
print("All the same")
| [
11748,
850,
14681,
198,
11748,
340,
861,
10141,
198,
198,
21812,
2625,
87,
25192,
81,
1377,
4868,
2144,
6742,
1,
198,
198,
22915,
796,
850,
14681,
13,
5143,
7,
21812,
13,
35312,
22784,
14367,
448,
28,
7266,
14681,
13,
47,
4061,
36,
... | 2.454829 | 321 |
from random import randint
import threading
# Variavel global
n_populacao = []
# funcoes
if __name__ == '__main__':
# Leitura do arquivo externo (instancias)
file = open("100.txt")
arquivo = file.read() # ler a cadeia de caracteres do arquivo .txt
instancias = arquivo.split() # separar e agrupar os caracteres
# manipulando a entrada dos dados
qtd_instancias = (len(instancias))
valor = []
peso = []
# salvar as infos do valor na lista valor
for i in range(2, qtd_instancias, 2):
valor.append(int(instancias[i]))
# salvar as infors do peso na lista peso
for i in range(3,qtd_instancias,2):
peso.append(int(instancias[i]))
# Variaveis
tam_pop = 2000 # pode ser alterada pelo usuário
max_geracao = 10
processos = 2
tx_mutacao = 5 # pode ser alterada pelo usuario (qtd de itens a sofrer mutação)
cap_max = int(instancias[1])
qtd_itens = int(instancias[0])
geracao_atual = 1
populacao = []
# Inicio do algoritmo
# 1 - Gerar a população inicial
populacao = gerar_pop(tam_pop, peso, valor, cap_max, qtd_itens)
# 2 - Avaliar a população
while (geracao_atual != max_geracao+1):
print("Geracao: ", geracao_atual)
#definir 6 processos
if (processos == 1):
itens = tam_pop/processos
i0=i1=0
x1=0
i1=int(i0+itens)
while(i1<=(tam_pop-1)):
i1=i1+1
# Processo 1
t1 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i0,i1))
t1.start()
t1.join()
if (t1.is_alive()==False):
del populacao
populacao = n_populacao
elif (processos == 2):
itens = tam_pop/processos
i0=i1=i2=0
i1=int(i0+itens)
i2=int(i1+itens)
while(i2<=(tam_pop-1)):
i2=i2+1
# Processo 1
t1 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i0,i1))
t1.start()
t1.join()
# Processo 2
t2 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i1,i2))
t2.start()
t2.join()
if (t1.is_alive()==t2.is_alive()==False):
del populacao
populacao = n_populacao
elif (processos==3):
itens = tam_pop/processos
i0=i1=i2=i3=0
i1=int(i0+itens)
i2=int(i1+itens)
i3=int(i2+itens)
while(i3<=(tam_pop-1)):
i3=i3+1
# Processo 1
t1 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i0,i1))
t1.start()
t1.join()
# Processo 2
t2 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i1,i2))
t2.start()
# Processo 3
t3 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i2,i3))
t3.start()
t2.join()
t3.join()
if (t1.is_alive()==t2.is_alive()==t3.is_alive()==False):
del populacao
populacao = n_populacao
elif (processos==4):
itens = tam_pop/processos
i0=i1=i2=i3=i4=0
i1=int(i0+itens)
i2=int(i1+itens)
i3=int(i2+itens)
i4=int(i3+itens)
while(i4<=(tam_pop-1)):
i4=i4+1
# Processo 1
t1 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i0,i1))
t1.start()
t1.join()
# Processo 2
t2 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i1,i2))
t2.start()
# Processo 3
t3 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i2,i3))
t3.start()
# Processo 4
t4 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i3,i4))
t4.start()
t2.join()
t3.join()
t4.join()
if (t1.is_alive()==t2.is_alive()==t3.is_alive()==t4.is_alive()==False):
del populacao
populacao = n_populacao
elif (processos==5):
itens = tam_pop/processos
i0=i1=i2=i3=i4=i5=0
i1=int(i0+itens)
i2=int(i1+itens)
i3=int(i2+itens)
i4=int(i3+itens)
i5=int(i4+itens)
while(i5<=(tam_pop-1)):
i5=i5+1
# Processo 1
t1 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i0,i1))
t1.start()
t1.join()
# Processo 2
t2 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i1,i2))
t2.start()
# Processo 3
t3 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i2,i3))
t3.start()
# Processo 4
t4 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i3,i4))
t4.start()
# Processo 5
t5 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i4,i5))
t5.start()
t2.join()
t3.join()
t4.join()
t5.join()
if (t1.is_alive()==t2.is_alive()==t3.is_alive()==t4.is_alive()==t5.is_alive()==False):
del populacao
populacao = n_populacao
elif (processos==6):
itens = tam_pop/processos
i0=i1=i2=i3=i4=i5=i6=0
i1=int(i0+itens)
i2=int(i1+itens)
i3=int(i2+itens)
i4=int(i3+itens)
i5=int(i4+itens)
i6=int(i5+itens)
while(i6<=(tam_pop-1)):
i6=i6+1
# Processo 1
t1 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i0,i1))
t1.start()
t1.join()
# Processo 2
t2 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i1,i2))
t2.start()
# Processo 3
t3 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i2,i3))
t3.start()
# Processo 4
t4 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i3,i4))
t4.start()
# Processo 5
t5 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i4,i5))
t5.start()
# Processo 6
t6 = threading.Thread(target=crossover, args=(populacao, tx_mutacao, peso, valor, cap_max, i5,i6))
t6.start()
t2.join()
t3.join()
t4.join()
t5.join()
t6.join()
if (t1.is_alive()==t2.is_alive()==t3.is_alive()==t4.is_alive()==t5.is_alive()==t6.is_alive()==False):
del populacao
populacao = n_populacao
geracao_atual += 1
if geracao_atual == max_geracao:
populacao.sort(reverse=True)
print("Processos:", processos)
print("Melhor solucao da geracao ", geracao_atual-1)
print("Valor: ",populacao[0][0]," Peso: ",populacao[0][1])
print("Cromossomo", populacao[0][2:])
"""
import threading
from multiprocessing import Queue
def dobro(x, que):
x = x*x
que.put(x)
print(x)
queue1 = Queue()
t1 = threading.Thread(target=dobro, args=(2,queue1))
t1.start()
print(t1)
#t1.join()
x = queue1.get()
print(x)
print(t1)
""" | [
6738,
4738,
1330,
43720,
600,
198,
11748,
4704,
278,
198,
2,
569,
10312,
626,
3298,
198,
77,
62,
12924,
377,
330,
5488,
796,
17635,
198,
2,
1257,
1073,
274,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
19... | 1.867358 | 3,566 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-25 11:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2177,
12,
2998,
12,
1495,
1367,
25,
4310,
201,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
201,... | 2.586667 | 75 |
# Generated by Django 3.0 on 2020-10-16 11:43
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
319,
12131,
12,
940,
12,
1433,
1367,
25,
3559,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
#!/usr/bin/env python
#
# Copyright @2014 blackshirtmuslim@yahoo.com
# Licensed: see Python license
"""Utility module"""
import json
import uuid
import hashlib
from decimal import Decimal
from datetime import date, datetime
from tornado import concurrent, ioloop
from concurrent.futures import ThreadPoolExecutor
def generate_hash(password, random_key=None):
"""Membuat password hash dengan random key 'random_key' menggunakan sha512 dari hashlib"""
if not random_key:
random_key = uuid.uuid4().hex
hashed_pass = hashlib.sha512(str(password).encode() + random_key.encode()).hexdigest()
return hashed_pass, random_key
def verify_password(password, hashed_password, key):
"""Verify password"""
computed_hash, key = generate_hash(password, key)
return computed_hash == hashed_password
# Some data types we want to check for.
# Turn a good precise decimal into a more JavaScript-friendly float.
# Use an isoformat string for dates and times.
# # from http://nchls.com/post/serializing-complex-python-data-json/
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2488,
4967,
2042,
15600,
14664,
2475,
31,
40774,
13,
785,
198,
2,
49962,
25,
766,
11361,
5964,
198,
198,
37811,
18274,
879,
8265,
37811,
198,
198,
11748,
33918,
198,
... | 3.151786 | 336 |
from main import summation,summation1
| [
6738,
1388,
1330,
30114,
341,
11,
82,
13929,
341,
16,
628
] | 3.545455 | 11 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# В строке могут присутствовать скобки как круглые, так и квадратные скобки. Каждой
# открывающей скобке соответствует закрывающая того же типа (круглой – круглая,
# квадратной- квадратная). Напишите рекурсивную функцию, проверяющую правильность
# расстановки скобок в этом случае.
if __name__ == '__main__':
# Проверка
print(task(input()))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
12466,
240,
220,
21727,
20375,
21169,
25443,
118,
16843,
12466,
120,
25443,
111,
35072,
20375,
12466,
... | 1.094987 | 379 |
from functools import partial, reduce
from itertools import chain, product
from math import sqrt
def cluster_iter(clustered, point, threshold):
"""Add a point to a grid-like cluster structure.
This allows comparing point distances only to clusters from nearby grids, not to all clusters. Useful when there are many clusters expected."""
coords, object_ = point
point_grid_cell = get_grid_cell(*coords, threshold=threshold)
nearby_grid_cells = get_nearby_grid_cells(point_grid_cell)
possible_nearby_cluster_locations = chain(
*[(location for location in clustered.get(grid_cell, {})) for grid_cell in nearby_grid_cells]
)
nearest_cluster_with_distance = reduce(nearest_location, possible_nearby_cluster_locations, None)
if nearest_cluster_with_distance:
nearest_cluster_location, _nearest_cluster_distance = nearest_cluster_with_distance
else:
nearest_cluster_location = None
if nearest_cluster_location:
cluster_grid_cell = get_grid_cell(*nearest_cluster_location, threshold=threshold)
cluster = clustered[cluster_grid_cell].pop(nearest_cluster_location)
cluster_object_count = len(cluster)
new_cluster_location = (
(nearest_cluster_location[0] * cluster_object_count + coords[0]) / (cluster_object_count + 1),
(nearest_cluster_location[1] * cluster_object_count + coords[1]) / (cluster_object_count + 1),
)
else:
cluster = []
new_cluster_location = coords
cluster.append(point)
new_cluster_grid_cell = get_grid_cell(*new_cluster_location, threshold=threshold)
clustered.setdefault(new_cluster_grid_cell, {})
clustered[new_cluster_grid_cell][new_cluster_location] = cluster
return clustered
def cluster(points, threshold):
"""Cluster points using distance-based clustering algorithm.
Arguments:
points — an iterable of two-element point tuples, each containing:
• a two-element tuple with X and Y coordinates,
• the actual object being clustered;
threshold — if a point is included into a cluster, it must be closer to its centroid than this value.
Return value:
an iterable of two-element cluster tuples, each containing:
• a two-element tuple with X and Y coordinates of the cluster centroid;
• a list of objects belonging to the cluster.
Cluster’s centroid is defined as average coordinates of the cluster’s members.
"""
cluster_iter_for_threshold = partial(cluster_iter, threshold=threshold)
clustered = reduce(cluster_iter_for_threshold, points, {})
return chain(
*[((location, [object_ for coords, object_ in points]) for location, points in grid_clusters.items())
for grid_clusters in clustered.values()]
)
| [
6738,
1257,
310,
10141,
1330,
13027,
11,
4646,
198,
6738,
340,
861,
10141,
1330,
6333,
11,
1720,
198,
6738,
10688,
1330,
19862,
17034,
628,
628,
198,
200,
198,
4299,
13946,
62,
2676,
7,
565,
436,
1068,
11,
966,
11,
11387,
2599,
198,
... | 2.902287 | 962 |
from django.http import HttpResponse
from django.shortcuts import render, render_to_response, RequestContext
from uno.models import Question_m
from django.views.generic import FormView
from uno.forms import Question_f
import requests
#rom uno.info import information
from copy import deepcopy
from uno.a import info1 as information
#from django.template.defaulttags import register
pro = []
'''
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
'''
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
201,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
8543,
62,
1462,
62,
26209,
11,
19390,
21947,
201,
198,
6738,
555,
78,
13,
27530,
1330,
18233,
62,
76,
201,
198,
673... | 3.2 | 155 |
from .user_id import UserID
from .channel_id import ChannelID
from .enum_converter import EnumConverter
from .boolean_converter import BooleanConverter
from .colour_converter import ColourConverter
from .filtered_user import FilteredUser, FilteredMember
from .number_converter import NumberConverter
| [
6738,
764,
7220,
62,
312,
1330,
11787,
2389,
198,
6738,
764,
17620,
62,
312,
1330,
11102,
2389,
198,
6738,
764,
44709,
62,
1102,
332,
353,
1330,
2039,
388,
3103,
332,
353,
198,
6738,
764,
2127,
21052,
62,
1102,
332,
353,
1330,
41146,
... | 3.448276 | 87 |
import torch as t
from torch import nn
import math as m
import torchvision.models as models
import numpy as np
import matplotlib.pyplot as plt
import copy
'''neural_net.py: Custom network object deriving from nn.Module to track the architecture '''
__author__ = "Luis Quinones"
__email__ = "luis@complicitmatter.com"
__status__ = "Prototype"
class Neural_Network(nn.Module):
'''
The neural network object sits a level above the classifier to
store relevant properties and values. The classifier uses nn.LogSoftmax so use the
negative log likelihood loss criterion nn.NLLLoss
Args:
inputs (int): The number of inputs.
hidden_sizes (list of ints): The hidden layer sizes.
outputs (int): The number of outputs.
hidden_activation (str): The hidden layer activation functions (ex. relu, sigmoid, tahn).
device (str): The gpu or the cpu.
optimizer_name (str): The optimizer name ('sgd' or 'adam') to update the weights and gradients
dropout (float): The dropout rate, value to randomly drop input units through training.
learn_rate (float): The learning rate value, used along with the gradient to update the weights,
small values ensure that the weight update steps are small enough.
Attributes:
inputs (int): This is where we store the input count,
hidden_sizes (list of int): This is where we store the hidden layer sizes,
outputs (int): This is where we store the output size,
hidden_activation (str): This is where we store the hidden activation type,
dropout (float): This is where we store the random input unit dropout rate,
learn_rate (float): This is where we store the learn rate value,
processing_device (str): This is where we store the device to calculate the results,
linear_layers (list): This is where we store the values to sequentially build the classifier,
model (torch.nn.module or torchvision model): Where either the generated classifier or the loaded model is stored,
optimizer (torch.optim): This is where we store the optimizer used,
criterior (torch.nn.module.loss): This is where we store the loss function type,
device (str): This is where we store the device,
epochs_completed (int): This is where we store how many total epochs of training this model has.
'''
def generate_classifier(self):
'''Generates the nn.module container Sequential classfier as the default for this class.
Args:
None.
Raises:
TODO: Update exceptions with error_handling class.
Returns:
None.
'''
self.linear_layers = []
n = len(self.data)
for i in range(n-1):
self.linear_layers.append(nn.Linear(self.data[i],self.data[(i + 1) % n]))
if i != n-2:
if self.hidden_activation == 'relu':
self.linear_layers.append(nn.ReLU())
elif self.hidden_activation == 'sigmoid':
self.linear_layers.append(nn.Sigmoid())
elif self.hidden_activation == 'tanh':
self.linear_layers.append(nn.Tanh())
self.linear_layers.append(nn.Dropout(self.dropout))
self.linear_layers.append(nn.LogSoftmax(dim = 1))
# expand the list into sequential args
self.model = nn.Sequential(*self.linear_layers)
def train_network(self, train_data, validation_data, epochs = 1, load_best_params = False, plot = False):
'''Trains the model, requires the criterion and optimizer to be passed into the class args before hand.
TODO: add exception handling for optimizer and criterion as None values.
Args:
train_data (torch.utils.data.dataloader.DataLoader): The training torch data loader.
validation_data (torch.utils.data.dataloader.DataLoader): The validation torch data loader.
epochs (int): The number of epochs for training.
load_best_params (bool): If true then we will load the model_state_dict from the highest accuracy iteration
plot (bool): If true we plot both losses.
Raises:
TODO: Add exceptions.
Returns:
None.
'''
# move the model to whatever device we have
self.model.to(self.device)
# if we loaded the model in eval mode and want to train switch it
if not self.model.training:
self.model.train()
iteration, running_loss = 0, 0
highest_accuracy, high_acc_iter, high_acc_epoch = 0, 0, 0
training_loss_set, validation_loss_set = [], []
best_params = None
for epoch in range(epochs):
batch_iteration = 0
for x, y_labels in train_data:
# move to whatever device we have
x, y_labels = x.to(self.device), y_labels.to(self.device)
# zero out the gradients
self.optimizer.zero_grad()
# forward pass - get the log probabilities (logits / scores)
output = self.model(x)
# calculate the loss
loss = self.criterion(output, y_labels)
# backprop - calculate the gradients for the parameters
loss.backward()
# parameter update based on gradient
self.optimizer.step()
# update stats
running_loss += loss.item()
iteration += 1
batch_iteration += 1
else:
# Validation Process
validation_loss, accuracy = self.validate_network(validation_data)
training_loss = running_loss/len(train_data)
print('Model has a total of {} training epochs completed.'.format(self.epochs_completed))
print('Active session Epoch {} out of {}'.format(epoch + 1, epochs))
print('Currently model has Accuracy of {}% \nCurrent training loss is {} \
\nCurrent validation loss is {}'.format(accuracy,
training_loss, validation_loss))
training_loss_set.append(training_loss)
validation_loss_set.append(validation_loss)
print('-------------')
running_loss = 0
# Track best run
if accuracy > highest_accuracy:
highest_accuracy = accuracy
high_acc_iter = batch_iteration
high_acc_epoch = epoch + 1
if load_best_params:
best_params = copy.deepcopy(self.model.state_dict())
# Set the model back to train mode, enable dropout again
self.model.train()
self.epochs_completed += 1
t_slope, v_slope = self.check_overfitting(training_loss_set, validation_loss_set, plot)
print('Slope of linear reg training curve fit is {} \nSlope of linear reg Validation curve fit is {}'.format(t_slope,
v_slope))
print('Training session highest accuracy was {} on epoch {} batch iteration {}'.format(highest_accuracy,
high_acc_epoch,
high_acc_iter))
if load_best_params:
self.model.load_state_dict(best_params)
print('Params from {} epoch, {} batch iteration were loaded'.format(high_acc_epoch, high_acc_iter))
def validate_network(self, data):
'''Validate our model to check the loss and accuracy.
Args:
data (torch.utils.data.dataloader.DataLoader): The data we want to validate as torch data loader.
Raises:
TODO: Add exceptions.
Returns:
loss,accuracy (tuple): The loss and accuracy of the validation.
'''
# enable eval mode, turn off dropout
self.model.eval()
# turn off the gradients since we are not updating params
with t.no_grad():
batch_loss = 0
batch_accuracy = 0
# validation pass
for x, y_labels in data:
# move to device
x, y_labels = x.to(self.device), y_labels.to(self.device)
output = self.model(x)
# update loss and extract tensor as python float
batch_loss += self.criterion(output, y_labels).item()
# calculate the probability
probability = t.exp(output)
# get the top n indexes and values
_, top_class = probability.topk(1, dim=1)
# reshape top class to match label and get binary value from equals,
# check if the prediction matches label
equals = top_class == y_labels.view(*top_class.shape)
# have to convert byte tensor to float tensor and get accuracy
batch_accuracy += t.mean(equals.type(t.FloatTensor)).item()
test_accuracy = (batch_accuracy / len(data))*100
test_loss = batch_loss / len(data)
return test_loss, test_accuracy
def check_overfitting(self, train_losses, validation_losses, plot = False):
'''Validate our model to check the loss and accuracy
Args:
train_losses (list of floats): The list of training losses per epoch.
validation_losses (list of floats): The list of validation losses per epoch.
plot (bool): If true we plot both losses.
Raises:
TODO: Add exceptions.
Returns:
slopes (tuple): The slopes of the linear reg curve fits for both validation/training.
'''
# Data
tl_x_val = np.arange(0, len(train_losses))
vl_x_val = np.arange(0, len(validation_losses))
# To numpy
train_data = np.array([tl_x_val, train_losses])
validate_data = np.array([vl_x_val, validation_losses])
# Least squares polynomial fit.
train_slope, train_intercept = np.polyfit(train_data[0], train_data[1], 1)
validation_slope, validation_intercept = np.polyfit(validate_data[0], validate_data[1], 1)
if plot:
plt.plot(train_data[0], train_data[1], 'o', label='training loss')
plt.plot(validate_data[0], validate_data[1], 'o', label='validation loss')
plt.plot(train_data[0], train_intercept + train_slope*train_data[0], 'r', label='train_regg')
plt.plot(validate_data[0], validation_intercept + validation_slope*validate_data[0], 'r', label='val_regg')
plt.legend()
plt.show()
return train_slope, validation_slope
def save_model_checkpoint(self, full_path, training_class_to_idx):
'''Save the model checkpoint.
Args:
full_path (str): The full path to save the checkpoint to
training_class_to_idx (dic of ints): This is where we store the dictionary mapping the name of the class to the index (label)
Raises:
TODO: Add exceptions
Returns:
None
'''
net_data_dic = {'input_count': self.inputs,
'hidden_sizes': self.hidden_sizes,
'outputs': self.outputs,
'h_activation': self.hidden_activation,
'dropout': self.dropout,
'learn_rate': self.learn_rate,
'epochs_completed' : self.epochs_completed}
checkpoint = {'data' : net_data_dic,
'model' : self.model,
'classifier' : self.model.classifier,
'optimizer.state_dict' : self.optimizer.state_dict(),
'state_dict' : self.model.state_dict(),
'device' : self.device,
'class_to_idx': training_class_to_idx}
t.save (checkpoint, full_path)
| [
11748,
28034,
355,
256,
198,
6738,
28034,
1330,
299,
77,
198,
11748,
10688,
355,
285,
198,
11748,
28034,
10178,
13,
27530,
355,
4981,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,... | 2.182765 | 5,663 |
import argparse
import logging
import os
import unittest
from keras.layers import recurrent
import numpy as np
from shcomplete.model2correct import Seq2seq, generate_model, get_chars, train_correct
from shcomplete.model2correct import generator_misprints, dislpay_sample_correction
if __name__ == '__main__':
unittest.main()
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
42465,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
427,
20751,
13,
19849,
17,
30283,
1330,
1001... | 3.284314 | 102 |
import unittest
from draftjs_exporter.error import ConfigException
from draftjs_exporter.options import Options
| [
11748,
555,
715,
395,
198,
198,
6738,
4538,
8457,
62,
1069,
26634,
13,
18224,
1330,
17056,
16922,
198,
6738,
4538,
8457,
62,
1069,
26634,
13,
25811,
1330,
18634,
628
] | 3.931034 | 29 |
import unittest
from zope.testing import doctest, module
import zc.set
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| [
11748,
555,
715,
395,
198,
198,
6738,
1976,
3008,
13,
33407,
1330,
10412,
395,
11,
8265,
198,
11748,
1976,
66,
13,
2617,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
1241... | 2.735849 | 53 |
import os
import tarfile
output = os.path.splitext(input)[0]
try:
os.makedirs(output)
except OSError:
if not os.path.exists(output):
raise
with tarfile.open(input, 'r') as tf:
tf.extractall(output)
| [
11748,
28686,
198,
11748,
13422,
7753,
628,
198,
22915,
796,
28686,
13,
6978,
13,
22018,
578,
742,
7,
15414,
38381,
15,
60,
198,
198,
28311,
25,
198,
220,
220,
220,
28686,
13,
76,
4335,
17062,
7,
22915,
8,
198,
16341,
440,
5188,
81,... | 2.265306 | 98 |
import enum
import struct
from .abstract import AbstractNode
from .utils import ValuedNodeMixin, NodeContext
| [
11748,
33829,
198,
11748,
2878,
198,
198,
6738,
764,
397,
8709,
1330,
27741,
19667,
198,
6738,
764,
26791,
1330,
3254,
1739,
19667,
35608,
259,
11,
19081,
21947,
628,
628,
628,
628
] | 3.774194 | 31 |
from gevent import monkey
monkey.patch_time()
monkey.patch_socket()
import abc
import datetime
import time
from rx.concurrency.eventloopscheduler import EventLoopScheduler
from rx.concurrency.historicalscheduler import HistoricalScheduler
from rx.concurrency.mainloopscheduler import GEventScheduler
from rx.concurrency.newthreadscheduler import NewThreadScheduler
from algotrader.trading.event import MarketDataEventHandler
from algotrader.utils.logging import logger
from algotrader.utils.date import unixtimemillis_to_datetime
from algotrader import Startable, HasId, Context
| [
6738,
4903,
1151,
1330,
21657,
198,
198,
49572,
13,
17147,
62,
2435,
3419,
198,
49572,
13,
17147,
62,
44971,
3419,
198,
198,
11748,
450,
66,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
198,
6738,
374,
87,
13,
1102,
34415,
13,
15596,... | 3.322034 | 177 |
# coding: utf-8
# Node class based on the book "Inteligencia Artificial - Fundamentos, práctica y aplicaciones" by Alberto García Serrano | [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
19081,
1398,
1912,
319,
262,
1492,
366,
24123,
9324,
33743,
35941,
532,
7557,
3263,
418,
11,
778,
6557,
28914,
331,
257,
489,
291,
49443,
274,
1,
416,
40649,
16364,
29690,
2930,
35823
] | 3.341463 | 41 |
numero = int(input("Fatorial de: ") )
resultado=1
count=1
while count <= numero:
resultado *= count
count --1
print(resultado)
| [
22510,
3529,
796,
493,
7,
15414,
7203,
37,
21592,
390,
25,
366,
8,
1267,
198,
198,
20274,
4533,
28,
16,
198,
9127,
28,
16,
198,
198,
4514,
954,
19841,
997,
3529,
25,
198,
220,
220,
220,
1255,
4533,
1635,
28,
954,
198,
220,
220,
... | 2.464286 | 56 |
import torch
import os
import torch.nn as nn
import logging
import time
from sklearn.metrics import f1_score, classification_report, confusion_matrix
from transformers import BertForSequenceClassification
| [
11748,
28034,
198,
11748,
28686,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
18931,
198,
11748,
640,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
277,
16,
62,
26675,
11,
17923,
62,
13116,
11,
10802,
62,
6759,
8609,
198,
... | 3.961538 | 52 |
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import cv2
numDem=500
numRep=500
numColumns=50
numRows=50
numGrid=numColumns*numRows
windowSize=3
kernel=np.ones((windowSize,windowSize))
kernel[(windowSize-1)/2,(windowSize-1)/2]=0
numIter=100
valueThreshold=0.375*((windowSize**2)-1) #Slightly xenophilic, 37.5% corresponds to a threshold of 3
populationGrid=randomPopulationGrid()
emptyHouses=np.asarray(np.asarray(np.where(populationGrid==0)).transpose())
print(np.shape(emptyHouses))
cv2.namedWindow('Population Grid')
cv2.namedWindow('Dem Value Grid')
cv2.namedWindow('Rep Value Grid')
for iter in range(0,numIter):
print("Iteration "+ str(iter))
populationGridOne=np.copy(populationGrid)
populationGridOne[np.where(populationGridOne==-1)]=0 #Masking out opposition
populationGridNegativeOne=np.copy(populationGrid)
populationGridNegativeOne[np.where(populationGridNegativeOne==1)]=0 #Masking out opposition
valueGridOne=signal.fftconvolve(populationGridOne, kernel, mode='same')#gives a map of the number of similar individuals -satisfaction
valueGridNegativeOne=-1*signal.fftconvolve(populationGridNegativeOne, kernel, mode='same')#gives a map of the number of dissimilar individuals -satisfaction
cv2.imshow('Dem Value Grid', (valueGridOne)/((windowSize**2)-1))
cv2.imshow('Rep Value Grid', (valueGridNegativeOne)/((windowSize**2)-1))
cv2.imshow('Population Grid', visualMap(populationGrid))
cv2.waitKey(1)
repopulationGrid=populationGrid
if((iter%10)==0):
cv2.imwrite('iteration'+str(iter)+'.bmp', visualMap(populationGrid)*(2**8))
numSatisfied=0
for i in range(0,numRows):
for j in range(0,numColumns):
if(repopulationGrid[i,j]==1):
valueGrid=valueGridOne
if(repopulationGrid[i,j]==-1):
valueGrid=valueGridNegativeOne
if(repopulationGrid[i,j]==0 or valueGrid[i,j]>valueThreshold):
numSatisfied+=1
continue
numSatisfied+=1
emptyIndex=np.random.randint(0,numGrid-numDem-numRep)
shiftIndex=emptyHouses[emptyIndex][:]
repopulationGrid[i,j], repopulationGrid[shiftIndex[0],shiftIndex[1]]=repopulationGrid[shiftIndex[0],shiftIndex[1]], repopulationGrid[i,j]
emptyHouses[0:-1,:]=np.append(emptyHouses[0:emptyIndex,:], emptyHouses[emptyIndex+1:,:], axis=0)
emptyHouses[-1,:]=(np.array([i, j]))
populationGrid=repopulationGrid
cv2.imwrite('iteration99.bmp', visualMap(populationGrid)*(2**8))
cv2.waitKey()
cv2.destroyAllWindows()
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
6737,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
269,
85,
17,
198,
198,
22510,
11522,
28,
4059,
198,
22510,
6207,
28,
4059,
198,
198,
22510,
... | 2.644468 | 931 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# download some test data to run example notebook
#
# Author: M. Giomi (matteo.giomi@desy.de)
import os
from urllib.request import urlretrieve
from shutil import unpack_archive
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
4321,
617,
1332,
1366,
284,
1057,
1672,
20922,
198,
2,
198,
2,
6434,
25,
337,
13,
8118,
12753,
357,
6759... | 2.72619 | 84 |
import plasma
import plasmafx
from plasmafx import plugins
import time
FPS = 60
NUM_LIGHTS = 10
plasma.set_light_count(10)
sequence = plasmafx.Sequence(NUM_LIGHTS)
for x in range(NUM_LIGHTS):
sequence.set_plugin(x, plugins.FXCycle(
speed=2,
spread=5,
offset=(360.0/NUM_LIGHTS) * x
))
sequence.set_plugin(0, plugins.Pulse([
(0, 0, 0),
(255, 0, 255)
]))
sequence.set_plugin(1, plugins.Pulse([
(255, 0, 0),
(0, 0, 255),
(0, 0, 0)
], speed=0.5))
while True:
values = sequence.get_leds()
for index, rgb in enumerate(values):
# print("Setting pixel: {} to {}:{}:{}".format(index, *rgb))
plasma.set_pixel(index, *rgb)
plasma.show()
time.sleep(1.0 / FPS)
| [
11748,
16074,
198,
11748,
458,
8597,
1878,
87,
198,
6738,
458,
8597,
1878,
87,
1330,
20652,
198,
11748,
640,
198,
198,
37,
3705,
796,
3126,
198,
41359,
62,
43,
34874,
796,
838,
198,
198,
489,
11797,
13,
2617,
62,
2971,
62,
9127,
7,
... | 2.122507 | 351 |
# Copyright 2020 Akamai Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Conversions from strings returned by Athena to Python types.
"""
from __future__ import annotations
import datetime as dt
import json
from abc import ABCMeta, abstractmethod
from decimal import Decimal
from typing import Dict, Generic, Iterable, List, Optional, Sequence, TypeVar
from pallas._compat import numpy as np
from pallas._compat import pandas as pd
T_co = TypeVar("T_co", covariant=True)
class Converter(Generic[T_co], metaclass=ABCMeta):
"""
Convert values returned by Athena to Python types.
"""
@property
@abstractmethod
def dtype(self) -> object:
"""Pandas dtype"""
def read(self, value: Optional[str]) -> Optional[T_co]:
"""
Read value returned from Athena.
Expect a string or ``None`` because optional strings
are what Athena returns at its API and that is also
what can be parsed from CSV stored in S3.
"""
if value is None:
return None
return self.read_str(value)
@abstractmethod
def read_str(self, value: str) -> T_co:
"""
Read value from string
To be implemented in subclasses.
"""
def read_array(
self, values: Iterable[Optional[str]], dtype: Optional[object] = None,
) -> object: # Pandas array
"""
Convert values returned from Athena to Pandas array.
:param values: Iterable yielding strings and ``None``
:param dtype: optional Pandas dtype to force
"""
if dtype is None:
dtype = self.dtype
converted = [self.read(value) for value in values]
return _pd_array(converted, dtype=dtype)
class ArrayConverter(Converter[List[str]]):
"""
Parse string returned by Athena to a list.
Array parsing has multiple limitations because of the
serialization format that Athena uses:
- Always returns a list of strings because Athena does
not send more details about item types.
- It is not possible to distinguish comma in values from
an item separator. We assume that values do not contain the comma.
- We are not able to distinguish an empty array
and an array with one empty string.
This converter returns an empty array in that case.
"""
@property
class MapConverter(Converter[Dict[str, str]]):
"""
Convert string value returned from Athena to a dictionary.
Map parsing has multiple limitations because of the
serialization format that Athena uses:
- Always returns a mapping from strings to strings because
Athena does not send more details about item types.
- It is not possible to distinguish a comma or an equal sign
in values from control characters.
We assume that values do not contain the comma or the equal sign.
"""
@property
default_converter = TextConverter()
CONVERTERS: Dict[str, Converter[object]] = {
"boolean": BooleanConverter(),
"tinyint": IntConverter(8),
"smallint": IntConverter(16),
"integer": IntConverter(32),
"bigint": IntConverter(64),
"float": FloatConverter(32),
"double": FloatConverter(64),
"decimal": DecimalConverter(),
"date": DateConverter(),
"timestamp": DateTimeConverter(),
"varbinary": BinaryConverter(),
"array": ArrayConverter(),
"map": MapConverter(),
"json": JSONConverter(),
}
def get_converter(column_type: str) -> Converter[object]:
"""
Return a converter for a column type.
:param column_type: a column type as reported by Athena
:return: a converter instance.
"""
return CONVERTERS.get(column_type, default_converter)
| [
2,
15069,
12131,
9084,
1689,
72,
21852,
11,
3457,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
1... | 2.950729 | 1,441 |
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
| [
2220,
7203,
31,
65,
41319,
62,
31391,
1003,
31391,
14,
11249,
62,
4299,
82,
14,
260,
7501,
25,
26791,
13,
65,
48274,
1600,
366,
25991,
4943,
198,
2220,
7203,
31,
65,
41319,
62,
31391,
1003,
31391,
14,
11249,
62,
4299,
82,
14,
260,
... | 2.357143 | 56 |
url = r"onlinelibrary.wiley.com/journal/{ID}/(?P<ISSN>\(ISSN\)[\d-]*)"
extractor_args = dict(restrict_text=[r"author\s*guidelines"])
template = (
"https://onlinelibrary.wiley.com/page/journal/{ID}/{ISSN}/homepage/forauthors.html"
)
| [
6371,
796,
374,
1,
261,
2815,
417,
4115,
13,
86,
9618,
13,
785,
14,
24891,
14,
90,
2389,
92,
29006,
30,
47,
27,
1797,
15571,
29,
59,
7,
1797,
15571,
22725,
58,
59,
67,
12,
60,
9,
16725,
198,
2302,
40450,
62,
22046,
796,
8633,
... | 2.226415 | 106 |
import json
from flask import Flask, request, jsonify, make_response
from flask_restful import Api, Resource, reqparse
from simplexml import dumps
from estimator import estimator
app = Flask(__name__)
api = Api(app, default_mediatype=None)
@api.representation('application/json')
def output_xml(data, code, headers=None):
"""Make a Flask response with a XML encoded body"""
resp = make_response(dumps({'response': data}), code)
resp.headers.extend(headers or {})
return resp
@app.after_request
api.add_resource(Covid19EstimatorApi, '/api/v1/on-covid-19')
api.add_resource(Covid19EstimatorApi, '/api/v1/on-covid-19/json',
resource_class_kwargs={'representations': {'application/json': output_json}},
endpoint='covid19_estimator_api_json'
)
api.add_resource(Covid19EstimatorApi, '/api/v1/on-covid-19/xml',
resource_class_kwargs={'representations': {'application/xml': output_xml}},
endpoint='covid19_estimator_api_xml'
)
app.run(debug=True)
| [
11748,
33918,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
11,
787,
62,
26209,
198,
6738,
42903,
62,
2118,
913,
1330,
5949,
72,
11,
20857,
11,
43089,
29572,
198,
6738,
2829,
19875,
1330,
45514,
198,
6738,
3959,
1352,
1330,
... | 2.157895 | 532 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
from .layers import PixelShuffle_ICNR
| [
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
11748,
25064,
201,
198,
6738,
764,
75,
6962,
1330,
11349,
2484,
18137,
62,
2149,
24723,
220,
20... | 2.421053 | 57 |
import os
import pandas as pd
df = pd.read_csv(
"{}/practice-pandas/data/test-participant.csv".format(os.getcwd()), sep=',', engine='python', verbose=True)
df_grouped = df.groupby("GENRE_CODE").count()
df_sorted = df_grouped["ID"].sort_values(ascending=False)
# Top 1000.
print(df_sorted.head(1000))
"""
GENRE_CODE
Blue 14
Green 10
Yellow 8
Red 8
White 4
Orange 3
Black 3
Violet 2
Pink 2
Gray 2
YellowGreen 1
SkyBlue 1
Purple 1
Brown 1
Name: ID, dtype: int64
"""
print("Info : Finished.")
| [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
7568,
796,
279,
67,
13,
961,
62,
40664,
7,
198,
220,
220,
220,
45144,
92,
14,
39541,
12,
79,
392,
292,
14,
7890,
14,
9288,
12,
48013,
415,
13,
40664,
1911,
18982,
7,
... | 1.886567 | 335 |
from vietocr.tool.translate import build_model, translate, translate_beam_search, process_input, predict
from vietocr.tool.utils import download_weights
from vietocr.tool.config import Cfg
import sys
import os
import cv2
import numpy as np
import math
import pandas as pd
import torch
import time
from cropper import Cropper
from detector import Detector
from format_info import format_information
###multi threading
#from threading import Thread
if __name__ == "__main__":
cropper = Cropper()
detector = Detector()
reader = Reader()
type_img = ['jpg', 'png']
image_folder = 'test_images/'
images = os.listdir(image_folder)
for image_file in images:
if image_file[-3:] in type_img:
start = time.time()
path = image_folder + image_file
image = cv2.imread(path)
H, W = image.shape[:2]
image_resized = cv2.resize(image, (416, int(416 * H/W)))
cv2.imshow("raw_image", image_resized)
dictInformationText = dict()
dictInformationImage = dict()
return_code, aligned_image = cropper.crop_and_align_image(image)
#print('cropper: ', time.time() - start)
tmp = 0
if return_code == 0:
for c in detector.classes:
dictInformationText[c] = 'N/A'
print (dictInformationText)
elif return_code == 2:
tmp = 1
index = 0
aligned_image = image
while(index < 4):
dictInformationImage = detector.detect_information(aligned_image)
keys = dictInformationImage.keys()
if 'id' in keys and 'ho_ten' in keys and 'ngay_sinh' in keys:
tmp = 2
break
else:
aligned_image = cv2.rotate(aligned_image, cv2.cv2.ROTATE_90_CLOCKWISE)
index+=1
if tmp == 0:
dictInformationImage = detector.detect_information(aligned_image)
if tmp == 1:
for c in detector.classes:
dictInformationText[c] = 'N/A'
print(dictInformationText)
#print('detector: ', time.time() - start)
for key in dictInformationImage.keys():
dictInformationText[key] = reader.read_information(dictInformationImage[key])
#cv2.imwrite('images_uploaded/' + dictInformationText['id'] + '.jpg', image)
output_dict = format_information(dictInformationText)
print('Time processing: ', time.time() - start)
for key in output_dict.keys():
info = key + ': ' + output_dict[key]
print(info)
#print(output_dict)
cv2.waitKey()
cv2.destroyAllWindows() | [
6738,
410,
1155,
1696,
13,
25981,
13,
7645,
17660,
1330,
1382,
62,
19849,
11,
15772,
11,
15772,
62,
40045,
62,
12947,
11,
1429,
62,
15414,
11,
4331,
198,
6738,
410,
1155,
1696,
13,
25981,
13,
26791,
1330,
4321,
62,
43775,
198,
6738,
... | 2.052817 | 1,420 |
a= 30
a //= 2
print(a)
| [
64,
28,
1542,
201,
198,
64,
3373,
28,
362,
201,
198,
4798,
7,
64,
8,
201,
198
] | 1.529412 | 17 |
##script used to combine multiple files into a matrix
import os, sys
import pandas as pd
import numpy as np
sum_matrix = open(sys.argv[1]+"_binary.matrix.txt","w")
# open with pandas
df = pd.read_csv(sys.argv[1], sep='\t', index_col = 0)
#get first line as title list
col_list= list(df.columns.values)
print (col_list)
#title_list = start_inp.readline()
#turn column to array, get each column and add their unique categorical value to make a title list
final_list = []
for i in range(0,len(col_list)):
print (i)
colname= col_list[i]
print(colname)
dfx= df.as_matrix([df.columns[i]])
dfx_un= np.unique(dfx)
for j in dfx_un:
if str(j) == 'nan':
pass
else:
string= str(df.columns[i]) + "."+str(j)
if string not in final_list:
final_list.append(string)
print (final_list)
final_str = "\t".join(str(j) for j in final_list)
sum_matrix.write("gene\t%s\n" % (final_str))
start_inp = open(sys.argv[1], "r") #categorical matrix
#loop through directory for each file to add input
D={}
add_data_to_dict(start_inp,D)
print(D)
y = len(final_list)
for gene in D:
feature_list= []
for i in range(y):
feature_list.append(0)
#feature_list= [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
#print(feature_list)
data_list= D[gene]
for data in data_list:
for xx in final_list:
ind= final_list.index(xx)
x1= xx.split(".")[0]
#print (x1)
x2= xx.split(".")[1]
#print (x2)
for x in col_list:
if x1 == x:
if x2 == data:
feature_list[ind] = 1
elif data == 'NA':
feature_list[ind] = 'NA'
#print (feature_list)
feat_str= "\t".join(str(k) for k in feature_list)
sum_matrix.write("%s\t%s\n" % (gene, feat_str))
sum_matrix.close() | [
2235,
12048,
973,
284,
12082,
3294,
3696,
656,
257,
17593,
198,
11748,
28686,
11,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
628,
198,
16345,
62,
6759,
8609,
796,
1280,
7,
17597,
13,
853,
85,
58,
... | 1.983385 | 963 |
from django.urls import path
from .views import index, TodoDetailView
from django.conf import settings
urlpatterns = [
path('', index),
path('edit/<int:pk>', TodoDetailView.as_view()),
path('delete/<int:pk>', TodoDetailView.as_view()),
]
react_routes = getattr(settings, 'REACT_ROUTES', [])
for route in react_routes:
urlpatterns += [
path('{}'.format(route), index)
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
6376,
11,
309,
24313,
11242,
603,
7680,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
107... | 2.39645 | 169 |
#!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generates a groundtruth map for a given input bagfile. The groundtruth map
creation process merges images from the input bagfile with an existing map.
This is the first step for groundtruth creation, where once a groundtruth map
is created for a bagfile the bagfile can then be localized using the groundtruth
map to generate groundtruth poses.
"""
import argparse
import os
import shutil
import sys
import utilities
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("bagfile", help="Input bagfile to generate groundtruth for.")
parser.add_argument(
"base_surf_map",
help="Existing map to use as basis for groundtruth. Should largely overlap area covered in input bagfile.",
)
parser.add_argument(
"maps_directory",
help="Location of images used for each bagfile use to generate base_surf_map.",
)
parser.add_argument(
"-o", "--output-directory", default="groundtruth_creation_output"
)
parser.add_argument("-w", "--world", default="iss")
parser.add_argument("-r", "--robot-name", default="bumble")
parser.add_argument("-m", "--map-name", default="groundtruth")
args = parser.parse_args()
if not os.path.isfile(args.bagfile):
print("Bag file " + args.bagfile + " does not exist.")
sys.exit()
if not os.path.isfile(args.base_surf_map):
print("Base surf map " + args.base_surf_map + " does not exist.")
sys.exit()
if not os.path.isdir(args.maps_directory):
print("Maps directory " + args.maps_directory + " does not exist.")
sys.exit()
if os.path.isdir(args.output_directory):
print("Output directory " + args.output_directory + " already exists.")
sys.exit()
bagfile = os.path.abspath(args.bagfile)
base_surf_map = os.path.abspath(args.base_surf_map)
maps_directory = os.path.abspath(args.maps_directory)
os.mkdir(args.output_directory)
os.chdir(args.output_directory)
create_groundtruth(
bagfile,
base_surf_map,
maps_directory,
args.map_name,
args.world,
args.robot_name,
)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
11,
1578,
1829,
5070,
11,
355,
7997,
416,
262,
198,
2,
22998,
286,
262,
2351,
15781,
261,
2306,
873,
290,
4687,
8694,
13,
198,
2,
198,
2,
1439,
2489,... | 2.885906 | 1,043 |
from datetime import timedelta
from flask import request, current_app
from flask_jwt_extended import jwt_required, create_access_token, get_jwt_identity
from marshmallow.exceptions import ValidationError
from sqlalchemy import or_
from app.api.utils import success_response, error_response, get_items_per_page, get_request_page
from app.api.v1.main import api_v1
from app.api.models import User
from app.api.v1.user.serializer import user_schema, users_schema
from app.ext.db import db
@api_v1.route('/users', methods=['GET'])
@jwt_required
@api_v1.route('/users', methods=['POST'])
@api_v1.route('/auth/login', methods=['POST']) | [
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
42903,
1330,
2581,
11,
1459,
62,
1324,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
474,
46569,
62,
35827,
11,
2251,
62,
15526,
62,
30001,
11,
651,
62,
73,
46569,
62,
7... | 2.96729 | 214 |
import os
import torch
import warnings
warnings.filterwarnings('ignore')
from hpbandster.core.worker import Worker
from nes.ensemble_selection.create_baselearners import create_baselearner
| [
11748,
28686,
198,
11748,
28034,
198,
11748,
14601,
198,
40539,
654,
13,
24455,
40539,
654,
10786,
46430,
11537,
198,
198,
6738,
27673,
3903,
1706,
13,
7295,
13,
28816,
1330,
35412,
198,
6738,
299,
274,
13,
1072,
11306,
62,
49283,
13,
1... | 3.641509 | 53 |
import tkinter
from tkinter import ttk
mainclass()
| [
11748,
256,
74,
3849,
201,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
201,
198,
201,
198,
201,
198,
201,
198,
12417,
4871,
3419,
201,
198,
220,
220,
220,
220,
201,
198,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
201,
198
] | 1.772727 | 44 |
#!/usr/bin/env python3
from math import sqrt
# init conds
x = [15, 15, f(15, 15)]
lamb = 2
xold = [99, 99, f(99, 99)]
while dist(xold, x) > 0.5 and lamb >= 0.0001:
print("x:", x)
print("xold", xold)
xnew = grad(*x)
xnew = [x[0] - lamb * xnew[0], x[1] - lamb * xnew[1], 0]
xnew[2] = f(xnew[0], xnew[1])
print("xnew:", xnew)
if (f(x[0], x[1]) > f(xnew[0], xnew[1])):
lamb *= 2
else:
lamb /= 2
xold = x.copy()
x = xnew.copy()
print("result:", x)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
10688,
1330,
19862,
17034,
628,
628,
198,
198,
2,
2315,
1779,
82,
198,
87,
796,
685,
1314,
11,
1315,
11,
277,
7,
1314,
11,
1315,
15437,
198,
2543,
65,
796,
362,
198,... | 1.857664 | 274 |
# coding=utf-8
"""
Definition of models.
"""
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
| [
2,
19617,
28,
40477,
12,
23,
198,
37811,
198,
36621,
286,
4981,
13,
198,
37811,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
637... | 3.208333 | 48 |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QSlider, QStyleOptionSlider, QStyle)
import time
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
33734,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
357,
48,
11122,
1304,
11,
1195,
21466,
19722,
11122,
1304,
11,
1195,
21466,
8,
198,
11748,
640,
628
] | 2.547619 | 42 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'ConfigurationAggregatorAccountAggregationSourceArgs',
'ConfigurationAggregatorOrganizationAggregationSourceArgs',
'ConformancePackInputParameterArgs',
'DeliveryChannelSnapshotDeliveryPropertiesArgs',
'RecorderRecordingGroupArgs',
'RemediationConfigurationParameterArgs',
'RuleScopeArgs',
'RuleSourceArgs',
'RuleSourceSourceDetailArgs',
]
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.03 | 300 |
from django import forms
from django.utils.translation import gettext_lazy as _
from rusel.base.forms import BaseCreateForm, BaseEditForm
from rusel.widgets import DateInput, Select, NumberInput, UrlsInput
from task.const import NUM_ROLE_SERVICE, APART_SERVICE
from task.models import Task
from apart.config import app_config
role = 'price'
#----------------------------------
#----------------------------------
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
198,
198,
6738,
7422,
741,
13,
8692,
13,
23914,
1330,
7308,
16447,
8479,
11,
7308,
18378,
8479,
198,
6738,
7422,
... | 3.601695 | 118 |
import sensor, time, image
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QCIF)
sensor.set_pixformat(sensor.GRAYSCALE)
# Load Haar Cascade
# By default this will use all stages, lower satges is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=16)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find objects.
# Note: Lower scale factor scales-down the image more and detects smaller objects.
# Higher threshold results in a higher detection rate, with more false positives.
objects = img.find_features(face_cascade, threshold=0.65, scale=1.65)
# Draw objects
for r in objects:
img.draw_rectangle(r)
if (len(objects)):
# Add a small delay to see the drawing on the FB
time.sleep(100)
# Print FPS.
# Note: Actual FPS is higher, streaming the FB makes it slower.
print(clock.fps())
| [
11748,
12694,
11,
640,
11,
2939,
198,
198,
2,
30027,
12694,
198,
82,
22854,
13,
42503,
3419,
198,
198,
2,
35367,
6460,
198,
82,
22854,
13,
2617,
62,
3642,
5685,
7,
16,
8,
198,
82,
22854,
13,
2617,
62,
48544,
344,
4386,
7,
1433,
... | 2.838275 | 371 |
from rti_python.Ensemble.Ensemble import Ensemble
import logging
class InstrumentVelocity:
"""
Instrument Velocity DataSet.
[Bin x Beam] data.
"""
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the velocities.
:param data: Bytearray for the dataset.
"""
packetpointer = Ensemble.GetBaseDataSize(self.name_len)
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
self.Velocities[bin_num][beam] = Ensemble.GetFloat(packetpointer, Ensemble().BytesInFloat, data)
packetpointer += Ensemble().BytesInFloat
logging.debug(self.Velocities)
def encode(self):
"""
Encode the data into RTB format.
:return:
"""
result = []
# Generate header
result += Ensemble.generate_header(self.ds_type,
self.num_elements,
self.element_multiplier,
self.image,
self.name_len,
self.Name)
# Add the data
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
val = self.Velocities[bin_num][beam]
result += Ensemble.float_to_bytes(val)
return result
def encode_csv(self, dt, ss_code, ss_config, blank, bin_size):
"""
Encode into CSV format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:param blank: Blank or First bin position in meters.
:param bin_size: Bin size in meters.
:return: List of CSV lines.
"""
str_result = []
for beam in range(self.element_multiplier):
for bin_num in range(self.num_elements):
# Get the value
val = self.Velocities[bin_num][beam]
# Create the CSV string
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_INSTR_VEL, ss_code, ss_config, bin_num, beam, blank, bin_size, val))
return str_result
| [
6738,
374,
20259,
62,
29412,
13,
4834,
15140,
13,
4834,
15140,
1330,
2039,
15140,
201,
198,
11748,
18931,
201,
198,
201,
198,
4871,
42410,
46261,
11683,
25,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
42410,
43137,
6060,
... | 1.936637 | 1,231 |
from django.core.management.base import BaseCommand, CommandError
from odds.domain.models.manager.betTypeManager import BetTypeManager
from odds.domain.models.bet import Bet
from odds.domain.models.sureBet import SureBet
from odds.domain.models.manager.SureBetManager import SureBetManager
from odds.domain.models.event import Event
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
198,
6738,
10402,
13,
27830,
13,
27530,
13,
37153,
13,
11181,
6030,
13511,
1330,
5147,
6030,
13511,
198,
6738,
10402,
13,
27830,
13,
27530,
13,
... | 3.895349 | 86 |
# -*- coding: utf-8 -*-
# flake8: noqa
"""
Defintion of the campaign and datasets for 2016 legacy rereco data.
"""
import order as od
from analysis.config.processes import *
# campaign: 2016 legacy re-reco pp data-taking conditions
# (13 TeV center-of-mass energy, 25 ns bunch spacing)
campaign_name = "Run2_pp_13TeV_Legacy16"
campaign = od.Campaign(campaign_name, 2, ecm=13, bx=25)
# datasets
# dielectron channel: DoubleEG primary datasets, 2016 eras B-H
dataset_data_B_ee = od.Dataset(
    "data_B_ee", 1, campaign=campaign, is_data=True, n_files=922,
    keys=["/DoubleEG/Run2016B-17Jul2018_ver2-v1/MINIAOD"],
    context=campaign_name)
dataset_data_C_ee = od.Dataset(
    "data_C_ee", 2, campaign=campaign, is_data=True, n_files=427,
    keys=["/DoubleEG/Run2016C-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_D_ee = od.Dataset(
    "data_D_ee", 3, campaign=campaign, is_data=True, n_files=471,
    keys=["/DoubleEG/Run2016D-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_E_ee = od.Dataset(
    "data_E_ee", 4, campaign=campaign, is_data=True, n_files=375,
    keys=["/DoubleEG/Run2016E-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_F_ee = od.Dataset(
    "data_F_ee", 5, campaign=campaign, is_data=True, n_files=309,
    keys=["/DoubleEG/Run2016F-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_G_ee = od.Dataset(
    "data_G_ee", 6, campaign=campaign, is_data=True, n_files=715,
    keys=["/DoubleEG/Run2016G-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_H_ee = od.Dataset(
    "data_H_ee", 7, campaign=campaign, is_data=True, n_files=736,
    keys=["/DoubleEG/Run2016H-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
# all ee data eras in run order
datasets_data_ee = [
    dataset_data_B_ee, dataset_data_C_ee, dataset_data_D_ee, dataset_data_E_ee,
    dataset_data_F_ee, dataset_data_G_ee, dataset_data_H_ee
]
# electron-muon channel: MuonEG primary datasets, 2016 eras B-H
dataset_data_B_emu = od.Dataset(
    "data_B_emu", 11, campaign=campaign, is_data=True, n_files=249,
    keys=["/MuonEG/Run2016B-17Jul2018_ver2-v1/MINIAOD"],
    context=campaign_name)
dataset_data_C_emu = od.Dataset(
    "data_C_emu", 12, campaign=campaign, is_data=True, n_files=112,
    keys=["/MuonEG/Run2016C-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_D_emu = od.Dataset(
    "data_D_emu", 13, campaign=campaign, is_data=True, n_files=192,
    keys=["/MuonEG/Run2016D-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
# note: era E uses the -v2 processing of the MuonEG stream
dataset_data_E_emu = od.Dataset(
    "data_E_emu", 14, campaign=campaign, is_data=True, n_files=209,
    keys=["/MuonEG/Run2016E-17Jul2018-v2/MINIAOD"],
    context=campaign_name)
dataset_data_F_emu = od.Dataset(
    "data_F_emu", 15, campaign=campaign, is_data=True, n_files=159,
    keys=["/MuonEG/Run2016F-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_G_emu = od.Dataset(
    "data_G_emu", 16, campaign=campaign, is_data=True, n_files=302,
    keys=["/MuonEG/Run2016G-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_H_emu = od.Dataset(
    "data_H_emu", 17, campaign=campaign, is_data=True, n_files=267,
    keys=["/MuonEG/Run2016H-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
# all emu data eras in run order
datasets_data_emu = [
    dataset_data_B_emu, dataset_data_C_emu, dataset_data_D_emu, dataset_data_E_emu,
    dataset_data_F_emu, dataset_data_G_emu, dataset_data_H_emu
]
# dimuon channel: DoubleMuon primary datasets, 2016 eras B-H
dataset_data_B_mumu = od.Dataset(
    "data_B_mumu", 21, campaign=campaign, is_data=True, n_files=451,
    keys=["/DoubleMuon/Run2016B-17Jul2018_ver2-v1/MINIAOD"],
    context=campaign_name)
dataset_data_C_mumu = od.Dataset(
    "data_C_mumu", 22, campaign=campaign, is_data=True, n_files=203,
    keys=["/DoubleMuon/Run2016C-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_D_mumu = od.Dataset(
    "data_D_mumu", 23, campaign=campaign, is_data=True, n_files=215,
    keys=["/DoubleMuon/Run2016D-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_E_mumu = od.Dataset(
    "data_E_mumu", 24, campaign=campaign, is_data=True, n_files=186,
    keys=["/DoubleMuon/Run2016E-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_F_mumu = od.Dataset(
    "data_F_mumu", 25, campaign=campaign, is_data=True, n_files=155,
    keys=["/DoubleMuon/Run2016F-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_G_mumu = od.Dataset(
    "data_G_mumu", 26, campaign=campaign, is_data=True, n_files=346,
    keys=["/DoubleMuon/Run2016G-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
dataset_data_H_mumu = od.Dataset(
    "data_H_mumu", 27, campaign=campaign, is_data=True, n_files=378,
    keys=["/DoubleMuon/Run2016H-17Jul2018-v1/MINIAOD"],
    context=campaign_name)
# all mumu data eras in run order
datasets_data_mumu = [
    dataset_data_B_mumu, dataset_data_C_mumu, dataset_data_D_mumu, dataset_data_E_mumu,
    dataset_data_F_mumu, dataset_data_G_mumu, dataset_data_H_mumu
]
# single electron
# SingleElectron primary datasets, 2016 eras B-H.
# Fixed "campaign = campaign" keyword spacing (PEP 8 E251) and aligned the
# keyword-argument order with the other data sections in this file.
dataset_data_B_e = od.Dataset(
    "data_B_e", 31,
    campaign=campaign,
    is_data=True,
    # era B is split across the ver1 and ver2 processings
    n_files=11+1560,
    keys=["/SingleElectron/Run2016B-17Jul2018_ver1-v1/MINIAOD",
          "/SingleElectron/Run2016B-17Jul2018_ver2-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_C_e = od.Dataset(
    "data_C_e", 32,
    campaign=campaign,
    is_data=True,
    n_files=674,
    keys=["/SingleElectron/Run2016C-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_D_e = od.Dataset(
    "data_D_e", 33,
    campaign=campaign,
    is_data=True,
    n_files=966,
    keys=["/SingleElectron/Run2016D-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_E_e = od.Dataset(
    "data_E_e", 34,
    campaign=campaign,
    is_data=True,
    n_files=819,
    keys=["/SingleElectron/Run2016E-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_F_e = od.Dataset(
    "data_F_e", 35,
    campaign=campaign,
    is_data=True,
    n_files=499,
    keys=["/SingleElectron/Run2016F-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_G_e = od.Dataset(
    "data_G_e", 36,
    campaign=campaign,
    is_data=True,
    n_files=1188,
    keys=["/SingleElectron/Run2016G-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_H_e = od.Dataset(
    "data_H_e", 37,
    campaign=campaign,
    is_data=True,
    n_files=968,
    keys=["/SingleElectron/Run2016H-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
# all single-electron data eras in run order
datasets_data_e = [
    dataset_data_B_e, dataset_data_C_e, dataset_data_D_e, dataset_data_E_e,
    dataset_data_F_e, dataset_data_G_e, dataset_data_H_e
]
# single muon
# SingleMuon primary datasets, 2016 eras B-H.
# Fixed "campaign = campaign" keyword spacing (PEP 8 E251) and aligned the
# keyword-argument order with the other data sections in this file.
dataset_data_B_mu = od.Dataset(
    "data_B_mu", 41,
    campaign=campaign,
    is_data=True,
    # era B is split across the ver1 and ver2 processings
    n_files=19+915,
    keys=["/SingleMuon/Run2016B-17Jul2018_ver1-v1/MINIAOD",
          "/SingleMuon/Run2016B-17Jul2018_ver2-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_C_mu = od.Dataset(
    "data_C_mu", 42,
    campaign=campaign,
    is_data=True,
    n_files=369,
    keys=["/SingleMuon/Run2016C-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_D_mu = od.Dataset(
    "data_D_mu", 43,
    campaign=campaign,
    is_data=True,
    n_files=670,
    keys=["/SingleMuon/Run2016D-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_E_mu = od.Dataset(
    "data_E_mu", 44,
    campaign=campaign,
    is_data=True,
    n_files=565,
    keys=["/SingleMuon/Run2016E-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_F_mu = od.Dataset(
    "data_F_mu", 45,
    campaign=campaign,
    is_data=True,
    n_files=462,
    keys=["/SingleMuon/Run2016F-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_G_mu = od.Dataset(
    "data_G_mu", 46,
    campaign=campaign,
    is_data=True,
    n_files=963,
    keys=["/SingleMuon/Run2016G-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
dataset_data_H_mu = od.Dataset(
    "data_H_mu", 47,
    campaign=campaign,
    is_data=True,
    n_files=1131,
    keys=["/SingleMuon/Run2016H-17Jul2018-v1/MINIAOD"],
    context=campaign_name,
)
# all single-muon data eras in run order
datasets_data_mu = [
    dataset_data_B_mu, dataset_data_C_mu, dataset_data_D_mu, dataset_data_E_mu,
    dataset_data_F_mu, dataset_data_G_mu, dataset_data_H_mu
]
# MC datasets
# Normalized the "keys= [" spacing (PEP 8 E251, st_t_t / st_t_tbar) and added
# section comments. All DAS keys and file counts are unchanged.
# ttbar: dileptonic and semileptonic decays
dataset_tt_dl = od.Dataset(
    "tt_dl", 101, campaign=campaign, n_files=777,
    keys=["/TTTo2L2Nu_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
dataset_tt_sl = od.Dataset(
    "tt_sl", 102, campaign=campaign, n_files=1105,
    keys=["/TTToSemiLeptonic_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
# Drell-Yan, split by dilepton mass
dataset_dy_lep_10To50 = od.Dataset(
    "dy_lep_10To50", 2230, campaign=campaign, n_files=264,
    keys=["/DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM"],
    context=campaign_name,
)
dataset_dy_lep_50ToInf = od.Dataset(
    "dy_lep_50ToInf", 2231, campaign=campaign,
    # two extension samples combined
    n_files=360+701,
    keys=[
        "/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM",
        "/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext2-v2/MINIAODSIM",
    ],
    context=campaign_name,
)
# single top, s-channel
dataset_st_s_lep = od.Dataset(
    "st_s_lep", 300, campaign=campaign, n_files=104,
    keys=["/ST_s-channel_4f_leptonDecays_TuneCP5_PSweights_13TeV-amcatnlo-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
# single top, t-channel
dataset_st_t_t = od.Dataset(
    "st_t_t", 301, campaign=campaign, n_files=307,
    keys=["/ST_t-channel_top_4f_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
dataset_st_t_tbar = od.Dataset(
    "st_t_tbar", 302, campaign=campaign, n_files=224,
    keys=["/ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
# single top, tW-channel
dataset_st_tW_t = od.Dataset(
    "st_tW_t", 321, campaign=campaign, n_files=65,
    keys=["/ST_tW_top_5f_inclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
dataset_st_tW_tbar = od.Dataset(
    "st_tW_tbar", 322, campaign=campaign, n_files=98,
    keys=["/ST_tW_antitop_5f_inclusiveDecays_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
# diboson
dataset_WW = od.Dataset(
    "WW", 401, campaign=campaign, n_files=7+53,
    keys=[
        "/WW_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM",
        "/WW_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM",
    ],
    context=campaign_name,
)
dataset_WZ = od.Dataset(
    "WZ", 402, campaign=campaign, n_files=8+29,
    keys=[
        "/WZ_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM",
        "/WZ_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM",
    ],
    context=campaign_name,
)
dataset_ZZ = od.Dataset(
    "ZZ", 403, campaign=campaign, n_files=7,
    keys=["/ZZ_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM"],
    context=campaign_name,
)
# W + jets
dataset_W_lep = od.Dataset(
    "W_lep", 500, campaign=campaign, n_files=215+410,
    keys=[
        "/WJetsToLNu_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM",
        "/WJetsToLNu_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext2-v2/MINIAODSIM"
    ],
    context=campaign_name,
)
# tt+X
dataset_ttH_bb = od.Dataset(
    "ttH_bb", 601, campaign=campaign, n_files=188,
    keys=["/ttHTobb_M125_TuneCP5_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
dataset_ttH_nonbb = od.Dataset(
    "ttH_nonbb", 602, campaign=campaign, n_files=143,
    keys=["/ttHToNonbb_M125_TuneCP5_13TeV-powheg-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM"],
    context=campaign_name,
)
dataset_ttWJets_lep = od.Dataset(
    "ttWJets_lep", 700, campaign=campaign, n_files=31,
    keys=["/TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext2-v1/MINIAODSIM"],
    context=campaign_name,
)
dataset_ttWJets_had = od.Dataset(
    "ttWJets_had", 701, campaign=campaign, n_files=7,
    keys=["/TTWJetsToQQ_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM"],
    context=campaign_name,
)
dataset_ttZJets_lep = od.Dataset(
    "ttZJets_lep", 710, campaign=campaign, n_files=49+48,
    keys=[
        "/TTZToLLNuNu_M-10_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext2-v1/MINIAODSIM",
        "/TTZToLLNuNu_M-10_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext3-v1/MINIAODSIM",
    ],
    context=campaign_name,
)
dataset_ttZJets_had = od.Dataset(
    "ttZJets_had", 711, campaign=campaign, n_files=7,
    keys=["/TTZToQQ_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v2/MINIAODSIM"],
    context=campaign_name,
)
# link processes to datasets
# data datasets are linked per channel group, in the same order as before
for _group, _process in (
    (datasets_data_ee, process_data_ee),
    (datasets_data_emu, process_data_emu),
    (datasets_data_mumu, process_data_mumu),
    (datasets_data_e, process_data_e),
    (datasets_data_mu, process_data_mu),
):
    for _dataset in _group:
        _dataset.add_process(_process)
# MC datasets are linked one-to-one with their physics process
for _dataset, _process in (
    (dataset_tt_dl, process_tt_dl),
    (dataset_tt_sl, process_tt_sl),
    (dataset_dy_lep_10To50, process_dy_lep_10To50),
    (dataset_dy_lep_50ToInf, process_dy_lep_50ToInf),
    (dataset_st_s_lep, process_st_s_lep),
    (dataset_st_t_t, process_st_t_t),
    (dataset_st_t_tbar, process_st_t_tbar),
    (dataset_st_tW_t, process_st_tW_t),
    (dataset_st_tW_tbar, process_st_tW_tbar),
    (dataset_WW, process_WW),
    (dataset_WZ, process_WZ),
    (dataset_ZZ, process_ZZ),
    (dataset_W_lep, process_W_lep),
    (dataset_ttH_bb, process_ttH_bb),
    (dataset_ttH_nonbb, process_ttH_nonbb),
    (dataset_ttWJets_lep, process_ttWJets_lep),
    (dataset_ttWJets_had, process_ttWJets_had),
    (dataset_ttZJets_lep, process_ttZJets_lep),
    (dataset_ttZJets_had, process_ttZJets_had),
):
    _dataset.add_process(_process)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
198,
37811,
198,
7469,
600,
295,
286,
262,
1923,
290,
40522,
329,
1584,
10655,
302,
260,
1073,
1366,
13,
198,
37811,
628,
198,
... | 1.936888 | 8,065 |
#!/usr/bin/env python
import csv
import argparse
import numpy as np
import pandas as pd
import tqdm
# Modified from: CosmiQ Solaris
# https://github.com/CosmiQ/solaris/blob/master/solaris/preproc/sar.py
def haversine(lat1, lon1, lat2, lon2, rad=False, radius=6.371E6):
    """
    Great-circle distance between two points on a spherical earth.

    Coordinates are in degrees unless ``rad`` is True; ``radius`` is the
    sphere radius (default: mean earth radius in meters), so the returned
    distance has the same unit as ``radius``.
    """
    if not rad:
        lat1 = np.radians(lat1)
        lon1 = np.radians(lon1)
        lat2 = np.radians(lat2)
        lon2 = np.radians(lon2)
    half_dlat = 0.5 * (lat2 - lat1)
    half_dlon = 0.5 * (lon2 - lon1)
    # haversine of the central angle between the two points
    h = np.sin(half_dlat) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon) ** 2
    return 2 * radius * np.arcsin(np.sqrt(h))
if __name__ == '__main__':
    # CLI: positional input/output paths plus an optional float threshold
    # (defaults to 10.0) used by the processing routine.
    parser = argparse.ArgumentParser()
    parser.add_argument('input_path')
    parser.add_argument('output_path')
    parser.add_argument('threshold', nargs='?', type=float, default=10.)
    args = parser.parse_args()
    # NOTE(review): `main` is not defined in this section of the file --
    # confirm it is defined elsewhere, otherwise this raises NameError.
    main(args.input_path, args.output_path, args.threshold)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
269,
21370,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
256,
80,
36020,
198,
198,
2,
40499,
422,
25,
1043... | 2.327314 | 443 |
# import the main window object (mw) from aqt
from aqt import mw
# import the "show info" tool from utils.py
from aqt.utils import showInfo
# import all of the Qt GUI library
from aqt.qt import *
# We're going to add a menu item below. First we want to create a function to
# be called when the menu item is activated.
# create a new menu item, "test"
action = QAction("test", mw)
# set it to call add_note when it's clicked
# NOTE(review): `add_note` is not defined in this section -- confirm it is
# defined elsewhere in the add-on, otherwise this line raises NameError.
action.triggered.connect(add_note)
# and add it to the tools menu
mw.form.menuTools.addAction(action)
# bind Ctrl+t as a keyboard shortcut for triggering the action
action.setShortcut(QKeySequence("Ctrl+t"))
| [
2,
1330,
262,
1388,
4324,
2134,
357,
76,
86,
8,
422,
257,
39568,
198,
6738,
257,
39568,
1330,
285,
86,
198,
2,
1330,
262,
366,
12860,
7508,
1,
2891,
422,
3384,
4487,
13,
9078,
198,
6738,
257,
39568,
13,
26791,
1330,
905,
12360,
19... | 3.20442 | 181 |
from __future__ import absolute_import
import logging
from flask import current_app
from changes.api.build_index import BuildIndexAPIView
from changes.models import ProjectStatus, Project, ProjectConfigError, ProjectOptionsHelper, Revision
from changes.utils.diff_parser import DiffParser
from changes.utils.project_trigger import files_changed_should_trigger_project
from changes.vcs.base import UnknownRevision
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
18931,
198,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
2458,
13,
15042,
13,
11249,
62,
9630,
1330,
10934,
15732,
2969,
3824,
769,
198,
6738,
2458,
13,
27530,
1330,... | 4.17 | 100 |
import timeit
import selenium.webdriver
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
import pandas as pd
# Scrape the STOXX Europe 600 (SXXP) constituents table with Edge/Selenium.
# Fixes: the bare "except:" (which also swallowed KeyboardInterrupt/SystemExit)
# is narrowed to "except Exception:", and "idx = idx+1" uses augmented
# assignment. All selectors, URLs and messages are unchanged.
driver_path = 'msedgedriver.exe'
constituents_url = 'https://www.stoxx.com/index-details?symbol=SXXP'
table_id = "stoxx_index_detail_component"
# maps the visible row text to the hidden <input> value of each constituent
constituents = {}

driver = webdriver.Edge(driver_path)
driver.get(url=constituents_url)
components = driver.find_element_by_link_text('Components')
components.click()
driver.implicitly_wait(2)

# first page of the components table
table = driver.find_element_by_id('component-table')
for row in table.find_elements_by_xpath(".//tr"):
    try:
        href = row.find_element_by_xpath("./td[1]/input")
        constituents[row.text] = href.get_property('value')
    except Exception:
        # rows without an <input> cell (e.g. headers) are skipped
        # TODO: Add Logger
        continue

# dismiss the cookie banner so the pagination buttons become clickable
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,'//*[@id="onetrust-accept-btn-handler"]'))).click()
button_list = driver.find_elements_by_xpath("//*/li[contains(@onclick,'paginate')]")
counter = len(button_list)
driver.implicitly_wait(2)
idx = 0
while idx < counter:
    print("Loading page {0}".format(idx))
    # re-query the buttons: the DOM is re-rendered after each pagination
    button_list = driver.find_elements_by_xpath("//*/li[contains(@onclick,'paginate')]")
    button_list[idx].click()
    time.sleep(2)
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID,'component-table')))
    table = driver.find_element_by_id('component-table')
    rows = table.find_elements_by_xpath(".//tr")
    print(len(rows))
    for row in rows:
        driver.implicitly_wait(2)
        try:
            href = row.find_element_by_xpath("./td[1]/input")
            constituents[row.text] = href.get_property('value')
        except Exception as err:
            print("Issue: {0}".format(err))# TODO: Add Logger
            driver.implicitly_wait(2)
            continue
    idx += 1

# follow one constituent's detail page and parse its key/value table
href = constituents.popitem()[1]
driver.get(href)
table = driver.find_element_by_class_name('flat-table')
static_data = table.text.split('\n')
output = []
for key_value in static_data:
    key, value = key_value.split(': ', 1)
    # start a new record when a key repeats (or on the very first row)
    if not output or key in output[-1]:
        output.append({})
    output[-1][key] = value
11748,
640,
270,
198,
198,
11748,
384,
11925,
1505,
13,
12384,
26230,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
17077,
1330,
5313,
32103,
21321,
198,
6738,
384,
11925,
... | 2.470339 | 944 |
import sys
import logging
import time
import tensorflow as tf
# Force TF2 (eager) behavior for code still going through tf.compat.v1.
tf.compat.v1.enable_v2_behavior()
from tf_agents.drivers import dynamic_step_driver
from tf_agents.drivers import dynamic_episode_driver
from modules.runtime.commons.parameters import ParameterServer
from tf_agents.metrics import tf_metrics
from tf_agents.eval import metric_utils
from tf_agents.utils import common
from tf_agents.trajectories import time_step as ts
from src.runners.base_runner import BaseRunner
# Module-level logger; getLogger() with no name returns the root logger, so
# all runner output shares the root handler configuration.
logger = logging.getLogger()
# NOTE(@hart): this will print all statements
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class TFARunner(BaseRunner):
"""Runner that takes the runtime and agent
and runs the training and evaluation as specified.
"""
def get_initial_collection_driver(self):
"""Sets the initial collection driver for tf-agents.
"""
self._initial_collection_driver = []
for agent in self._agent:
self._initial_collection_driver.append(dynamic_episode_driver.DynamicEpisodeDriver(
env=self._runtime,
policy=agent._agent.collect_policy,
observers=[agent._replay_buffer.add_batch],
num_episodes=self._params["ML"]["Runner"]["initial_collection_steps"]))
def get_collection_driver(self):
"""Sets the collection driver for tf-agents.
"""
self._collection_driver = []
for agent in self._agent:
self._collection_driver.append(dynamic_step_driver.DynamicStepDriver(
env=self._runtime,
policy=agent._agent.collect_policy, # this is the agents policy
observers=[agent._replay_buffer.add_batch],
num_steps = 1
))
def collect_initial_episodes(self):
"""Function that collects the initial episodes
"""
for i in range(len(self._initial_collection_driver)):
self._initial_collection_driver[i].run()
def train(self):
"""Wrapper that sets the summary writer.
This enables a seamingless integration with TensorBoard.
"""
# collect initial episodes
self.collect_initial_episodes()
# main training cycle
if self._summary_writer is not None:
with self._summary_writer.as_default():
self._train()
else:
self._train()
  def _train(self):
    """Trains the agent as specified in the parameter file.

    Hook method: concrete runner subclasses are expected to override this
    with the actual training loop; the base implementation is a no-op.
    """
    pass
  def evaluate(self):
    """Evaluates the agent over the configured number of episodes.

    Computes the evaluation metrics eagerly, logs them, and writes mean
    reward / mean steps scalars to TensorBoard at the current train step.
    """
    # NOTE(review): other methods iterate self._agent as a list, but here it
    # is used as a single agent (self._agent._agent) -- confirm which is
    # intended for the multi-agent case.
    global_iteration = self._agent._agent._train_step_counter.numpy()
    logger.info("Evaluating the agent's performance in {} episodes."
      .format(str(self._params["ML"]["Runner"]["evaluation_steps"])))
    # run the evaluation episodes and fill self._eval_metrics
    metric_utils.eager_compute(
      self._eval_metrics,
      self._runtime,
      self._agent._agent.policy,
      num_episodes=self._params["ML"]["Runner"]["evaluation_steps"])
    metric_utils.log_metrics(self._eval_metrics)
    # eval metric 0 is the average return, metric 1 the average episode length
    tf.summary.scalar("mean_reward",
                      self._eval_metrics[0].result().numpy(),
                      step=global_iteration)
    tf.summary.scalar("mean_steps",
                      self._eval_metrics[1].result().numpy(),
                      step=global_iteration)
    logger.info(
      "The agent achieved on average {} reward and {} steps in \
      {} episodes." \
      .format(str(self._eval_metrics[0].result().numpy()),
              str(self._eval_metrics[1].result().numpy()),
              str(self._params["ML"]["Runner"]["evaluation_steps"])))
11748,
25064,
198,
11748,
18931,
198,
11748,
640,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
27110,
13,
5589,
265,
13,
85,
16,
13,
21633,
62,
85,
17,
62,
46571,
3419,
198,
198,
6738,
48700,
62,
49638,
13,
36702,
1330,
8925,
62,
... | 2.683835 | 1,262 |
#!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
# Read the runtime dependency list and the long description via context
# managers so the file handles are closed deterministically (the original
# relied on the GC to close the bare open() handles).
with open("requirements.txt") as requirements_file:
    INSTALL_REQUIRES = requirements_file.readlines()

with open("README.md") as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name="lagtraj",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Python trajectory code for Lagrangian simulations",
    url="https://github.com/EUREC4A-UK/lagtraj",
    maintainer="Leif Denby",
    maintainer_email="l.c.denby@leeds.ac.uk",
    py_modules=["lagtraj"],
    packages=find_packages(),
    package_data={"": ["*.csv", "*.yml", "*.html", "*.dat", "*.yaml"]},
    include_package_data=True,
    install_requires=INSTALL_REQUIRES,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    zip_safe=False,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
2196,
28153,
198,
198,
38604,
7036,
62,
2200,
10917,
4663,
1546,
796,
1280,
7203,
8897,
18883,
13,
14116,
11074,
961... | 2.60274 | 292 |
# play an MP3 audio file (original comment: "executar um audio mp3")
import pygame
pygame.init()
pygame.mixer.music.load('BlackDog.mp3')
pygame.mixer.music.play()
# NOTE(review): event.wait() blocks only until the *first* event arrives,
# not necessarily until playback finishes -- confirm this is intended.
pygame.event.wait()
| [
2,
18558,
315,
283,
23781,
6597,
29034,
18,
198,
198,
11748,
12972,
6057,
198,
9078,
6057,
13,
15003,
3419,
198,
9078,
6057,
13,
19816,
263,
13,
28965,
13,
2220,
10786,
9915,
32942,
13,
3149,
18,
11537,
198,
9078,
6057,
13,
19816,
263... | 2.464286 | 56 |
from dataclasses import dataclass
from bindings.gmd.geometric_complex_type import GeometricComplexType
# Target XML namespace for the generated GML bindings in this module.
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
34111,
13,
70,
9132,
13,
469,
16996,
62,
41887,
62,
4906,
1330,
2269,
16996,
5377,
11141,
6030,
198,
198,
834,
45,
29559,
47,
11598,
834,
796,
366,
4023,
1378,
2503,
13,
404,
... | 2.842105 | 57 |
from .base import registered_device_types # noqa
from .kettle_redmond import RedmondKettle # noqa
from .xiaomi_ht import XiaomiHumidityTemperatureV1 # noqa
from .xiaomi_lywsd03 import XiaomiHumidityTemperatureLYWSD # noqa
| [
6738,
764,
8692,
1330,
6823,
62,
25202,
62,
19199,
220,
1303,
645,
20402,
198,
6738,
764,
74,
23570,
62,
445,
6327,
1330,
49420,
42,
23570,
220,
1303,
645,
20402,
198,
6738,
764,
36072,
12753,
62,
4352,
1330,
46726,
32661,
17995,
42492,... | 3.228571 | 70 |
import sys, getopt, subprocess
from src.common.load_h5 import H5COUNTS
from src.preprocess.build_h5_GSE103224 import build_h5
import pandas as pd
# # Load data
# scRNAdata = H5COUNTS('data/GSE103224.h5')
# # Preprocess data
# scRNAdata.preprocess_data(log_normalize=True, filter_genes=False, n_neighbors=False, umap=False)
# # Add clustering results
# scRNAdata.add_clustering_results(path='data/interim/', tumor_ids=[1, 2, 3, 4, 5, 6, 7, 8])
#
# # Get a list of biomarkers associated to Glioma survival
# BIOMARKER_F = "data/glioma_survival_associated_genes_Fatai.csv"
# biomarkers_df = pd.read_table(BIOMARKER_F, )
# biomarkers = pd.Index(scRNAdata.GENE_NAMES) & biomarkers_df["Gene"].unique()
#
# # Aggregate all cell expressions to find clusters with the biomarkers expressed
# scRNAdata.get_aggregated_cluster_expression(biomarkers, quantile_threshold=0.75,)
#
# # Run GSEA on all the DE genes for each cluster
# from src.analysis.gsea_analysis import GSEA_Analysis
# gsea = GSEA_Analysis(scRNAdata, path='data/interim/', threshold=0.05,) # path leads the file with the DE genes list for each cluster
# gsea.get_gsea_result()
#
# # Get the GSEA results of only the clusters which have a query biomarker expressed
# query_biomarker = ["CDC6"]
# result = gsea.get_gsea_result_by_cluster(scRNAdata.get_clusters_with_biomarker_expression(query_biomarker))
#
# # Visualize
# from src.visualization import heatmap
# heatmap(result, height=1000, width=600)
# Script entry point; fixed the missing spaces around "==" (PEP 8 E225).
# NOTE(review): `main` is not defined in this section of the file -- confirm
# it is defined or imported elsewhere, otherwise this raises NameError.
if __name__ == "__main__":
    main(sys.argv[1:])
11748,
25064,
11,
651,
8738,
11,
850,
14681,
198,
6738,
12351,
13,
11321,
13,
2220,
62,
71,
20,
1330,
367,
20,
34,
19385,
4694,
198,
6738,
12351,
13,
3866,
14681,
13,
11249,
62,
71,
20,
62,
38,
5188,
940,
2624,
1731,
1330,
1382,
6... | 2.720217 | 554 |