| content (string, lengths 1 to 1.04M) | input_ids (list, lengths 1 to 774k) | ratio_char_token (float64, 0.38 to 22.9) | token_count (int64, 1 to 774k) |
|---|---|---|---|
from ..utils import ApiUtil
from ..utils import StringUtil
from ..utils import BlueprintUtil
from ..utils.ApiUtil import Url
from functools import reduce
| [6738, 11485, 26791, 1330, 5949, ...] | 3.395833 | 48 |
#
# scikit-rf (aka skrf) documentation build configuration file
#
# -- Project information -----------------------------------------------------
project = 'scikit-rf'
copyright = '2021, scikit-rf team'
author = 'scikit-rf team'
# -- General configuration ---------------------------------------------------
import sys, os
import sphinx_rtd_theme
import nbsphinx
import warnings
warnings.filterwarnings('ignore')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
import skrf as rf
rf.setup_pylab()
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'nbsphinx',
#'inheritance_diagram',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# NBsphinx settings
nbsphinx_execute = 'always'
nbsphinx_allow_errors = True
nbsphinx_kernel_name = 'python'
numpydoc_show_class_members = False
nbsphinx_timeout = 120
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', "**/*.rst.rst", '**.ipynb_checkpoints']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
'''
setup_lines = open('../../setup.py').readlines()
version = VERSION#'vUndefined'
for l in setup_lines:
if l.startswith('VERSION'):
version = l.split("'")[1]
break
'''
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# Add any paths that contain custom themes here, relative to this directory.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'scikit-rf Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = html_title
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = 'scikit-rf-logo-flat.svg'
#html_logo = '_static/scikit-rf-logo-flat-docs.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'skrfdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'scikit-rf.tex', 'scikit-rf Documentation',
'scikit-rf Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/scikit-rf-title-flat.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{epstopdf}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
autosummary_generate = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scikit-rf', 'scikit-rf Documentation',
['alex arsenovic'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
}
| [2, 198, 2, 629, 1134, ...] | 3.107143 | 1,988 |
"""Contains all the server values to ensure communication"""
# GAE Project
project_id_us = "[PROJECT_NAME_US]"
project_id_eu = "[PROJECT_NAME_EU]"
cloud_region_us = "us"
cloud_region_eu = "eu"
cloud_regions = {cloud_region_us:"[US_SERVER_ID]", cloud_region_eu:"[EU_SERVER_NAME]"}
#service
service_name_us = "[US_SERVICE_ACCOUNT_NAME]"
service_file_us = "[US_SERVICE_ACCOUNT_FILE]"
service_name_eu = "[EU_SERVICE_ACCOUNT_NAME]"
service_file_eu = "[EU_SERVICE_ACCOUNT_FILE]"
| [37811, 4264, 1299, 477, 262, ...] | 2.615385 | 182 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-06 19:38
from __future__ import unicode_literals
from django.db import migrations, models
| [2, 532, 9, 12, 19617, ...] | 2.736842 | 57 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import click
from click_default_group import DefaultGroup
from omegaconf import MISSING
# @click.group(cls=DefaultGroup, default="web", default_if_no_args=True)
@click.group(cls=DefaultGroup)
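# NOTE: the click group function itself appears to be truncated from this
# sample; a minimal stand-in (name and body assumed) so that the
# @cli.command(...) decorators below have a group to attach to.
def cli():
    pass
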
@cli.command("web")
def web():
"""Launch a local webserver with the Mephisto UI"""
from mephisto.client.full.server import app
app.run(debug=False)
@cli.command("config")
@click.argument("identifier", type=(str), default=None, required=False)
@click.argument("value", type=(str), default=None, required=False)
@cli.command("review")
@click.argument("review_app_dir", type=click.Path(exists=True))
@click.option("-p", "--port", type=(int), default=5000)
@click.option("-o", "--output", type=(str), default="")
@click.option("--stdout", "output_method", flag_value="stdout")
@click.option("--file", "output_method", flag_value="file", default=True)
@click.option("--csv-headers/--no-csv-headers", default=False)
@click.option("--json/--csv", default=False)
@click.option("--db", "database_task_name", type=(str), default=None)
@click.option("--all/--one-by-one", "all_data", default=False)
@click.option("-d", "--debug", type=(bool), default=False)
def review(
review_app_dir,
port,
output,
output_method,
csv_headers,
json,
database_task_name,
all_data,
debug,
):
"""Launch a local review UI server. Reads in rows froms stdin and outputs to either a file or stdout."""
from mephisto.client.review.review_server import run
if output == "" and output_method == "file":
raise click.UsageError(
"You must specify an output file via --output=<filename>, unless the --stdout flag is set."
)
if database_task_name is not None:
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser
db = LocalMephistoDB()
mephisto_data_browser = MephistoDataBrowser(db=db)
name_list = mephisto_data_browser.get_task_name_list()
if database_task_name not in name_list:
raise click.BadParameter(
f'The task name "{database_task_name}" did not exist in MephistoDB.\n\nPerhaps you meant one of these? {", ".join(name_list)}\n\nFlag usage: mephisto review --db [task_name]\n'
)
run(
review_app_dir,
port,
output,
csv_headers,
json,
database_task_name,
all_data,
debug,
)
@cli.command("check")
def check():
"""Checks that mephisto is setup correctly"""
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.operations.utils import get_mock_requester
try:
db = LocalMephistoDB()
get_mock_requester(db)
except Exception as e:
click.echo("Something went wrong.")
click.echo(e)
return
click.echo("Mephisto seems to be set up correctly.")
@cli.command("requesters")
def list_requesters():
"""Lists all registered requesters"""
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from tabulate import tabulate
db = LocalMephistoDB()
requesters = db.find_requesters()
dict_requesters = [r.to_dict() for r in requesters]
click.echo(tabulate(dict_requesters, headers="keys"))
@cli.command("register", context_settings={"ignore_unknown_options": True})
@click.argument("args", nargs=-1)
def register_provider(args):
"""Register a requester with a crowd provider"""
if len(args) == 0:
click.echo("Usage: mephisto register <provider_type> arg1=value arg2=value")
return
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.operations.registry import get_crowd_provider_from_type
from mephisto.operations.utils import parse_arg_dict, get_extra_argument_dicts
provider_type, requester_args = args[0], args[1:]
args_dict = dict(arg.split("=", 1) for arg in requester_args)
crowd_provider = get_crowd_provider_from_type(provider_type)
RequesterClass = crowd_provider.RequesterClass
if len(requester_args) == 0:
from tabulate import tabulate
params = get_extra_argument_dicts(RequesterClass)
for param in params:
click.echo(param["desc"])
click.echo(tabulate(param["args"].values(), headers="keys"))
return
try:
parsed_options = parse_arg_dict(RequesterClass, args_dict)
except Exception as e:
click.echo(str(e))
if parsed_options.name is None:
click.echo("No name was specified for the requester.")
db = LocalMephistoDB()
requesters = db.find_requesters(requester_name=parsed_options.name)
if len(requesters) == 0:
requester = RequesterClass.new(db, parsed_options.name)
else:
requester = requesters[0]
try:
requester.register(parsed_options)
click.echo("Registered successfully.")
except Exception as e:
click.echo(str(e))
@cli.command("wut", context_settings={"ignore_unknown_options": True})
@click.argument("args", nargs=-1)
if __name__ == "__main__":
cli()
| [2, 48443, 14629, 14, 8800, ...] | 2.593359 | 2,078 |
#!/usr/bin/env python
"""Glassdoor Python API"""
__version__ = "0.0.8"
__author__ = [
"Mek <michael.karpeles@gmail.com>"
]
__license__ = "public domain"
__contributors__ = "see AUTHORS"
from glassdoor.gd import get, parse
| [2, 48443, 14629, 14, 8800, ...] | 2.544444 | 90 |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# SAMMON Perform Sammon mapping on a dataset, with weighted map axes.
#
# x = sammon(p) applies the Sammon nonlinear mapping algorithm on
# multivariate data p, where each row represents a pattern and each
# column represents a feature. On completion, x contains the
# corresponding coordinates of each point on the low-dimensional map.
# By default, a two-dimensional Sammon's map is created.
#
# Since Sammon's algorithm relies on pairwise distances between
# data points, preliminary feature scaling e.g., through normalization
# or standardization might be helpful. Also, even though RProp
# implementation works well with duplicate data samples it might
# be useful to remove them from p before applying Sammon's algorithm
# to avoid unnecessary computation.
#
# Function:
# x = sammon(p, alpha=[1,1], epochs=100, dnorm=1e-6, prnt=0,
# x=None, dist_p=None)
#
# Inputs:
# p - npatterns-by-ndim data matrix, each row represents a pattern
# (data sample) and each column represents a feature
#
# alpha - list of nmap axis weights,
# default: [1,1]
#
# epochs - maximum number of RProp training epochs,
# default: 100
#
# dnorm - Frobenius norm of dx / sqrt(npatterns*nmap) for stopping,
# default: 1e-6
#
# prnt - output frequency, in increments, prnt=0 suppresses print output,
# default: 0
#
# x - initial Sammon projection of data e.g., obtained using PCA
# or in the previous run of Sammon's algorithm, default: None
#
# dist_p - npatterns-by-npatterns matrix of pre-computed
# pairwise distances between data samples in p, default: None
#
# Outputs:
# x - npatterns-by-nmap matrix with Sammon's embeddings of p
#
# File : sammon.py
# Date : 01 October 2021
# Authors : Nelli Fedorova (nellix.fedorova@intel.com)
# : Based on MATLAB sammonsa.m, 21-Aug-1999, by S.A.^2, S.D., and T.M.
# (Serge A. Terekhov, Serge A. Shumsky,
# Svetlana Diyankova, Tatyana Muhamadieva)
#
# Description : Python implementation of Sammon's non-linear mapping algorithm [1]
# using RProp [2].
#
# References : [1] Sammon, John W. Jr., "A Nonlinear Mapping for Data
# Structure Analysis", IEEE Transactions on Computers,
# vol. C-18, no. 5, pp 401-409, May 1969.
#
# [2] Martin Riedmiller und Heinrich Braun:
# Rprop - A Fast Adaptive Learning Algorithm.
# Proceedings of the International Symposium on
# Computer and Information Science VII, 1992
import numpy as np
from scipy.spatial.distance import cdist
if False: # self-test using iris data
precompute_distances = True
remove_duplicates = False
# Load iris data
from sklearn.datasets import load_iris
iris = load_iris()
if remove_duplicates: # remove duplicates
(iris_x, iris_index) = np.unique(iris.data,axis=0,return_index=True)
iris_target = iris.target[iris_index]
else: # keep duplicates
iris_x = iris.data
iris_target = iris.target
iris_names = iris.target_names
# Build the Sammon projection
if not precompute_distances: # let Sammon compute the distances
y = sammon(iris_x, [1,1], epochs=100, dnorm=1e-5, prnt=10, dist_p=None)
else: # use pre-computed distances
y = sammon(iris_x, [1,1], epochs=100, dnorm=1e-5, prnt=10,
dist_p=cdist(iris_x,iris_x))
# Plot
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
plt.scatter(y[iris_target == 0, 0], y[iris_target == 0, 1], s=20, c='r', marker='o',label=iris_names[0])
plt.scatter(y[iris_target == 1, 0], y[iris_target == 1, 1], s=20, c='b', marker='D',label=iris_names[1])
plt.scatter(y[iris_target == 2, 0], y[iris_target == 2, 1], s=20, c='y', marker='v',label=iris_names[2])
plt.title('Sammon projection of iris flower data')
plt.legend(loc=2)
plt.show()
| [2, 48443, 14629, 14, 8800, ...] | 2.166426 | 2,079 |
# This script is the model definition
# Copyright 2020 Masao Someki
# MIT License (https://opensource.org/licenses/MIT)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from net import ConvRNN
| [2, 770, 4226, 318, 262, ...] | 3.243243 | 74 |
from .TaskFactory import TaskFactory
from .AnaplanConnection import AnaplanConnection
from .Action import Action
from .Parser import Parser
from .ActionParser import ActionParser
from .util.Util import TaskParameterError
class ActionTask(TaskFactory):
"""
Factory to generate an Anaplan action task and corresponding parser
"""
@staticmethod
def get_action(conn: AnaplanConnection, action_id: str, retry_count: int, mapping_params: dict = None) -> Action:
"""Get an ActionTask object
:param conn: AnaplanConnection object containing the Workspace and Model IDs, and AuthToken object
:type conn: AnaplanConnection
:param action_id: ID of the Anaplan action to execute
:type action_id: str
:param retry_count: Number of time to attempt to retry if there's an HTTP error during execution
:type retry_count: int
:param mapping_params: Runtime mapping parameters for an Import action
:type mapping_params: dict, optional
:raises TaskParameterError: Exception if mapping parameters are provided for any other action type than import
:return: Instantiated Action object
:rtype: Action
"""
if not mapping_params:
return Action(conn=conn, action_id=action_id, retry_count=retry_count, mapping_params=mapping_params)
else:
raise TaskParameterError("Only Anaplan imports accept mapping parameters.")
@staticmethod
def get_parser(conn: AnaplanConnection, results: dict, url: str) -> Parser:
"""Get an ActionParser
:param conn: AnaplanConnection object containing the Workspace and Model IDs, and AuthToken object
:type conn: AnaplanConnection
:param results: JSON object with executed task results
:type results: dict
:param url: URL of the executed action task
:type url: str
:return: Instantiated Parser object that parses and stores the results of an Anaplan task
:rtype: Parser
"""
return ActionParser(results, url)
| [6738, 764, 25714, 22810, 1330, ...] | 3.551985 | 529 |
# Generated by Django 2.1.7 on 2019-03-01 07:19
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [2, 2980, 515, 416, 37770, ...] | 2.897059 | 68 |
''' draft script for removing bands 11 and 13
these band indices (from 1) refer to the indexing used,
in the output from sentinel2 superresolution utility http://nicolas.brodu.net/recherche/sen2res/
'''
import os
import sys
a = os.system("mkdir new")
files = os.popen("ls -1 *.bin").readlines()
files = [f.strip() for f in files]
for f in files:
f = f.strip()
hf = f[:-3] + 'hdr'
hf2 = "new" + os.path.sep + hf
lines = open(hf).readlines()
lines = [line.strip() for line in lines]
for i in range(0, len(lines)):
line = lines[i]
if line == "bands = 13":
lines[i] = "bands = 11"
print(hf2)
#for line in lines: print(" " + line)
del lines[-1]
del lines[-2]
lines[-1] = lines[-1].replace(",", "}")
#print("**")
#for line in lines:
# print(" " + line)
print(hf2)
open(hf2, "wb").write(("\n".join(lines)).encode())
print("write binary file using c++ program..")
if not os.path.exists("rm_band"):
a = os.system("g++ misc.cpp rm_band.cpp -o rm_band")
cmd = "./rm_band " + f + " new" + os.path.sep + f + " 11 13"
print(cmd)
a = os.system(cmd)
| [7061, 6, 4538, 4226, 329, ...] | 2.272201 | 518 |
__________________________________________________________________________________________________
sample 20 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
__________________________________________________________________________________________________
sample 12956 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
__________________________________________________________________________________________________
| [27193, 10221, 834, 198, 39873, ...] | 3.884393 | 173 |
import unittest
import demon as d
import os
import networkx as nx
if __name__ == '__main__':
unittest.main()
| [11748, 555, 715, 395, 198, ...] | 2.738095 | 42 |
#!/usr/bin/env python2
from ConfigParser import ConfigParser
import os
import re
import subprocess
import sys
from termcolor import colored
config = ConfigParser()
ANSI_ESCAPE_RE = re.compile(r'\x1b[^m]*m')
RANGE_RE = re.compile(r'(\d+)-(\d+)')
ID_FILE = 'hgnids.txt'
CONFIG_FILE = '.hgnrc'
if __name__ == '__main__':
main()
| [2, 48443, 14629, 14, 8800, ...] | 2.447552 | 143 |
import torch
import itertools
import pytest
from data.complex import ComplexBatch
from data.dummy_complexes import get_testing_complex_list
from mp.molec_models import EmbedSparseCIN, OGBEmbedSparseCIN, EmbedSparseCINNoRings, EmbedGIN
from data.data_loading import DataLoader, load_dataset
def test_zinc_sparse_cin0_model_with_batching():
"""Check this runs without errors and that batching and no batching produce the same output."""
data_list = get_testing_complex_list()
# Try multiple parameters
dims = [1, 2]
bs = list(range(2, len(data_list)+1))
params = itertools.product(bs, dims, dims)
torch.manual_seed(0)
for batch_size, batch_max_dim, model_max_dim in params:
if batch_max_dim > model_max_dim:
continue
data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
model = EmbedSparseCIN(atom_types=32, bond_types=4, out_size=3, num_layers=3, hidden=5,
jump_mode='cat', max_dim=model_max_dim)
# We use the model in eval mode to avoid problems with batch norm.
model.eval()
batched_res = {}
for batch in data_loader:
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
batched_pred, res = model.forward(batch, include_partial=True)
for key in res:
if key not in batched_res:
batched_res[key] = []
batched_res[key].append(res[key])
for key in batched_res:
batched_res[key] = torch.cat(batched_res[key], dim=0)
unbatched_res = {}
for complex in data_list:
batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
pred, res = model.forward(batch, include_partial=True)
for key in res:
if key not in unbatched_res:
unbatched_res[key] = []
unbatched_res[key].append(res[key])
for key in unbatched_res:
unbatched_res[key] = torch.cat(unbatched_res[key], dim=0)
for key in set(list(unbatched_res.keys()) + list(batched_res.keys())):
assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
def test_embed_sparse_cin_no_rings_model_with_batching():
"""Check this runs without errors and that batching and no batching produce the same output."""
data_list = get_testing_complex_list()
# Try multiple parameters
dims = [1]
bs = list(range(2, len(data_list)+1))
params = itertools.product(bs, dims, dims)
torch.manual_seed(0)
for batch_size, batch_max_dim, model_max_dim in params:
if batch_max_dim > model_max_dim:
continue
data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
model = EmbedSparseCINNoRings(atom_types=32, bond_types=4, out_size=3, num_layers=3, hidden=5)
# We use the model in eval mode to avoid problems with batch norm.
model.eval()
batched_res = []
for batch in data_loader:
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
batched_pred = model.forward(batch)
batched_res.append(batched_pred)
batched_res = torch.cat(batched_res, dim=0)
unbatched_res = []
for complex in data_list:
batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
pred = model.forward(batch)
unbatched_res.append(pred)
unbatched_res = torch.cat(unbatched_res, dim=0)
assert torch.allclose(unbatched_res, batched_res, atol=1e-6)
def test_embed_gin_model_with_batching():
"""Check this runs without errors and that batching and no batching produce the same output."""
data_list = get_testing_complex_list()
# Try multiple parameters
dims = [1]
bs = list(range(2, len(data_list)+1))
params = itertools.product(bs, dims, dims)
torch.manual_seed(0)
for batch_size, batch_max_dim, model_max_dim in params:
if batch_max_dim > model_max_dim:
continue
data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
model = EmbedGIN(atom_types=32, bond_types=4, out_size=3, num_layers=3, hidden=5)
# We use the model in eval mode to avoid problems with batch norm.
model.eval()
batched_res = []
for batch in data_loader:
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
batched_pred = model.forward(batch)
batched_res.append(batched_pred)
batched_res = torch.cat(batched_res, dim=0)
unbatched_res = []
for complex in data_list:
batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
pred = model.forward(batch)
unbatched_res.append(pred)
unbatched_res = torch.cat(unbatched_res, dim=0)
assert torch.allclose(unbatched_res, batched_res, atol=1e-6)
@pytest.mark.data
def test_zinc_sparse_cin0_model_with_batching_on_proteins():
"""Check this runs without errors and that batching and no batching produce the same output."""
dataset = load_dataset('PROTEINS', max_dim=2, fold=0, init_method='mean')
assert len(dataset) == 1113
split_idx = dataset.get_idx_split()
dataset = dataset[split_idx['valid']]
assert len(dataset) == 111
max_dim = 2
torch.manual_seed(0)
data_loader = DataLoader(dataset, batch_size=32, max_dim=max_dim)
model = EmbedSparseCIN(atom_types=64, bond_types=4, out_size=3, num_layers=3, hidden=5,
jump_mode='cat', max_dim=max_dim)
model.eval()
batched_res = {}
for batch in data_loader:
# Simulate no edge and two_cell features to test init layer
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
# ZincSparseCIN assumes features are unidimensional like in ZINC
batch.cochains[0].x = batch.cochains[0].x[:, :1]
batched_pred, res = model.forward(batch, include_partial=True)
for key in res:
if key not in batched_res:
batched_res[key] = []
batched_res[key].append(res[key])
for key in batched_res:
batched_res[key] = torch.cat(batched_res[key], dim=0)
unbatched_res = {}
for complex in dataset:
batch = ComplexBatch.from_complex_list([complex], max_dim=max_dim)
# Simulate no edge and two_cell features to test init layer
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
# ZincSparseCIN assumes features are unidimensional like in ZINC
batch.cochains[0].x = batch.cochains[0].x[:, :1]
pred, res = model.forward(batch, include_partial=True)
for key in res:
if key not in unbatched_res:
unbatched_res[key] = []
unbatched_res[key].append(res[key])
for key in unbatched_res:
unbatched_res[key] = torch.cat(unbatched_res[key], dim=0)
for key in set(list(unbatched_res.keys()) + list(batched_res.keys())):
assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
def test_ogb_sparse_cin0_model_with_batching():
"""Check this runs without errors and that batching and no batching produce the same output."""
data_list = get_testing_complex_list()
# Try multiple parameters
dims = [1, 2]
bs = list(range(2, len(data_list)+1))
params = itertools.product(bs, dims, dims)
torch.manual_seed(0)
for batch_size, batch_max_dim, model_max_dim in params:
if batch_max_dim > model_max_dim:
continue
data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
model = OGBEmbedSparseCIN(out_size=3, num_layers=3, hidden=5,
jump_mode=None, max_dim=model_max_dim)
# We use the model in eval mode to avoid problems with batch norm.
model.eval()
batched_res = {}
for batch in data_loader:
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
batched_pred, res = model.forward(batch, include_partial=True)
for key in res:
if key not in batched_res:
batched_res[key] = []
batched_res[key].append(res[key])
for key in batched_res:
batched_res[key] = torch.cat(batched_res[key], dim=0)
unbatched_res = {}
for complex in data_list:
batch = ComplexBatch.from_complex_list([complex], max_dim=batch_max_dim)
# Simulate no edge and two_cell features to test init layer
if len(batch.cochains) >= 2:
batch.cochains[1].x = None
if len(batch.cochains) == 3:
batch.cochains[2].x = None
pred, res = model.forward(batch, include_partial=True)
for key in res:
if key not in unbatched_res:
unbatched_res[key] = []
unbatched_res[key].append(res[key])
for key in unbatched_res:
unbatched_res[key] = torch.cat(unbatched_res[key], dim=0)
for key in set(list(unbatched_res.keys()) + list(batched_res.keys())):
assert torch.allclose(unbatched_res[key], batched_res[key], atol=1e-6), (
print(key, torch.max(torch.abs(unbatched_res[key] - batched_res[key]))))
| [11748, 28034, 198, 11748, 340, ...] | 2.148584 | 5,189 |
'''
A container class for CSC sparse matrices. Note that sparse matrices can only
hold floats or integers; string and datetime data will need to be cleaned.
Currently, the class accounts for a datetime hours column.
Constructor is called with an exclude_before (int), which tells
the class to move columns before exclude_before into the 'excluded' attribute.
This is currently set to 2, such that the constructor moves cell_id and
date to the excluded columns.
Attributes:
- data: a csc matrix
- columns: the column names, in order
'''
import csv
import numpy as np
import datetime
from scipy import sparse
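# The container class itself is truncated in this sample; a rough sketch of the
# constructor the docstring describes (the class name and argument layout below
# are assumptions, and real data would first need string/datetime cleaning).
class SparseContainer(object):
    def __init__(self, rows, columns, exclude_before=2):
        # Move the leading columns (e.g. cell_id and date) into `excluded`.
        self.excluded = [row[:exclude_before] for row in rows]
        self.columns = columns[exclude_before:]
        self.data = sparse.csc_matrix(
            np.array([row[exclude_before:] for row in rows], dtype=float))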
| [7061, 6, 198, 32, 9290, ...] | 3.832298 | 161 |
# https://leetcode.com/problems/divide-two-integers/
import unittest
MINIMUM_NUMBER_VALUE = -2**31
MAXIMUM_NUMBER_VALUE = 2**31 - 1
MAXIMUM_POWER_NUMBER = 31
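# The solution body is not included in this sample (only the constants above
# survive); a possible implementation using repeated subtraction of bit-shifted
# divisors, clamped to the signed 32-bit range the problem requires.
def divide(dividend, divisor):
    negative = (dividend < 0) != (divisor < 0)
    dividend, divisor = abs(dividend), abs(divisor)
    quotient = 0
    for power in range(MAXIMUM_POWER_NUMBER, -1, -1):
        if (divisor << power) <= dividend:
            dividend -= divisor << power
            quotient += 1 << power
    if negative:
        quotient = -quotient
    return max(MINIMUM_NUMBER_VALUE, min(MAXIMUM_NUMBER_VALUE, quotient))

def main():
    print(divide(10, 3))   # expected: 3
    print(divide(7, -3))   # expected: -2
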
if __name__ == "__main__":
    main()
| [2, 3740, 1378, 293, 316, ...] | 2.298851 | 87 |
import sys
from colt import Colt
import numpy as np
from ase.units import Hartree, mol, kJ
#
from .qm_base import WriteABC, ReadABC
from ..elements import ATOM_SYM
| [11748, 25064, 198, 6738, 951, ...] | 2.982143 | 56 |
############################################################################
# This Python file is part of PyFEM, the code that accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel #
# John Wiley and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. #
# #
# The latest stable version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# A github repository, with the most up to date version of the code, #
# can be found here: #
# https://github.com/jjcremmers/PyFEM #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
from .Element import Element
from pyfem.util.shapeFunctions import getElemShapeData
from pyfem.util.kinematics import Kinematics
from numpy import zeros, dot, outer, ones, eye, sqrt,hstack
from scipy.linalg import norm
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
| [29113, 29113, 7804, 4242, 201, ...] | 2.338735 | 1,296 |
s = """
.a.fy
integer: a=1, b=1, c=1, n=1, m=1
"""
from fython.test import *
writer(s)
w = load('.a', force=1, release=1, verbose=0, run_main=0)
# print(open(w.module.url.fortran_path, 'r').read())
| [82, 796, 37227, 198, 13, ...] | 2.050505 | 99 |
from src import common
required_parameter_list = ["name", "number"]
| [6738, 12351, 1330, 2219, 198, ...] | 3.074074 | 27 |
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from defaults_2 import default_config
import os
| [11748, 19798, 292, 355, 279, ...] | 3.391304 | 46 |
from taskmanager import TaskManager
#from playsound import playsound
t = TaskManager()
# youtube : Passed
"""
t.get_youtube_audio("https://www.youtube.com/watch?v=svT7uKdNphU")
t.play("play")"""
# google/wikipedia api : Passed
query = "do you know who is donald trump"
gr = None
if 'who' in query:
gr = query[query.index('who')+4:]
elif 'whom' in query:
gr = query[query.index('whom')+4:]
elif 'what' in query:
gr = query[query.index('what')+4:]
elif 'which' in query:
gr = query[query.index('which')+4:]
elif 'how' in query:
gr = query[query.index('how')+4:]
elif 'where' in query:
gr = query[query.index('where')+4:]
if gr == None:
print(t.wiki(gr))
else:
print(t.google(gr))
#joke: Passed
#print(t.joke())
#print(t.memorise("what is my favourite colour", "my favourite colour is lime"))
"""
desc, temp, humid = t.weather('f317f1f507f2d9f0a8aa1316d86507b8', 'delhi')
print(f"Desc: {desc}, temp: {temp}, humid: {humid}")
"""
#print(t.parse_youtube_query('Unstopable tony junior'))
#playsound('music/Tony Junior NIGHT MOVES ft Lasse Meling - Unstoppable (Lyric Video).mp4')
#print(t.news(2))
| [6738, 4876, 37153, 1330, 15941, ...] | 2.323529 | 510 |
"""Unicycle go-to-pose (Hybrid) Example.
TODO: UPDATE DESCRIPTION
Written by: The Robotarium Team
Modified by: Zahi Kakish (zmk5)
"""
import time
import numpy as np
from robotarium_node.robotarium import Robotarium
from robotarium_node.utilities.transformations import *
from robotarium_node.utilities.barrier_certificates import *
from robotarium_node.utilities.misc import *
from robotarium_node.utilities.controllers import *
def main() -> None:
"""Run script."""
# Instantiate Robotarium object
N = 5
initial_conditions = np.array(
np.mat('1 0.5 -0.5 0 0.28; 0.8 -0.3 -0.75 0.1 0.34; 0 0 0 0 0'))
r = Robotarium(
number_of_robots=N, show_figure=True,
initial_conditions=initial_conditions, sim_in_real_time=True)
# Define goal points by removing orientation from poses
goal_points = generate_initial_conditions(N)
# Create unicycle pose controller
unicycle_pose_controller = create_hybrid_unicycle_pose_controller()
# Create barrier certificates to avoid collision
uni_barrier_cert = create_unicycle_barrier_certificate()
# define x initially
x = r.get_poses()
r.step()
# While the number of robots at the required poses is less
# than N...
while np.size(at_pose(x, goal_points)) != N:
# Get poses of agents
x = r.get_poses()
# Create unicycle control inputs
dxu = unicycle_pose_controller(x, goal_points)
# Create safe control inputs (i.e., no collisions)
dxu = uni_barrier_cert(dxu, x)
# Set the velocities
r.set_velocities(np.arange(N), dxu)
# Iterate the simulation
r.step()
# Call at end of script to print debug information and for your script to
# run on the Robotarium server properly.
r.call_at_scripts_end()
if __name__ == '__main__':
main()
| [37811, 3118, 35298, 467, 12, ...] | 2.651494 | 703 |
import logging
from .. import *
from ..i2c_master import I2CMasterApplet
| [11748, 18931, 198, 198, 6738, ...] | 3.04 | 25 |
import os
import nimp.sys.platform
class XboxOne(nimp.sys.platform.Platform):
''' XboxOne platform description '''
| [198, 11748, 28686, 198, 11748, ...] | 3.102564 | 39 |
NEWS_API_TOP_HEADLINES_SOURCES = 'https://newsapi.org/v2/top-headlines/sources?apiKey={}'
NEWS_API_TOP_HEADLINES_SOURCE = 'https://newsapi.org/v2/top-headlines/sources={}?apiKey={}'
NEWS_API_EVERYTHING = 'https://newsapi.org/v2/everything?q={}&apiKey={}'
NEWS_API_KEY = 'e4886eff42244ffaabebbfb0a9bbd892'
| [49597, 62, 17614, 62, 35222, ...] | 2.202899 | 138 |
x = (C(), D(), E())
a, b, c = x
x[0].bar() # NO bar
x[1].baz() # NO baz
x[2].foo() # baz
a.bar() # NO bar
b.baz() # NO baz
c.foo() # NO foo
| [628, 628, 198, 198, 87, ...] | 1.741573 | 89 |
from logger_builder import LoggerBuilder
from logger_builder.formatter import create_basic_formatter
from logger_builder.handler import StreamHandler
import pandas as pd
from sklearn.datasets import load_diabetes
from features_fixer import FeaturesFixer
from features_fixer.reducer import PCA
from features_fixer.scaler import Standardizer
if __name__ == '__main__':
# Initialize logging
formatter = create_basic_formatter()
stream_handler = StreamHandler(formatter)
handlers = [stream_handler]
logger_builder = LoggerBuilder(handlers)
logger = logger_builder.create_logger('FeaturesFixer')
# Initialize a scaler and dimensionality reducing objects
scaler = Standardizer()
reducer = PCA()
ff = FeaturesFixer(logger, scaler=scaler, reducer=reducer)
# Example on the diabetes dataset
diabetes = load_diabetes()
df = pd.DataFrame(diabetes['data'], columns=diabetes['feature_names'])
df = ff.scale_features(df)
df = ff.reduce_features_number(df)
    logger.info(df.head())
| [6738, 49706, 62, 38272, 1330, ...] | 3.108108 | 333 |
import tensorflow as tf
def get_activation_fn(type='relu'):
"""
Return tensorflow activation function given string name.
Args:
type:
Returns:
"""
if type == 'relu':
return tf.nn.relu
elif type == 'elu':
return tf.nn.elu
elif type == 'tanh':
return tf.nn.tanh
elif type == 'sigmoid':
return tf.nn.sigmoid
elif type == 'softplus':
return tf.nn.softplus
elif type == None:
return None
else:
raise Exception("Activation function is not supported.")
def linear(input, output_size, activation_fn=None, batch_norm=False, is_training=True):
"""
Creates a linear layer.
Args:
input:
output_size:
activation_fn : tensorflow activation function such as tf.nn.relu, tf.nn.sigmoid, etc.
batch_norm (bool): whether use batch normalization layer or not.
is_training (bool): whether in training mode or not.
Returns:
"""
dense_layer = tf.layers.dense(input, output_size)
if batch_norm == True and activation_fn is not None:
dense_layer = tf.layers.batch_normalization(dense_layer, axis=1, training=is_training)
if isinstance(activation_fn, str):
activation_fn = get_activation_fn(activation_fn)
if activation_fn is not None:
dense_layer = activation_fn(dense_layer)
return dense_layer
def fully_connected_layer(input, is_training=True, **kwargs):
"""
Creates fully connected layers.
Args:
input:
is_training (bool): whether in training mode or not.
**kwargs: `size`, `activation_fn`, `num_layers`
Returns:
"""
activation_fn = get_activation_fn(kwargs.get('activation_fn', 'relu'))
num_layers = kwargs.get('num_layers', 1)
hidden_size = kwargs.get('size', 256)
use_batch_norm = kwargs.get('use_batch_norm', False)
hidden_layer = input
for i in range(num_layers):
hidden_layer = linear(hidden_layer, hidden_size, activation_fn=activation_fn, batch_norm=use_batch_norm,
is_training=is_training)
return hidden_layer
def get_reduce_loss_func(type="sum_mean", seq_len=None):
"""
Args:
loss: expects [batch_size, loss_size] or [batch_size, sequence_length, loss_size].
type: "sum_mean", "mean", "sum".
Returns:
"""
def reduce_sum_mean(loss):
"""
Average batch loss. First calculates per sample loss by summing over the second and third dimensions and then
takes the average.
"""
rank = len(loss.get_shape())
if rank > 3 or rank < 2:
raise Exception("Loss rank must be 2 or 3.")
if rank == 3:
return tf.reduce_mean(tf.reduce_sum(loss, axis=[1,2]))
elif rank == 2:
return tf.reduce_mean(tf.reduce_sum(loss, axis=[1]))
def reduce_mean_per_step(loss):
"""
First calculates average loss per sample (loss per step), and then takes average over samples. Loss per step
requires sequence length. If all samples have the same sequence length then this is equivalent to `mean`.
"""
rank = len(loss.get_shape())
if rank > 3 or rank < 2:
raise Exception("Loss rank must be 2 or 3.")
# Calculate loss per step.
if rank == 3:
step_loss_per_sample = tf.reduce_sum(loss, axis=[1, 2])/tf.cast(seq_len, tf.float32)
elif rank == 2:
step_loss_per_sample = tf.reduce_sum(loss, axis=[1])/tf.cast(seq_len, tf.float32)
# Calculate average (per step) sample loss.
return tf.reduce_mean(step_loss_per_sample)
if type == "sum_mean":
return reduce_sum_mean
elif type == "sum":
return tf.reduce_sum
elif type == "mean":
return tf.reduce_mean
elif type == "mean_per_step":
return reduce_mean_per_step
def get_rnn_cell(**kwargs):
"""
Creates an rnn cell object.
Args:
**kwargs: must contain `cell_type`, `size` and `num_layers` key-value pairs. `dropout_keep_prob` is optional.
`dropout_keep_prob` can be a list of ratios where each cell has different dropout ratio in a stacked
architecture. If it is a scalar value, then the whole architecture (either a single cell or stacked cell)
has one DropoutWrapper.
Returns:
"""
cell_type = kwargs['cell_type']
size = kwargs['size']
num_layers = kwargs['num_layers']
dropout_keep_prob = kwargs.get('dropout_keep_prob', 1.0)
separate_dropout = False
if isinstance(dropout_keep_prob, list) and len(dropout_keep_prob) == num_layers:
separate_dropout = True
if cell_type.lower() == 'LSTM'.lower():
rnn_cell_constructor = tf.nn.rnn_cell.LSTMCell
elif cell_type.lower() == 'GRU'.lower():
rnn_cell_constructor = tf.nn.rnn_cell.GRUCell
elif cell_type.lower() == 'LayerNormBasicLSTMCell'.lower():
rnn_cell_constructor = tf.contrib.rnn.LayerNormBasicLSTMCell
else:
raise Exception("Unsupported RNN Cell.")
rnn_cells = []
for i in range(num_layers):
cell = rnn_cell_constructor(size)
if separate_dropout:
cell = tf.contrib.rnn.DropoutWrapper(cell,
input_keep_prob=dropout_keep_prob[i],
output_keep_prob=dropout_keep_prob[i],
state_keep_prob=1,
dtype=tf.float32,
seed=1)
rnn_cells.append(cell)
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell(cells=rnn_cells, state_is_tuple=True)
else:
cell = rnn_cells[0]
if not separate_dropout and dropout_keep_prob < 1.0:
cell = tf.contrib.rnn.DropoutWrapper(cell,
input_keep_prob=dropout_keep_prob,
output_keep_prob=dropout_keep_prob,
state_keep_prob=1,
dtype=tf.float32,
seed=1)
return cell
| [11748, 11192, 273, 11125, 355, ...] | 2.12555 | 2,955 |
if __name__ == '__main__':
T = int(input())
for x in range(1, T + 1):
l1 = input().split()
l2 = input().split()
N = int(l1[0])
K = int(l1[1])
V = []
for v in l2:
V.append(int(v))
print('Case #', x, ':', ' ', '{0:.6f}'.format(E(K, V)), sep='')
| [198, 198, 361, 11593, 3672, ...] | 1.707447 | 188 |
#Copyright 2007-2009 WebDriver committers
#Copyright 2007-2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from marionette_test import MarionetteTestCase
| [2, 15269, 4343, 12, 10531, ...] | 3.698324 | 179 |
# (c) Shrimadhav U K
#
# This file is part of @UniBorg
#
# @UniBorg is free software; you cannot redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# @UniBorg is not distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from telethon import custom, events
from telethon.tl.types import Chat
from telethon.utils import get_display_name
from userbot.Config import Var
@borg.on(
events.NewMessage(
incoming=True,
blacklist_chats=Var.UB_BLACK_LIST_CHAT,
func=lambda e: (e.mentioned),
)
)
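# The handler body is truncated in this sample; a hypothetical no-op coroutine
# so the @borg.on(...) decorator above has a function to wrap.
async def _on_mention(event):
    pass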
| [2, 357, 66, 8, 911, ...] | 3.094891 | 274 |
import numpy as np
import pytest
from sktime.datasets import load_gunpoint
from sktime.transformers.matrix_profile import MatrixProfile
| [11748, 299, 32152, 355, 45941, ...] | 3.578947 | 38 |
import csv
import sys
import re
sys.path.append('/Users/jkc023/Documents/homecode/fantasycalc-utils')
from services.MongoDB import MongoDB
import numpy as np
# if player has increased or decreased in value over both of the last weeks, return that value
# if player went up and down return 0
season_rankings = load_season(7)
print('created rankings')
# connect to db and insert rankings
db = MongoDB()
db.connect('localhost')
print('connected to db')
db.insert_rankings(season_rankings)
print('finished inserting rankings')
# print(week_rankings)
| [11748, 269, 21370, 198, 11748, ...] | 3.375758 | 165 |
input = """
p(X) :- q(Y), r(Z), X=Y+Z.
q(X) :- p(X).
q(0).
r(5).
"""
output = """
p(X) :- q(Y), r(Z), X=Y+Z.
q(X) :- p(X).
q(0).
r(5).
"""
| [15414, 796, 37227, 198, 79, ...] | 1.413462 | 104 |
# Copyright DST Group. Licensed under the MIT license.
from CybORG.Simulator.Entity import Entity
# __init__(): Constructor for local user groups, assigns group name, group ID(gid) and lists of users within group
# get_state(self): returns dictionary of the group and group id
# remove_user(self, user): if user is found within list of group, removes them from the list.
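# The class definition itself is not included in this sample; a minimal sketch
# of what the methods described above could look like (the class name and the
# Entity constructor call are assumptions).
class LocalGroup(Entity):
    def __init__(self, name, gid, users=None):
        super().__init__()
        self.name = name
        self.gid = gid
        self.users = users if users is not None else []

    def get_state(self):
        return {'group_name': self.name, 'gid': self.gid}

    def remove_user(self, user):
        if user in self.users:
            self.users.remove(user)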
| [2, 15069, 360, 2257, 4912, ...] | 3.845361 | 97 |
# Copyright 2016-2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Autnum policy module for prngmgr."""
import re
_as_regex = re.compile(r'^AS(\d+)$')
_asdot_regex = re.compile(r'^(\d+)\.(\d+)$')
class AutNum(object):
"""AutNum policy object class."""
def __init__(self, asn=None):
"""Init new AutNum instance."""
try:
m = _as_regex.match(asn)
if m:
asn = m.group(1)
m = _asdot_regex.match(asn)
if m:
asn = int(m.group(1)) * 2**16 + int(m.group(2))
except TypeError:
pass
asn = int(asn)
if 0 < asn < 2 ** 32:
self._autnum = asn
else:
raise ValueError("ASN must be a postive 32-bit integer")
@property
def autnum(self):
"""Get ASN."""
return self._autnum
@property
def is_4byte(self):
"""Check whether the ASN is a 4 byte ASN."""
if self.autnum < 2**16:
return False
else:
return True
def __str__(self):
"""Render as string."""
return str(self.autnum)
def __unicode__(self):
"""Render as unicode."""
return self.__str__()
| [2, 15069, 1584, 12, 5539, ...] | 2.338501 | 774 |
#coding=utf-8
from sampler import outputSource
from sampler_uniq_with_md5 import preprocess_with_uniq_test
class unic_diff(object):
"""docstring for unic_diff"""
if __name__ == '__main__':
unic_differ = unic_diff()
    outputSource(*preprocess_with_uniq_test('example.gif', unic_differ.check_img))
| [2, 66, 7656, 28, 40477, ...] | 2.82243 | 107 |
# Useful doc on Python magic methods:
# https://rszalski.github.io/magicmethods/
| [2, 49511, 2205, 319, 11361, ...] | 3.153846 | 26 |
# Specialization: Google IT Automation with Python
# Course 02: Using Python to Interact with the Operating System
# Week 2 Module Part 2 - Practice Quiz
# Student: Shawn Solomon
# Learning Platform: Coursera.org
# Scripting examples encountered during the Module Part 2 Practice Quiz:
# 01. The create_python_script function creates a new python script in the current working directory,
# adds the line of comments to it declared by the 'comments' variable, and returns the size of the
# new file. Fill in the gaps to create a script called "program.py".
# def create_python_script(filename):
# comments = "# Start of a new Python program"
# with ___:
# filesize = ___
# return(filesize)
#
# print(create_python_script("program.py"))
import os
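# One possible completion of the commented scaffold above (a sketch; the file
# size is read after the `with` block so the buffered write is flushed first).
def create_python_script(filename):
    comments = "# Start of a new Python program"
    with open(filename, 'w') as file:
        file.write(comments)
    filesize = os.path.getsize(filename)
    return filesize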
print(create_python_script("testwrite.py"))
# 02. The new_directory function creates a new directory inside the current working directory, then creates
# a new empty file inside the new directory, and returns the list of files in that directory. Fill in the
# gaps to create a file "script.py" in the directory "PythonPrograms".
# import os
#
# def new_directory(directory, filename):
# # Before creating a new directory, check to see if it already exists
# if os.path.isdir(directory) == False:
# ___
#
# # Create the new file inside of the new directory
# os.chdir(___)
# with open (___) as file:
# pass
#
# # Return the list of files in the new directory
# return ___
#
# print(new_directory("PythonPrograms", "script.py"))
import os
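# One possible completion of the commented scaffold above (a sketch; note that
# os.chdir() changes the working directory for the rest of the run).
def new_directory(directory, filename):
    # Before creating a new directory, check to see if it already exists
    if not os.path.isdir(directory):
        os.mkdir(directory)
    # Create the new file inside of the new directory
    os.chdir(directory)
    with open(filename, 'w') as file:
        pass
    # Return the list of files in the new directory
    return os.listdir()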
print(new_directory("PythonPrograms", "script.py"))
# 04. The file_date function creates a new file in the current working directory, checks the date that the
# file was modified, and returns just the date portion of the timestamp in the format of yyyy-mm-dd. Fill in
# the gaps to create a file called "newfile.txt" and check the date that it was modified.
# import os
# import datetime
#
# def file_date(filename):
# # Create the file in the current directory
# ___
# timestamp = ___
# # Convert the timestamp into a readable format, then into a string
# ___
# # Return just the date portion
# # Hint: how many characters are in “yyyy-mm-dd”?
# return ("{___}".format(___))
#
# print(file_date("newfile.txt"))
# # Should be today's date in the format of yyyy-mm-dd
import os
import datetime
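# One possible completion of the commented scaffold above (a sketch; the first
# ten characters of the stringified datetime are exactly "yyyy-mm-dd").
def file_date(filename):
    # Create the file in the current directory
    with open(filename, 'w') as file:
        pass
    timestamp = os.path.getmtime(filename)
    # Convert the timestamp into a readable format, then into a string
    date = str(datetime.datetime.fromtimestamp(timestamp))
    # Return just the date portion
    return "{}".format(date[:10])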
print(file_date("newfile.txt"))
# Should be today's date in the format of yyyy-mm-dd
# 05. The parent_directory function returns the name of the directory that's located just above the current
# working directory. Remember that '..' is a relative path alias that means "go up to the parent directory".
# Fill in the gaps to complete this function.
# import os
# def parent_directory():
# # Create a relative path to the parent
# # of the current working directory
# relative_parent = os.path.join(___, ___)
# # Return the absolute path of the parent directory
# return ___
# print(parent_directory())
import os
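# One possible completion of the commented scaffold above (a sketch using the
# os.pardir alias for "..").
def parent_directory():
    # Create a relative path to the parent of the current working directory
    relative_parent = os.path.join(os.getcwd(), os.pardir)
    # Return the absolute path of the parent directory
    return os.path.abspath(relative_parent)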
print(parent_directory()) | [
2,
6093,
1634,
25,
3012,
7283,
17406,
341,
351,
11361,
201,
198,
2,
20537,
7816,
25,
8554,
11361,
284,
4225,
529,
351,
262,
24850,
4482,
201,
198,
2,
6119,
362,
19937,
2142,
362,
532,
19939,
2264,
528,
201,
198,
2,
13613,
25,
25225,... | 3.149239 | 985 |
import dxrip
import sqlite3
| [
11748,
44332,
5528,
198,
11748,
44161,
578,
18,
198
] | 3.111111 | 9 |
from django.contrib import admin
from django.urls import reverse
from django.utils.safestring import mark_safe
from .models import Message
@admin.register(Message) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
198,
6738,
764,
27530,
1330,
16000,
198,
198,
31,
... | 3.510638 | 47 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
from typing import Optional
import six
from tensorflow.core.protobuf.tpu import optimization_parameters_pb2
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as elc
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util.tf_export import tf_export
TRAINING = elc.TPUEmbeddingConfiguration.TRAINING
INFERENCE = elc.TPUEmbeddingConfiguration.INFERENCE
# TODO(shizhiw): a more future-proof way is to have optimization_parameter such
# as AdagradParameters etc instead of learning_rate.
class TableConfig(
collections.namedtuple('TableConfig', [
'vocabulary_size', 'dimension', 'initializer', 'combiner',
'hot_id_replication', 'learning_rate', 'learning_rate_fn',
'optimization_parameters',
])):
"""Embedding table configuration."""
def __new__(cls,
vocabulary_size,
dimension,
initializer=None,
combiner='mean',
hot_id_replication=False,
learning_rate=None,
learning_rate_fn=None,
optimization_parameters=None):
"""Embedding table configuration.
Args:
vocabulary_size: Number of vocabulary (/rows) in the table.
dimension: The embedding dimension.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn', 'sum' and None are
supported, with 'mean' the default. 'sqrtn' often achieves good
accuracy, in particular with bag-of-words columns. For more information,
see `tf.nn.embedding_lookup_sparse`. None is only valid for dense rather
than sparse tensors.
hot_id_replication: If true, enables hot id replication, which can make
embedding lookups faster if there are some hot rows in the table.
learning_rate: float, static learning rate for this table. If
learning_rate and learning_rate_fn are both `None`, static learning
rate as specified in local `optimization_parameters` will be used.
In case local `optimization_parameters` is `None`, global
`optimization_parameters` in `TPUEmbedding` constructor will be used.
        `learning_rate_fn` must be `None` if `learning_rate` is not `None`.
      learning_rate_fn: a function; use the dynamic learning rate given by this
        function. It will be passed the current global step. If
learning_rate and learning_rate_fn are both `None`, static
learning rate as specified in `optimization_parameters` is used.
        `learning_rate` must be `None` if `learning_rate_fn` is not `None`.
optimization_parameters: `AdagradParameters`, `AdamParameters`,
        `StochasticGradientDescentParameters`. Specifies the table-level
        optimizer. If it is `None`, the global optimizer in the `TPUEmbedding`
        constructor is used.
Returns:
`TableConfig`.
Raises:
      ValueError: if `vocabulary_size` is not a positive integer.
      ValueError: if `dimension` is not a positive integer.
ValueError: if `initializer` is specified and is not callable.
ValueError: if `combiner` is not supported.
ValueError: if `learning_rate` and `learning_rate_fn` are both not
`None`.
"""
if not isinstance(vocabulary_size, int) or vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
if not isinstance(dimension, int) or dimension < 1:
raise ValueError('Invalid dimension {}.'.format(dimension))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
if combiner not in ('mean', 'sum', 'sqrtn', None):
raise ValueError('Invalid combiner {}'.format(combiner))
if learning_rate is not None and learning_rate_fn is not None:
raise ValueError('At most one of learning_rate and learning_rate_fn '
                       'can be set; got {} and {}'
.format(learning_rate, learning_rate_fn))
if optimization_parameters is not None:
if not isinstance(optimization_parameters, _OptimizationParameters):
raise ValueError('`optimization_parameters` must inherit from '
'`_OptimizationParameters`. '
'`type(optimization_parameters)`={}'.format(
type(optimization_parameters)))
return super(TableConfig,
cls).__new__(cls, vocabulary_size, dimension, initializer,
combiner, hot_id_replication, learning_rate,
learning_rate_fn, optimization_parameters)
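# A small usage sketch (illustrative sizes): a 10,000-row, 64-dimensional table
# that keeps the global optimizer and averages multi-valued features.
example_user_table_config = TableConfig(
    vocabulary_size=10000,
    dimension=64,
    combiner='mean')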
class FeatureConfig(
collections.namedtuple(
'FeatureConfig',
['table_id', 'max_sequence_length', 'weight_key'])):
"""Feature configuration."""
def __new__(cls,
table_id,
max_sequence_length=0,
weight_key=None):
"""Feature configuration.
Args:
table_id: Which table the feature is uses for embedding lookups.
max_sequence_length: If positive, the feature is a sequence feature with
the corresponding maximum sequence length. If the sequence is longer
than this, it will be truncated. If 0, the feature is not a sequence
feature.
weight_key: If using weights for the combiner, this key specifies which
input feature contains the weights.
Returns:
`FeatureConfig`.
Raises:
      ValueError: if `max_sequence_length` is negative.
"""
if not isinstance(max_sequence_length, int) or max_sequence_length < 0:
raise ValueError('Invalid max_sequence_length {}.'.format(
max_sequence_length))
return super(FeatureConfig, cls).__new__(cls, table_id, max_sequence_length,
weight_key)
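# A small usage sketch (hypothetical table name): a sequence feature truncated
# to 16 ids per example, looking up the table registered as 'user_table'.
example_clicks_feature_config = FeatureConfig('user_table',
                                              max_sequence_length=16)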
class EnqueueData(
collections.namedtuple(
'EnqueueData',
['embedding_indices', 'sample_indices', 'aggregation_weights'])):
"""Data to be enqueued through generate_enqueue_ops()."""
def __new__(cls,
embedding_indices,
sample_indices=None,
aggregation_weights=None):
"""Data to be enqueued through generate_enqueue_ops().
Args:
      embedding_indices: A rank 1 Tensor, indices into the embedding tables. It
corresponds to sp_ids.values in embedding_lookup_sparse(). Both int32
and int64 are allowed and will be converted to int32 internally.
      sample_indices: A rank 2 Tensor specifying the training example to which
the corresponding embedding_indices and aggregation_weights values
belong. It corresponds to sp_ids.indices in embedding_lookup_sparse().
If it is None, we assume each embedding_indices belongs to a different
sample. Both int32 and int64 are allowed and will be converted to int32
internally.
      aggregation_weights: A rank 1 Tensor containing aggregation weights.
It corresponds to sp_weights.values in embedding_lookup_sparse(). If it
is None, we assume all weights are 1. Both float32 and float64 are
allowed and will be converted to float32 internally.
Returns:
An EnqueueData tuple.
"""
return super(EnqueueData, cls).__new__(cls, embedding_indices,
sample_indices, aggregation_weights)
  @staticmethod
  def from_sparse_tensor(sp_tensor):
    """Builds EnqueueData from a `SparseTensor` of ids (values and indices)."""
    return EnqueueData(sp_tensor.values, sp_tensor.indices)
class RaggedEnqueueData(
collections.namedtuple(
'RaggedEnqueueData',
['embedding_indices', 'sample_splits', 'aggregation_weights'])):
"""RaggedTensor Data to be enqueued through generate_enqueue_ops()."""
def __new__(cls,
embedding_indices,
sample_splits=None,
aggregation_weights=None):
"""Data to be enqueued through generate_enqueue_ops().
Args:
embedding_indices: A rank 1 Tensor, indices into the embedding tables. It
corresponds to ids.values in embedding_lookup(), when ids is a
RaggedTensor. Both int32 and int64 are allowed and will be converted to
int32 internally.
sample_splits: A rank 1 Tensor specifying the break points for splitting
embedding_indices and aggregation_weights into rows. It corresponds to
ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. Both
int32 and int64 are allowed and will be converted to int32 internally.
aggregation_weights: A rank 1 Tensor containing per training example
aggregation weights. It corresponds to the values field of a
RaggedTensor with the same row_splits as ids in embedding_lookup(), when
ids is a RaggedTensor.
Returns:
An RaggedEnqueueData tuple.
"""
return super(RaggedEnqueueData,
cls).__new__(cls, embedding_indices, sample_splits,
aggregation_weights)
  @staticmethod
  def from_ragged_tensor(rg_tensor):
    """Builds RaggedEnqueueData from a `RaggedTensor` (values and row splits)."""
    return RaggedEnqueueData(rg_tensor.values, rg_tensor.row_splits)
def get_enqueue_datas_list_from_sparse_tensors_list(sp_tensors_list):
"""Convenient function for generate_enqueue_ops().
Args:
sp_tensors_list: a list of dictionary mapping from string of feature names
to SparseTensor. Each dictionary is for one TPU core. Dictionaries for the
same host should be contiguous on the list.
Returns:
enqueue_datas_list: a list of dictionary mapping from string
of feature names to EnqueueData. Each dictionary is for one
TPU core. Dictionaries for the same host should be contiguous
on the list.
"""
enqueue_datas_list = []
for sp_tensors in sp_tensors_list:
enqueue_datas = collections.OrderedDict(
(k, EnqueueData.from_sparse_tensor(v))
for k, v in six.iteritems(sp_tensors))
enqueue_datas_list.append(enqueue_datas)
return enqueue_datas_list
def get_enqueue_datas_list_from_ragged_tensors_list(rg_tensors_list):
"""Convenient function for generate_enqueue_ops().
Args:
rg_tensors_list: a list of dictionary mapping from string of feature names
to RaggedTensor. Each dictionary is for one TPU core. Dictionaries for the
same host should be contiguous on the list.
Returns:
enqueue_datas_list: a list of dictionary mapping from string
of feature names to RaggedEnqueueData. Each dictionary is for one
TPU core. Dictionaries for the same host should be contiguous
on the list.
"""
enqueue_datas_list = []
for rg_tensors in rg_tensors_list:
enqueue_datas = collections.OrderedDict(
(k, RaggedEnqueueData.from_ragged_tensor(v))
for k, v in six.iteritems(rg_tensors))
enqueue_datas_list.append(enqueue_datas)
return enqueue_datas_list
AdamSlotVariableNames = collections.namedtuple(
'AdamSlotVariableNames', ['m', 'v'])
AdagradSlotVariableName = collections.namedtuple(
'AdagradSlotVariableName', ['accumulator'])
MomentumSlotVariableName = collections.namedtuple('MomentumSlotVariableName',
['momenta'])
RMSPropSlotVariableNames = collections.namedtuple('RMSPropSlotVariableNames',
['ms', 'mom'])
ProximalAdagradSlotVariableName = collections.namedtuple(
'ProximalAdagradSlotVariableName', ['accumulator'])
FtrlSlotVariableName = collections.namedtuple(
'FtrlSlotVariableName', ['accumulator', 'linear'])
ProximalYogiSlotVariableNames = collections.namedtuple(
'ProximalYogiSlotVariableNames', ['v', 'm'])
AdamSlotVariables = collections.namedtuple(
'AdamSlotVariables', ['m', 'v'])
MomentumSlotVariable = collections.namedtuple('MomentumSlotVariable',
['momenta'])
RMSPropSlotVariables = collections.namedtuple('RMSPropSlotVariables',
['ms', 'mom'])
AdagradSlotVariable = collections.namedtuple(
'AdagradSlotVariable', ['accumulator'])
ProximalAdagradSlotVariable = collections.namedtuple(
'ProximalAdagradSlotVariable', ['accumulator'])
FtrlSlotVariable = collections.namedtuple(
'FtrlSlotVariable', ['accumulator', 'linear'])
ProximalYogiSlotVariables = collections.namedtuple('ProximalYogiSlotVariables',
['v', 'm'])
VariablesAndOps = collections.namedtuple(
'VariablesAndOps',
['embedding_variables_by_table', 'slot_variables_by_table',
'load_ops', 'retrieve_ops']
)
class _OptimizationParameters(object):
"""Parameters common to all optimizations."""
@tf_export(v1=['tpu.experimental.AdagradParameters'])
class AdagradParameters(_OptimizationParameters):
"""Optimization parameters for Adagrad with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=tf.tpu.experimental.AdagradParameters(0.1),
...))
```
"""
def __init__(
self,
learning_rate: float,
initial_accumulator: float = 0.1,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for Adagrad.
Args:
learning_rate: used for updating embedding table.
initial_accumulator: initial accumulator for Adagrad.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(AdagradParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=use_gradient_accumulation,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
if initial_accumulator <= 0:
raise ValueError('Adagrad initial_accumulator must be positive')
self.initial_accumulator = initial_accumulator
class ProximalAdagradParameters(_OptimizationParameters):
"""Optimization parameters for ProximalAdagrad with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
"""
def __init__(
self,
learning_rate: float,
initial_accumulator: float = 0.1,
l1_regularization_strength: float = 0.0,
l2_regularization_strength: float = 0.0,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for Adagrad.
Args:
learning_rate: used for updating embedding table.
initial_accumulator: initial accumulator for Adagrad.
l1_regularization_strength: A float value, must be greater than or equal
to zero.
l2_regularization_strength: A float value, must be greater than or equal
to zero.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(ProximalAdagradParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=use_gradient_accumulation,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
if initial_accumulator <= 0:
raise ValueError('Adagrad initial_accumulator must be positive')
if l1_regularization_strength < 0.:
raise ValueError('l1_regularization_strength must be greater than or '
'equal to 0. got {}.'.format(l1_regularization_strength))
if l2_regularization_strength < 0.:
raise ValueError('l2_regularization_strength must be greater than or '
'equal to 0. got {}.'.format(l2_regularization_strength))
self.initial_accumulator = initial_accumulator
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
@tf_export(v1=['tpu.experimental.AdamParameters'])
class AdamParameters(_OptimizationParameters):
"""Optimization parameters for Adam with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=tf.tpu.experimental.AdamParameters(0.1),
...))
```
"""
def __init__(
self,
learning_rate: float,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-08,
lazy_adam: bool = True,
sum_inside_sqrt: bool = True,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for Adam.
Args:
learning_rate: a floating point value. The learning rate.
beta1: A float value.
The exponential decay rate for the 1st moment estimates.
beta2: A float value.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
lazy_adam: Use lazy Adam instead of Adam. Lazy Adam trains faster.
Please see `optimization_parameters.proto` for details.
sum_inside_sqrt: This improves training speed. Please see
`optimization_parameters.proto` for details.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(AdamParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=use_gradient_accumulation,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
if beta1 < 0. or beta1 >= 1.:
raise ValueError('beta1 must be between 0. and 1; got {}.'.format(beta1))
if beta2 < 0. or beta2 >= 1.:
raise ValueError('beta2 must be between 0. and 1; got {}.'.format(beta2))
if epsilon <= 0.:
raise ValueError('epsilon must be positive; got {}.'.format(epsilon))
if not use_gradient_accumulation and not lazy_adam:
raise ValueError(
'When disabling Lazy Adam, gradient accumulation must be used.')
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.lazy_adam = lazy_adam
self.sum_inside_sqrt = sum_inside_sqrt
@tf_export(v1=['tpu.experimental.FtrlParameters'])
class FtrlParameters(_OptimizationParameters):
"""Optimization parameters for Ftrl with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=tf.tpu.experimental.FtrlParameters(0.1),
...))
```
"""
def __init__(
self,
learning_rate: float,
learning_rate_power: float = -0.5,
initial_accumulator_value: float = 0.1,
l1_regularization_strength: float = 0.0,
l2_regularization_strength: float = 0.0,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
multiply_linear_by_learning_rate: bool = False,
beta: float = 0,
allow_zero_accumulator: bool = False,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for Ftrl.
Implements FTRL as described in the following [paper](
https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/41159.pdf)
Args:
learning_rate: a floating point value. The learning rate.
learning_rate_power: A float value, must be less or equal to zero.
Controls how the learning rate decreases during training. Use zero for
a fixed learning rate. See section 3.1 in the
[paper](https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
initial_accumulator_value: The starting value for accumulators.
Only zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
multiply_linear_by_learning_rate: When true, multiplies the usages of the
linear slot in the weight update by the learning rate. This is useful
when ramping up learning rate from 0 (which would normally produce
NaNs).
beta: The beta parameter for FTRL.
allow_zero_accumulator: Changes the implementation of the square root to
allow for the case of initial_accumulator_value being zero. This will
cause a slight performance drop.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(FtrlParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=use_gradient_accumulation,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
if learning_rate_power > 0.:
raise ValueError('learning_rate_power must be less than or equal to 0. '
'got {}.'.format(learning_rate_power))
if initial_accumulator_value < 0.:
raise ValueError('initial_accumulator_value must be greater than or equal'
' to 0. got {}.'.format(initial_accumulator_value))
if l1_regularization_strength < 0.:
raise ValueError('l1_regularization_strength must be greater than or '
'equal to 0. got {}.'.format(l1_regularization_strength))
if l2_regularization_strength < 0.:
raise ValueError('l2_regularization_strength must be greater than or '
'equal to 0. got {}.'.format(l2_regularization_strength))
self.learning_rate_power = learning_rate_power
self.initial_accumulator_value = initial_accumulator_value
self.initial_linear_value = 0.0
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
self.multiply_linear_by_learning_rate = multiply_linear_by_learning_rate
self.beta = beta
self.allow_zero_accumulator = allow_zero_accumulator
class ProximalYogiParameters(_OptimizationParameters):
# pylint: disable=line-too-long
"""Optimization parameters for Proximal Yogi with TPU embeddings.
Implements the Yogi optimizer as described in
[Adaptive Methods for Nonconvex Optimization](https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization).
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
"""
# pylint: enable=line-too-long
def __init__(
self,
learning_rate: float = 0.01,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-3,
l1_regularization_strength: float = 0.0,
l2_regularization_strength: float = 0.0,
initial_accumulator_value: float = 1e-6,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for Proximal Yogi.
Args:
learning_rate: a floating point value. The learning rate.
beta1: A float value. The exponential decay rate for the 1st moment
estimates.
beta2: A float value. The exponential decay rate for the 2nd moment
estimates.
epsilon: A small constant for numerical stability.
l1_regularization_strength: A float value, must be greater than or equal
to zero.
l2_regularization_strength: A float value, must be greater than or equal
to zero.
initial_accumulator_value: The starting value for accumulators. Only zero
or positive values are allowed.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(ProximalYogiParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=use_gradient_accumulation,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
if beta1 < 0. or beta1 >= 1.:
raise ValueError('beta1 must be between 0. and 1; got {}.'.format(beta1))
if beta2 < 0. or beta2 >= 1.:
raise ValueError('beta2 must be between 0. and 1; got {}.'.format(beta2))
if epsilon <= 0.:
raise ValueError('epsilon must be positive; got {}.'.format(epsilon))
if l1_regularization_strength < 0.:
raise ValueError('l1_regularization_strength must be greater than or '
'equal to 0. got {}.'.format(l1_regularization_strength))
if l2_regularization_strength < 0.:
raise ValueError('l2_regularization_strength must be greater than or '
'equal to 0. got {}.'.format(l2_regularization_strength))
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
self.initial_accumulator_value = initial_accumulator_value
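# A small usage sketch (illustrative values): Proximal Yogi with a light L2
# penalty; the remaining hyperparameters keep their defaults.
example_yogi_parameters = ProximalYogiParameters(
    learning_rate=0.01,
    l2_regularization_strength=1e-4)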
class MomentumParameters(_OptimizationParameters):
"""Optimization parameters for Momentum with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=tf.tpu.experimental.MomentumParameters(0.1),
...))
```
"""
def __init__(
self,
learning_rate: float,
momentum: float,
use_nesterov: bool = False,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for momentum.
Args:
learning_rate: a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value. The momentum.
use_nesterov: If `True` use Nesterov Momentum. See (Sutskever et al.,
2013). This implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
This implementation is an approximation of the original formula, valid
for high values of momentum. It will compute the "adjusted gradient" in
NAG by assuming that the new gradient will be estimated by the current
average gradient plus the product of momentum and the change in the
average gradient.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
`optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(MomentumParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=use_gradient_accumulation,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
self.momentum = momentum
self.use_nesterov = use_nesterov
class RMSPropParameters(_OptimizationParameters):
"""Optimization parameters for RMSProp with TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
          optimization_parameters=RMSPropParameters(0.1, 0.9, 0.9, 1e-10),
...))
```
"""
def __init__(
self,
learning_rate: float,
rho: float,
momentum: float,
epsilon: float,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for RMS prop.
Args:
learning_rate: a floating point value. The learning rate.
rho: Discounting factor for the history/coming gradient
momentum: A scalar tensor.
epsilon: Small value to avoid zero denominator.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(RMSPropParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=use_gradient_accumulation,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
self.rho = rho
self.momentum = momentum
self.epsilon = epsilon
@tf_export(v1=['tpu.experimental.StochasticGradientDescentParameters'])
class StochasticGradientDescentParameters(_OptimizationParameters):
"""Optimization parameters for stochastic gradient descent for TPU embeddings.
Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the
`optimization_parameters` argument to set the optimizer and its parameters.
See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec`
for more details.
```
estimator = tf.estimator.tpu.TPUEstimator(
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
...
optimization_parameters=(
tf.tpu.experimental.StochasticGradientDescentParameters(0.1))))
```
"""
def __init__(
self,
learning_rate: float,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
clip_gradient_min: Optional[float] = None,
clip_gradient_max: Optional[float] = None,
):
"""Optimization parameters for stochastic gradient descent.
Args:
learning_rate: a floating point value. The learning rate.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
clip_gradient_min: the minimum value to clip by; None means -infinity.
clip_gradient_max: the maximum value to clip by; None means +infinity.
"""
super(StochasticGradientDescentParameters, self).__init__(
learning_rate=learning_rate,
use_gradient_accumulation=False,
clip_weight_min=clip_weight_min,
clip_weight_max=clip_weight_max,
weight_decay_factor=weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate=(
multiply_weight_decay_factor_by_learning_rate),
clip_gradient_min=clip_gradient_min,
clip_gradient_max=clip_gradient_max,
)
DeviceConfig = collections.namedtuple('DeviceConfig',
['num_hosts', 'num_cores', 'job_name'])
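# A small usage sketch (hypothetical job name): describing one host with eight
# TPU cores, for the case where neither `master` nor `cluster_def` is given.
example_device_config = DeviceConfig(num_hosts=1, num_cores=8,
                                     job_name='tpu_worker')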
class TPUEmbedding(object):
"""API for using TPU for embedding.
Example:
```
table_config_user = tpu_embedding.TableConfig(
vocabulary_size=4, dimension=2,
initializer=initializer, combiner='mean')
table_to_config_dict = {'video': table_config_video,
'user': table_config_user}
feature_to_config_dict = {'watched': tpu_embedding.FeatureConfig('video'),
'favorited': tpu_embedding.FeatureConfig('video'),
'friends': tpu_embedding.FeatureConfig('user')}
batch_size = 4
num_hosts = 1
optimization_parameters = tpu_embedding.AdagradParameters(1., 1.)
mode = tpu_embedding.TRAINING
embedding = tpu_embedding.TPUEmbedding(
table_to_config_dict, feature_to_config_dict,
batch_size, num_hosts, mode, optimization_parameters)
batch_size_per_core = embedding.batch_size_per_core
sparse_features_list = []
for host in hosts:
with ops.device(host):
for _ in range(embedding.num_cores_per_host):
sparse_features = {}
sparse_features['watched'] = sparse_tensor.SparseTensor(...)
sparse_features['favorited'] = sparse_tensor.SparseTensor(...)
sparse_features['friends'] = sparse_tensor.SparseTensor(...)
sparse_features_list.append(sparse_features)
enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list)
embedding_variables_and_ops = embedding.create_variables_and_ops()
def computation():
activations = embedding.get_activations()
loss = compute_loss(activations)
base_optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=1)
cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer(
base_optimizer)
train_op = cross_shard_optimizer.minimize(loss)
gradients = (
tpu_embedding_gradient.get_gradients_through_compute_gradients(
            cross_shard_optimizer, loss, activations))
send_gradients_op = embedding.generate_send_gradients_op(gradients)
with ops.control_dependencies([train_op, send_gradients_op]):
loss = array_ops.identity(loss)
loss = tpu.shard(computation,
num_shards=embedding.num_cores)
with self.test_session() as sess:
sess.run(tpu.initialize_system(embedding_config=
embedding.config_proto))
sess.run(variables.global_variables_initializer())
sess.run(embedding_variables_and_ops.load_ops())
sess.run(enqueue_ops)
loss_val = sess.run(loss)
```
Example with weight decay:
>>> def learning_rate_fn(global_step):
... return tf.compat.v1.train.polynomial_decay(
... learning_rate=5e-5,
... global_step=global_step,
... decay_steps=100000,
... end_learning_rate=0.0)
>>> wordpiece_table_config = TableConfig(
... vocabulary_size=119547,
... dimension=256,
... learning_rate_fn=learning_rate_fn)
>>> wordpiece_feature_config = FeatureConfig(
... table_id='bert/embeddings/word_embeddings',
... max_sequence_length=512)
>>> optimization_parameters = AdamParameters(
... learning_rate=5e-5,
... epsilon=1e-6,
... weight_decay_factor=0.01,
... multiply_weight_decay_factor_by_learning_rate=True)
>>> tpu_embedding = TPUEmbedding(
... table_to_config_dict={
... 'bert/embeddings/word_embeddings': wordpiece_table_config,
... },
... feature_to_config_dict={'input_ids': wordpiece_feature_config},
... batch_size=128,
... mode=TRAINING,
... optimization_parameters=optimization_parameters,
... master='')
>>> with tf.Graph().as_default():
... init_tpu_op = tf.compat.v1.tpu.initialize_system(
... embedding_config=tpu_embedding.config_proto)
... tf.compat.v1.Session().run(init_tpu_op)
"""
# TODO(shizhiw): Consider adding a field to FeatureConfig that indicates that
# the feature should not be used to update embedding table (cr/204852758,
# cr/204940540). Also, this can support different combiners for different
# features within the same table.
# TODO(shizhiw, b/118512626): Remove `batch_size` from `__init__` and move it
# to `FeatureConfig`?
# TODO(shizhiw): will it be cleaner to make `table_to_config_dict` and
# `feature_to_config_dict` lists of `TableSpec` and `FeatureSpec`
# respectively?
# TODO(shizhiw): Consider adding `input_fn` as an option to remove boilerplate
# for-loops around construction of inputs.
# `optimization_parameter` applies to all tables. If the need arises,
# we can add `optimization_parameters` to `TableConfig` to override this
# global setting.
def __init__(self,
table_to_config_dict,
feature_to_config_dict,
batch_size,
mode,
master=None,
optimization_parameters=None,
cluster_def=None,
pipeline_execution_with_tensor_core=False,
partition_strategy='div',
device_config=None,
master_job_name=None):
"""API for using TPU for embedding lookups.
Args:
table_to_config_dict: A dictionary mapping from string of table name to
`TableConfig`. Table refers to an embedding table, e.g. `params`
argument to `tf.nn.embedding_lookup_sparse()`.
feature_to_config_dict: A dictionary mapping from string of feature name
to `FeatureConfig`. Feature refers to ids to lookup in embedding table,
e.g. `sp_ids` argument to `tf.nn.embedding_lookup_sparse()`.
batch_size: An `int` representing the global batch size.
mode: `TRAINING` or `INFERENCE`.
master: A `string` representing the TensorFlow master to use.
optimization_parameters: `AdagradParameters`, `AdamParameters`,
        `StochasticGradientDescentParameters`. Must be set in training unless
        all tables specify their own optimizers, and must be `None` in
        inference.
cluster_def: A ClusterDef object describing the TPU cluster.
pipeline_execution_with_tensor_core: setting this to `True` makes training
faster, but trained model will be different if step N and step N+1
involve the same set of embedding IDs. Please see
`tpu_embedding_configuration.proto` for details.
partition_strategy: A string, either 'mod' or 'div', specifying how to map
the lookup id to the embedding tensor. For more information see
`tf.nn.embedding_lookup_sparse`.
device_config: A DeviceConfig instance, used when `master` and
`cluster_def` are both `None`.
master_job_name: if set, overrides the master job name used to schedule
embedding ops.
Raises:
ValueError: if any input is invalid.
"""
if partition_strategy not in ('div', 'mod'):
raise ValueError(
'Invalid partition_strategy {}'.format(partition_strategy))
self._partition_strategy = partition_strategy
_validate_table_to_config_dict(table_to_config_dict)
# Avoid nondeterminism from `Dict` iteration order by using `OrderedDict`.
self._table_to_config_dict = _create_ordered_dict(table_to_config_dict)
_validate_feature_to_config_dict(table_to_config_dict,
feature_to_config_dict)
self._feature_to_config_dict = _create_ordered_dict(feature_to_config_dict)
self._table_to_features_dict, self._table_to_num_features_dict = (
_create_table_to_features_and_num_features_dicts(
self._feature_to_config_dict))
self._combiners = _create_combiners(self._table_to_config_dict,
self._table_to_features_dict)
self._batch_size = batch_size
if master is None and cluster_def is None:
if device_config is None:
        raise ValueError('When master and cluster_def are both None, '
'device_config must be set but is not.')
if device_config.num_cores % device_config.num_hosts:
raise ValueError('num_hosts ({}) should divide num_cores ({}) '
                         'but does not.'.format(device_config.num_hosts,
                                                device_config.num_cores))
self._num_hosts = device_config.num_hosts
self._num_cores = device_config.num_cores
self._num_cores_per_host = self._num_cores // self._num_hosts
self._hosts = [
'{}/replica:0/task:{}/device:CPU:0'.format(device_config.job_name, i)
for i in range(self._num_hosts)
]
else:
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata( # pylint: disable=protected-access
master,
cluster_def=cluster_def))
if tpu_system_metadata.num_cores == 0:
raise ValueError('TPUEmbedding needs TPUs, but master {} does not have '
'TPUs.'.format(master))
self._num_hosts = tpu_system_metadata.num_hosts
if master_job_name is None:
try:
master_job_name = tpu_system_metadata_lib.master_job(master,
cluster_def)
except ValueError as e:
raise ValueError(str(e) + ' Please specify a master_job_name.')
self._hosts = []
for device in tpu_system_metadata.devices:
if 'device:CPU:' in device.name and (
master_job_name is None or master_job_name in device.name):
self._hosts.append(device.name)
self._num_cores_per_host = tpu_system_metadata.num_of_cores_per_host
self._num_cores = tpu_system_metadata.num_cores
_validate_batch_size(self._batch_size, self._num_cores)
self._batch_size_per_core = self._batch_size // self._num_cores
# TODO(shizhiw): remove `mode`?
if mode == TRAINING:
_validate_optimization_parameters(optimization_parameters,
self._table_to_config_dict)
self._optimization_parameters = optimization_parameters
elif mode == INFERENCE:
if optimization_parameters is not None:
raise ValueError('`optimization_parameters` should be `None` '
'for inference mode.')
self._optimization_parameters = (
StochasticGradientDescentParameters(1.))
else:
raise ValueError('`mode` only supports {} and {}; got {}.'
.format(TRAINING, INFERENCE, mode))
self._mode = mode
# TODO(shizhiw): move `optimization_parameters` into `_optimizer_handler`
# and create special handler for inference that inherits from
# StochasticGradientDescentHandler with more user-friendly error message
# on get_slot().
self._optimizer_handler_dict = self._get_optimizer_handler_by_table()
self._pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
self._learning_rate_fn = list(set(
c.learning_rate_fn for c in self._table_to_config_dict.values()
if c.learning_rate_fn is not None))
self._learning_rate_fn_to_tag = {
fn: id for id, fn in enumerate(self._learning_rate_fn)}
self._config_proto = self._create_config_proto()
@property
def hosts(self):
"""A list of device names for CPU hosts.
Returns:
A list of device names for CPU hosts.
"""
return copy.copy(self._hosts)
# TODO(shizhiw): change to num_tensor_cores_per_host to be more explicit and
# to be consistent with `tpu_embedding_configuration.proto`.
@property
def num_cores_per_host(self):
"""Number of TPU cores on a CPU host.
Returns:
Number of TPU cores on a CPU host.
"""
return self._num_cores_per_host
@property
def num_cores(self):
"""Total number of TPU cores on all hosts.
Returns:
Total number of TPU cores on all hosts.
"""
return self._num_cores
@property
def batch_size_per_core(self):
"""Batch size for each TPU core.
The sparse tensors in `sparse_features_list` to `generate_enqueue_ops`
must have batch dimension equal to this.
Returns:
Batch size for each TPU core.
"""
return self._batch_size_per_core
@property
def config_proto(self):
"""Create embedding config proto for `tpu.initialize_system()`.
Returns:
      a `TPUEmbeddingConfiguration` proto describing the desired
configuration of the hardware embedding lookup tables, which
is passed to `tpu.initialize_system()`.
"""
return self._config_proto
  @property
  def table_to_config_dict(self):
    """A copy of the mapping from table names to `TableConfig`."""
    return copy.copy(self._table_to_config_dict)
  @property
  def feature_to_config_dict(self):
    """A copy of the mapping from feature names to `FeatureConfig`."""
    return copy.copy(self._feature_to_config_dict)
  @property
  def table_to_features_dict(self):
    """A copy of the mapping from table names to their feature names."""
    return copy.copy(self._table_to_features_dict)
  @property
  def optimization_parameters(self):
    """The global optimization parameters passed to the constructor."""
    return copy.copy(self._optimization_parameters)
def _create_config_proto(self):
"""Create `TPUEmbeddingConfiguration`."""
config_proto = elc.TPUEmbeddingConfiguration()
for table in self._table_to_config_dict:
table_descriptor = config_proto.table_descriptor.add()
table_descriptor.name = table
table_config = self._table_to_config_dict[table]
# For small tables, we pad to the number of hosts so that at least one
# id will be assigned to each host.
table_descriptor.vocabulary_size = max(table_config.vocabulary_size,
len(self.hosts))
table_descriptor.dimension = table_config.dimension
table_descriptor.num_features = self._table_to_num_features_dict[table]
optimization_parameters = (
self._optimizer_handler_dict[table].get_optimization_parameters())
parameters = table_descriptor.optimization_parameters
if table_config.learning_rate:
parameters.learning_rate.constant = table_config.learning_rate
elif table_config.learning_rate_fn:
parameters.learning_rate.dynamic.tag = (
self._learning_rate_fn_to_tag[table_config.learning_rate_fn])
else:
parameters.learning_rate.constant = (
optimization_parameters.learning_rate)
parameters.gradient_accumulation_status = (
optimization_parameters_pb2.GradientAccumulationStatus.ENABLED
if optimization_parameters.use_gradient_accumulation else
optimization_parameters_pb2.GradientAccumulationStatus.DISABLED)
if optimization_parameters.clip_gradient_min is not None:
parameters.gradient_clipping_limits.lower.value = (
optimization_parameters.clip_gradient_min)
if optimization_parameters.clip_gradient_max is not None:
parameters.gradient_clipping_limits.upper.value = (
optimization_parameters.clip_gradient_max)
if optimization_parameters.clip_weight_min is not None:
parameters.clipping_limits.lower.value = (
optimization_parameters.clip_weight_min)
if optimization_parameters.clip_weight_max is not None:
parameters.clipping_limits.upper.value = (
optimization_parameters.clip_weight_max)
if optimization_parameters.weight_decay_factor:
parameters.weight_decay_factor = (
optimization_parameters.weight_decay_factor)
if (optimization_parameters
.multiply_weight_decay_factor_by_learning_rate):
parameters.multiply_weight_decay_factor_by_learning_rate = True
if table_config.hot_id_replication:
parameters.hot_id_replication_configuration.status = (
optimization_parameters_pb2.HotIdReplicationConfiguration.ENABLED)
optimizer_handler = self._optimizer_handler_dict[table]
optimizer_handler.set_optimization_parameters(table_descriptor)
config_proto.mode = self._mode
config_proto.batch_size_per_tensor_core = self._batch_size_per_core
config_proto.num_hosts = self._num_hosts
config_proto.num_tensor_cores = self._num_cores
config_proto.sharding_strategy = (
elc.TPUEmbeddingConfiguration.DIV_DEFAULT
if self._partition_strategy == 'div' else
elc.TPUEmbeddingConfiguration.MOD)
config_proto.pipeline_execution_with_tensor_core = (
self._pipeline_execution_with_tensor_core)
return config_proto
def create_variables_and_ops(self, embedding_variable_name_by_table=None,
slot_variable_names_by_table=None):
"""Create embedding and slot variables, with ops to load and retrieve them.
    N.B.: the ops that retrieve the embedding variables (including slot
    variables) are returned as lambda fns, since the call site might want to
    impose control dependencies between the TPU computation and retrieval. For
example, the following code snippet ensures the TPU computation finishes
first, and then we pull the variables back from TPU to CPU.
```
    update_ops = []
with ops.control_dependencies([loss]):
for op_fn in retrieve_parameters_op_fns:
update_ops.append(op_fn())
```
Args:
embedding_variable_name_by_table: A dictionary mapping from string of
        table name to string of embedding variable name. If `None`,
        the table name itself will be used.
slot_variable_names_by_table: A dictionary mapping from string of table
name to `AdamSlotVariableNames`, `AdagradSlotVariableNames` etc. If
`None`, defaults from `get_default_slot_variable_names()` will be used.
Returns:
`tpu_embedding.VariablesAndOps` with:
A dictionary mapping from string of table name to embedding variables,
A dictionary mapping from string of table name to AdagradSlotVariable,
AdamSlotVariables etc with slot variables,
A function which returns a list of ops to load embedding and slot
variables from CPU to TPU.
A function which returns a list of ops to retrieve embedding and slot
variables from TPU to CPU.
"""
embedding_variables_by_table = {}
slot_variables_by_table = {}
load_op_fns = []
retrieve_op_fns = []
for i, table in enumerate(self._table_to_config_dict):
if embedding_variable_name_by_table:
embedding_variable_name = embedding_variable_name_by_table[table]
else:
embedding_variable_name = table
if slot_variable_names_by_table:
slot_variable_names = slot_variable_names_by_table[table]
else:
optimizer_handler = self._optimizer_handler_dict[table]
slot_variable_names = (
optimizer_handler.get_default_slot_variable_names(table))
# TODO(b/139144091): Multi-host support for mid-level API in
# eager context (TF 2.0)
# Workaround below allows single-host use case in TF 2.0
if context.executing_eagerly():
device = ''
else:
device = _create_device_fn(self._hosts)
with ops.device(device):
table_variables = _create_partitioned_variables(
name=embedding_variable_name,
num_hosts=self._num_hosts,
vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
embedding_dimension=self._table_to_config_dict[table].dimension,
initializer=self._table_to_config_dict[table].initializer,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
embedding_variables_by_table[table] = table_variables
# Only loads embedding config to load/retrieve nodes for the first table
# on the first host, other nodes would use config from the first node.
config = None if i else self.config_proto.SerializeToString()
slot_variables_for_table, load_ops_fn, retrieve_ops_fn = (
self._optimizer_handler_dict[table].create_variables_and_ops(
table, slot_variable_names, self._num_hosts,
self._table_to_config_dict[table], table_variables, config))
slot_variables_by_table[table] = slot_variables_for_table
load_op_fns.append(load_ops_fn)
retrieve_op_fns.append(retrieve_ops_fn)
def load_ops():
"""Calls and returns the load ops for each embedding table.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_ops_list = []
for load_op_fn in load_op_fns:
load_ops_list.extend(load_op_fn())
return load_ops_list
def retrieve_ops():
"""Calls and returns the retrieve ops for each embedding table.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_ops_list = []
for retrieve_op_fn in retrieve_op_fns:
retrieve_ops_list.extend(retrieve_op_fn())
return retrieve_ops_list
return VariablesAndOps(embedding_variables_by_table,
slot_variables_by_table,
load_ops, retrieve_ops)
def generate_enqueue_ops(
self,
enqueue_datas_list,
mode_override=None,
ragged=False,
):
"""Generate enqueue ops.
Args:
enqueue_datas_list: a list of dictionary mapping from string
of feature names to EnqueueData. Each dictionary is for one
TPU core. Dictionaries for the same host should be contiguous
on the list.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
ragged: If True, creates RaggedTensor enqueue ops rather than
SparseTensor.
Returns:
Ops to enqueue to TPU for embedding.
"""
self._validate_generate_enqueue_ops_enqueue_datas_list(enqueue_datas_list)
return [
self._generate_enqueue_op( # pylint: disable=g-complex-comprehension
enqueue_datas,
device_ordinal=i % self._num_cores_per_host,
mode_override=mode_override,
ragged=ragged,
) for i, enqueue_datas in enumerate(enqueue_datas_list)
]
def _validate_generate_enqueue_ops_enqueue_datas_list(self,
enqueue_datas_list):
"""Validate `enqueue_datas_list`."""
def _check_agreement(data, name, feature, enqueue_data):
"""Helper function to check device agreement."""
if (data is not None and
data.device != enqueue_data.embedding_indices.device):
        raise ValueError('Device of {0} does not agree with that of '
'embedding_indices for feature {1}.'.format(
name, feature))
feature_set = set(self._feature_to_config_dict.keys())
contiguous_device = None
for i, enqueue_datas in enumerate(enqueue_datas_list):
used_feature_set = set(enqueue_datas.keys())
# Check features are valid.
missing_feature_set = feature_set - used_feature_set
if missing_feature_set:
raise ValueError('`enqueue_datas_list[{}]` misses a feature that is '
'in `feature_to_config_dict`: {}.'.format(
i, missing_feature_set))
extra_feature_set = used_feature_set - feature_set
if extra_feature_set:
raise ValueError('`enqueue_datas_list[{}]` has a feature that is not '
'in `feature_to_config_dict`: {}.'.format(
i, extra_feature_set))
device = None
device_feature = None
for feature, enqueue_data in six.iteritems(enqueue_datas):
combiner = self._table_to_config_dict[
self._feature_to_config_dict[feature].table_id].combiner
if isinstance(enqueue_data, EnqueueData):
if enqueue_data.sample_indices is None and combiner:
logging.warn(
                'No sample indices set for features %s table %s but '
'combiner is set to %s.', feature,
self._feature_to_config_dict[feature].table_id, combiner)
_check_agreement(enqueue_data.sample_indices, 'sample_indices',
feature, enqueue_data)
_check_agreement(enqueue_data.aggregation_weights,
'aggregation_weights', feature, enqueue_data)
elif isinstance(enqueue_data, RaggedEnqueueData):
if enqueue_data.sample_splits is None and combiner:
logging.warn(
                'No sample splits set for features %s table %s but '
'combiner is set to %s.', feature,
self._feature_to_config_dict[feature].table_id, combiner)
_check_agreement(enqueue_data.sample_splits, 'sample_splits', feature,
enqueue_data)
_check_agreement(enqueue_data.aggregation_weights,
'aggregation_weights', feature, enqueue_data)
else:
raise ValueError(
'`enqueue_datas_list[{}]` has a feature that is not mapped to '
'`EnqueueData` or `RaggedEnqueueData`. `feature`: {}'.format(
i, feature))
# Check all features are on the same device.
if device is None:
device = enqueue_data.embedding_indices.device
device_feature = feature
else:
if device != enqueue_data.embedding_indices.device:
raise ValueError('Devices are different between features in '
'`enqueue_datas_list[{}]`; '
'devices: {}, {}; features: {}, {}.'.format(
i, device,
enqueue_data.embedding_indices.device, feature,
device_feature))
if i % self._num_cores_per_host:
if device != contiguous_device:
raise ValueError('We expect the `enqueue_datas` which are on the '
'same host to be contiguous in '
'`enqueue_datas_list`, '
'`enqueue_datas_list[{}]` is on device {}, '
'but is expected to be on device {}.'.format(
i, device, contiguous_device))
else:
contiguous_device = device
def _generate_enqueue_op(self,
enqueue_datas,
device_ordinal,
mode_override=None,
ragged=False):
"""Creates op for enqueuing batch to TPU."""
enqueue_data0 = list(enqueue_datas.values())[0]
with ops.colocate_with(enqueue_data0.embedding_indices):
if ragged:
        # Note: this ragged path is currently identical in behavior to the
        # sparse path below; only the enqueue op and input format differ.
return tpu_ops.enqueue_tpu_embedding_ragged_tensor_batch(
device_ordinal=device_ordinal,
combiners=self._combiners,
mode_override=mode_override,
**self._format_for_tpu_embedding_ragged_tensor_batch(enqueue_datas))
else:
return tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
device_ordinal=device_ordinal,
combiners=self._combiners,
mode_override=mode_override,
**self._format_for_tpu_embedding_sparse_tensor_batch(enqueue_datas))
def _format_for_tpu_embedding_ragged_tensor_batch(self, enqueue_datas):
"""Format sparse features for `enqueue_tpu_embedding_ragged_tensor_batch()`.
Args:
enqueue_datas: a `Dict` of `RaggedEnqueueData` objects for embedding.
Returns:
Dict of arguments for `enqueue_tpu_embedding_ragged_tensor_batch()`.
"""
kwargs = {
'sample_splits': [],
'embedding_indices': [],
'aggregation_weights': [],
'table_ids': [],
'max_sequence_lengths': [],
}
int_zeros = array_ops.zeros((0,), dtype=dtypes.int64)
float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
for feature in features:
enqueue_data = enqueue_datas[feature]
kwargs['sample_splits'].append(
enqueue_data.sample_splits
if enqueue_data.sample_splits is not None else int_zeros)
kwargs['aggregation_weights'].append(
enqueue_data.aggregation_weights
if enqueue_data.aggregation_weights is not None else float_zeros)
kwargs['embedding_indices'].append(enqueue_data.embedding_indices)
kwargs['table_ids'].append(table_id)
kwargs['max_sequence_lengths'].append(
self._feature_to_config_dict[feature].max_sequence_length)
return kwargs
def _format_for_tpu_embedding_sparse_tensor_batch(self, enqueue_datas):
"""Format sparse features for `enqueue_tpu_embedding_sparse_tensor_batch()`.
Args:
enqueue_datas: a `Dict` of `EnqueueData` objects for embedding.
Returns:
Dict of arguments for `enqueue_tpu_embedding_sparse_tensor_batch()`.
"""
kwargs = {
'sample_indices': [],
'embedding_indices': [],
'aggregation_weights': [],
'table_ids': [],
'max_sequence_lengths': [],
}
int_zeros = array_ops.zeros((0,), dtype=dtypes.int64)
float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
for feature in features:
enqueue_data = enqueue_datas[feature]
kwargs['sample_indices'].append(
enqueue_data.sample_indices
if enqueue_data.sample_indices is not None else int_zeros)
kwargs['aggregation_weights'].append(
enqueue_data.aggregation_weights if
enqueue_data.aggregation_weights is not None else float_zeros)
kwargs['embedding_indices'].append(enqueue_data.embedding_indices)
kwargs['table_ids'].append(table_id)
kwargs['max_sequence_lengths'].append(
self._feature_to_config_dict[feature].max_sequence_length)
return kwargs
def get_activations(self):
"""Get activations for features.
This should be called within `computation` that is passed to
`tpu.replicate` and friends.
Returns:
A dictionary mapping from `String` of feature name to `Tensor`
of activation.
"""
recv_activations = tpu_ops.recv_tpu_embedding_activations(
num_outputs=len(self._table_to_config_dict),
config=self._config_proto.SerializeToString())
activations = collections.OrderedDict()
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
num_features = self._table_to_num_features_dict[table]
feature_index = 0
table_activations = array_ops.reshape(
recv_activations[table_id],
[self.batch_size_per_core, num_features, -1])
for feature in features:
seq_length = self._feature_to_config_dict[feature].max_sequence_length
if not seq_length:
activations[feature] = table_activations[:, feature_index, :]
feature_index = feature_index + 1
else:
activations[feature] = (
table_activations[:, feature_index:(feature_index+seq_length), :])
feature_index = feature_index + seq_length
return activations
def generate_send_gradients_op(self,
feature_to_gradient_dict,
step=None):
"""Send gradient to TPU embedding.
Args:
feature_to_gradient_dict: dict mapping feature names to gradient wrt
activations.
step: the current global step, used for dynamic learning rate.
Returns:
SendTPUEmbeddingGradients Op.
Raises:
RuntimeError: If `mode` is not `TRAINING`.
"""
if self._mode != TRAINING:
raise RuntimeError('Only in training mode gradients need to '
'be sent to TPU embedding; got mode {}.'
.format(self._mode))
if step is None and self._learning_rate_fn:
raise ValueError('There are dynamic learning rates but step is None.')
gradients = []
for table in self._table_to_features_dict:
features = self._table_to_features_dict[table]
table_gradients = []
for feature in features:
gradient = feature_to_gradient_dict[feature]
# Expand dims for non-sequence feature to match sequence features.
if gradient.shape.ndims == 2:
gradient = array_ops.expand_dims(gradient, 1)
table_gradients.append(gradient)
interleaved_table_grads = array_ops.reshape(
array_ops.concat(table_gradients, axis=1),
[-1, array_ops.shape(table_gradients[0])[-1]])
gradients.append(interleaved_table_grads)
return tpu_ops.send_tpu_embedding_gradients(
inputs=gradients,
learning_rates=[math_ops.cast(fn(step), dtype=dtypes.float32)
for fn in self._learning_rate_fn],
config=self.config_proto.SerializeToString())
def _validate_table_to_config_dict(table_to_config_dict):
"""Validate `table_to_config_dict`."""
for k, v in six.iteritems(table_to_config_dict):
if not isinstance(v, TableConfig):
raise ValueError('Value of `table_to_config_dict` must be of type '
'`TableConfig`, got {} for {}.'.format(type(v), k))
def _validate_feature_to_config_dict(table_to_config_dict,
feature_to_config_dict):
"""Validate `feature_to_config_dict`."""
used_table_set = set([feature.table_id
for feature in feature_to_config_dict.values()])
table_set = set(table_to_config_dict.keys())
unused_table_set = table_set - used_table_set
if unused_table_set:
raise ValueError('`table_to_config_dict` specifies table that is not '
'used in `feature_to_config_dict`: {}.'
.format(unused_table_set))
extra_table_set = used_table_set - table_set
if extra_table_set:
raise ValueError('`feature_to_config_dict` refers to a table that is not '
'specified in `table_to_config_dict`: {}.'
.format(extra_table_set))
def _validate_optimization_parameters(optimization_parameters,
table_to_config_dict):
"""Validate global optimization_parameters and per table optimizers.
If global optimizer is `None`, all table optimizers should be non `None`.
Args:
optimization_parameters: global optimizer provided in `TPUEmbedding`
constructor.
table_to_config_dict: A dictionary mapping from string of table name to
`TableConfig`.
"""
tbl_optimizer_missing = False
for _, table_config in table_to_config_dict.items():
if table_config.optimization_parameters is None:
tbl_optimizer_missing = True
break
if optimization_parameters:
if not isinstance(optimization_parameters, _OptimizationParameters):
raise ValueError('`optimization_parameters` must inherit from '
'`_OptimizationParameters`. '
'`type(optimization_parameters)`={}'.format(
type(optimization_parameters)))
else:
# Missing global optimization_parameters.
if tbl_optimizer_missing:
raise ValueError('`optimization_parameters` is missing.')
class _OptimizerHandler(object):
"""Interface class for handling optimizer specific logic."""
class _AdagradHandler(_OptimizerHandler):
"""Handles Adagrad specific logic."""
class _ProximalAdagradHandler(_OptimizerHandler):
"""Handles ProximalAdagrad specific logic."""
class _AdamHandler(_OptimizerHandler):
"""Handles Adam specific logic."""
class _FtrlHandler(_OptimizerHandler):
"""Handles Ftrl specific logic."""
class _ProximalYogiHandler(_OptimizerHandler):
"""Handles Proximal Yogi specific logic."""
class _MomentumHandler(_OptimizerHandler):
"""Handles Momentum specific logic."""
class _RMSPropHandler(_OptimizerHandler):
"""Handles RMS prop specific logic."""
class _StochasticGradientDescentHandler(_OptimizerHandler):
"""Handles stochastic gradient descent specific logic."""
def _get_optimization_handler(optimization_parameters):
"""Gets the optimization handler given the parameter type."""
if isinstance(optimization_parameters, AdagradParameters):
return _AdagradHandler(optimization_parameters)
elif isinstance(optimization_parameters, ProximalAdagradParameters):
return _ProximalAdagradHandler(optimization_parameters)
elif isinstance(optimization_parameters, AdamParameters):
return _AdamHandler(optimization_parameters)
elif isinstance(optimization_parameters, FtrlParameters):
return _FtrlHandler(optimization_parameters)
elif isinstance(optimization_parameters, ProximalYogiParameters):
return _ProximalYogiHandler(optimization_parameters)
elif isinstance(optimization_parameters, StochasticGradientDescentParameters):
return _StochasticGradientDescentHandler(optimization_parameters)
elif isinstance(optimization_parameters, MomentumParameters):
return _MomentumHandler(optimization_parameters)
elif isinstance(optimization_parameters, RMSPropParameters):
return _RMSPropHandler(optimization_parameters)
  raise NotImplementedError()
def _create_ordered_dict(d):
"""Create an OrderedDict from Dict."""
return collections.OrderedDict((k, d[k]) for k in sorted(d))
def _create_combiners(table_to_config_dict, table_to_features_dict):
"""Create a per feature list of combiners, ordered by table."""
combiners = []
for table in table_to_config_dict:
combiner = table_to_config_dict[table].combiner or 'sum'
combiners.extend([combiner] * len(table_to_features_dict[table]))
return combiners
def _create_table_to_features_and_num_features_dicts(feature_to_config_dict):
"""Create mapping from table to a list of its features."""
table_to_features_dict_tmp = {}
table_to_num_features_dict_tmp = {}
for feature, feature_config in six.iteritems(feature_to_config_dict):
if feature_config.table_id in table_to_features_dict_tmp:
table_to_features_dict_tmp[feature_config.table_id].append(feature)
else:
table_to_features_dict_tmp[feature_config.table_id] = [feature]
table_to_num_features_dict_tmp[feature_config.table_id] = 0
if feature_config.max_sequence_length == 0:
table_to_num_features_dict_tmp[feature_config.table_id] = (
table_to_num_features_dict_tmp[feature_config.table_id] + 1)
else:
table_to_num_features_dict_tmp[feature_config.table_id] = (
table_to_num_features_dict_tmp[feature_config.table_id] +
feature_config.max_sequence_length)
table_to_features_dict = collections.OrderedDict()
table_to_num_features_dict = collections.OrderedDict()
for table in sorted(table_to_features_dict_tmp):
table_to_features_dict[table] = sorted(table_to_features_dict_tmp[table])
table_to_num_features_dict[table] = table_to_num_features_dict_tmp[table]
return table_to_features_dict, table_to_num_features_dict
def _create_device_fn(hosts):
"""Create device_fn() to use with _create_partitioned_variables()."""
def device_fn(op):
"""Returns the `device` for `op`."""
part_match = re.match(r'.*/part_(\d+)(/|$)', op.name)
dummy_match = re.match(r'.*dummy_(\d+).*', op.name)
if not part_match and not dummy_match:
raise RuntimeError(
'Internal Error: Expected {} to contain /part_* or dummy_*'.format(
op.name))
if part_match:
idx = int(part_match.group(1))
else:
idx = int(dummy_match.group(1)) # pytype: disable=attribute-error
device = hosts[idx]
    logging.debug('assigning %s to %s.', op, device)
return device
return device_fn
def _create_partitioned_variables(name,
num_hosts,
vocabulary_size,
embedding_dimension,
initializer,
collections=None): # pylint: disable=redefined-outer-name
"""Creates PartitionedVariables based on `num_hosts` for `table`."""
num_slices = min(vocabulary_size, num_hosts)
var_list = list(
variable_scope.get_variable(
name,
shape=(vocabulary_size, embedding_dimension),
partitioner=partitioned_variables.fixed_size_partitioner(num_slices),
dtype=dtypes.float32,
initializer=initializer,
collections=collections,
trainable=False))
if vocabulary_size >= num_hosts:
return var_list
# For padded part, define the dummy variable to be loaded into TPU system.
for idx in range(num_hosts - vocabulary_size):
var_list.append(
variable_scope.get_variable(
'dummy_{}_{}'.format(vocabulary_size + idx, name),
shape=(1, embedding_dimension),
dtype=dtypes.float32,
initializer=initializer,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
trainable=False))
return var_list
| [
2,
15069,
2864,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.526401 | 32,878 |
def generate_module_cmd(module, input_json, output_json):
"""Generates a command string to use for subprocess calling
Parameters
----------
module: str
The current module being run
input_json: str
The path of the input for the module
output_json: str
The path of the output for the module
Returns
-------
    module_cmd: list of str
        the command, as a list of argument strings, that will be passed to the subprocess
"""
module_cmd = ["python", "-W", "ignore", "-m", module,
"--input_json", input_json,
"--output_json", output_json]
return module_cmd
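# A minimal usage sketch (the module name and file paths below are hypothetical,
# and it assumes `import subprocess` at the top of this file):
#   cmd = generate_module_cmd("mypackage.stage_one", "input.json", "output.json")
#   subprocess.check_call(cmd)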
| [
4299,
7716,
62,
21412,
62,
28758,
7,
21412,
11,
5128,
62,
17752,
11,
5072,
62,
17752,
2599,
198,
220,
220,
220,
37227,
8645,
689,
257,
3141,
4731,
284,
779,
329,
850,
14681,
4585,
628,
220,
220,
220,
40117,
198,
220,
220,
220,
24200... | 2.718615 | 231 |
#!/usr/bin/env python
import argparse, sys, os, gzip
from shutil import rmtree
from multiprocessing import cpu_count
from tempfile import mkdtemp, gettempdir
if __name__ == "__main__":
#do our inputs
args = do_inputs()
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
11,
25064,
11,
28686,
11,
308,
13344,
198,
6738,
4423,
346,
1330,
374,
16762,
631,
198,
6738,
18540,
305,
919,
278,
1330,
42804,
62,
9127,
198,
6738,
20218,
7753,
1... | 2.831325 | 83 |
from django.test import TestCase
from company.models import Company, Employee
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
1664,
13,
27530,
1330,
5834,
11,
36824,
628
] | 4.210526 | 19 |
import sys
import json
from collections import OrderedDict
outf = open(snakemake.output[0],'w')
fusions = loadJSON(snakemake.input[0])
outputGeneTable(fusions, outf)
outf.close() | [
11748,
25064,
198,
11748,
33918,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
448,
69,
796,
1280,
7,
16184,
539,
15883,
13,
22915,
58,
15,
60,
4032,
86,
11537,
198,
69,
15880,
796,
3440,
40386,
7,
16184,
539,
15883,
13,
1... | 2.84127 | 63 |
"""Http transport class with CRUDX implementations of Transport methods
"""
import sys
import json
from requests import Session, Response
from openhltest_client.transport import Transport
from openhltest_client.mockserver import MockServer
from openhltest_client.base import Base
from openhltest_client.errors import *
| [
37811,
43481,
4839,
1398,
351,
8740,
8322,
55,
25504,
286,
19940,
5050,
201,
198,
37811,
201,
198,
11748,
25064,
201,
198,
11748,
33918,
201,
198,
6738,
7007,
1330,
23575,
11,
18261,
201,
198,
6738,
1280,
71,
2528,
395,
62,
16366,
13,
... | 3.626374 | 91 |
from source.exceptions.not_found import NotFoundException
from source.repositories.player_game import PlayerGameRepository
import source.commons.message as message
from source.utils.utils import remove_duplicated_data_from_array
| [
6738,
2723,
13,
1069,
11755,
13,
1662,
62,
9275,
1330,
1892,
21077,
16922,
198,
6738,
2723,
13,
260,
1930,
270,
1749,
13,
7829,
62,
6057,
1330,
7853,
8777,
6207,
13264,
198,
11748,
2723,
13,
9503,
684,
13,
20500,
355,
3275,
198,
6738,... | 3.833333 | 60 |
""" Ejercicio 1 """
def isSquare(num):
"""" Devuelve verdadero si el número es un cuadrado, caso contrario devuelve falso """
start = 1
end = num
while start <= end:
mid = int(start + (end - start) / 2)
square = mid * mid
if square == num:
return True
if square > num:
end = mid - 1
else:
start = mid + 1
return False
for i in range(26):
res = isSquare(i)
print(i, " es ", res) | [
37811,
412,
73,
2798,
46441,
352,
37227,
201,
198,
201,
198,
4299,
318,
48011,
7,
22510,
2599,
201,
198,
220,
13538,
15931,
6245,
2731,
303,
3326,
47984,
3529,
33721,
1288,
299,
21356,
647,
78,
1658,
555,
18912,
41909,
4533,
11,
6124,
... | 2.271357 | 199 |
# retired.py
def objectid2name(self, objid, **kwargs):
''' function takes in a protocol instance (self) and an objid. If the objid is not in the protocol instance, a False is returned.
    If the objid is in the protocol it returns a dict built from the options below:
        nodetype = True: 'step', 'action' or 'reagent'
name = True: returns the name of the object.
location = True: returns a (step, action) location. If step, it returns a single int.
attributes = True: return list of all attribute names
units = True: returns a shorthand format for reagent units
parents = True: returns parents
        full_data = True: adds (merges) all key: value pairs from the object to the outDict; object_data overwrites
not completed:
siblings = True: returns all siblings
children = True: returns children
'''
default_setting = {}
default_setting['objectid'] = objid
default_setting['nodetype'] = 'None'
default_setting['name'] = 'None'
default_setting['location'] = []
default_setting['full_data'] = False
outDict = {}
    # Merge the 2 dicts together; kwargs override default settings:
if kwargs:
for k, v in itertools.chain(default_setting.iteritems(), kwargs.iteritems()):
outDict[k] = v
else:
for k, v in default_setting.iteritems():
outDict[k] = v
# make lists of all objectid's:
steps_by_id = [r['objectid'] for r in self.steps]
#[self.steps[r]['objectid'] for r in range(self.get_num_steps)]
# actions_by_id = self.get_action_tree('objectid')
actions_by_id = [i[2] for i in self.get_action_tree('objectid')]
reagents_by_id = [i[0] for i in self.get_reagent_data('objectid')]
# find what nodetype of objectid:
if objid in steps_by_id:
outDict['nodetype'] = 'step'
outDict['name'] = self.nodes[objid]['name']
outDict['location'] = [steps_by_id.index(objid)]
outDict['object_data'] = self.nodes[objid]
# outDict['slug'] =
if objid in actions_by_id:
outDict['nodetype'] = 'action'
outDict['name'] = self.nodes[objid]['name']
outDict['location'] = self.get_action_tree()[actions_by_id.index(objid)][:-1]
outDict['object_data'] = self.nodes[objid]
if objid in reagents_by_id:
outDict['nodetype'] = 'reagent'
outDict['name'] = self.nodes[objid]['name']
outDict['location'] = self.get_reagent_data('detail')[reagents_by_id.index(objid)][1:3]
s = self.get_reagents_by_action()
for k,v in s.items():
if objid in v:
reagent_order = s[k].index(objid)
outDict['location'].append(reagent_order)
outDict['object_data'] = self.nodes[objid]
if kwargs:
        # Return general requests:
if 'attributes' in kwargs and kwargs['attributes'] == True:
outDict['attributes'] = outDict['object_data'].keys()
if 'units' in kwargs and kwargs['units'] == True:
outDict['label'] = unify(outDict['object_data'])
if 'children' in kwargs and kwargs['children'] == True:
if outDict['nodetype'] == 'step':
outDict['children'] = [r['objectid'] for r in self.nodes[objid]['actions']]
if outDict['nodetype'] == 'action':
outDict['children'] = [r['objectid'] for r in self.nodes[objid][COMPONENT_KEY]]
if outDict['nodetype'] == 'reagent':
outDict['children'] = None
if 'parents' in kwargs and kwargs['parents'] == True:
tmp = self.get_objectid(outDict['location'][0], outDict['location'][1])
if outDict['nodetype'] =='step':
outDict['parents'] = 'protocol'
if outDict['nodetype'] == 'action':
outDict['parents'] = tmp[0]
if outDict['nodetype'] == 'reagent':
outDict['parents'] = tmp[1]
if 'full_data' in kwargs and kwargs['full_data']:
full_data = outDict.pop('object_data')
temp = {}
for k, v in itertools.chain(outDict.iteritems(), full_data.iteritems()):
temp[k] = v
outDict = temp
    # Return reagent handlers:
    # discard object_data unless specified in options
if not outDict['full_data'] == True:
outDict.pop('object_data')
outDict.pop('full_data')
return outDict
def get_reagent_data(self, display=None):
# function takes the display argument and returns the (step, action) display of the reagent, i.e. verb, objectid, slug etc.
    ''' This combines a find technique with a return technique:
find = self.data['components-location']
        return = through the self.steps accessor and not through an objid accessor.
'''
self.needed_reagents = []
if self.data['components-location'][0] > 0: # check if there are components in the protocol:
for l in self.data['components-location']: # iterate over all step,action locations where there are components
components_per_cur_list = len(self.steps[l[1]]['actions'][l[2]][COMPONENT_KEY])
for r in range(0,components_per_cur_list):
reagent_name = self.steps[l[1]]['actions'][l[2]][COMPONENT_KEY][r]['name']
objectid = self.steps[l[1]]['actions'][l[2]][COMPONENT_KEY][r]['objectid']
cur_reagent_name = []
cur_reagent_name.append(reagent_name)
if 'total volume' in reagent_name.lower():
continue
if display == 'detail':
cur_reagent_name.append(l[1])
cur_reagent_name.append(l[2])
if display == 'all':
tmp = []
tmp.append(l[1])
tmp.append(l[2])
tmp.append(self.steps[l[1]]['actions'][l[2]]['verb'])
cur_reagent_name.append(tmp)
if display =='name_objectid':
cur_reagent_name = (reagent_name, objectid)
if display == 'objectid':
actionid = self.get_objectid(l[1], l[2])
cur_reagent_name = (objectid, actionid[1])
self.needed_reagents.append(cur_reagent_name)
return self.needed_reagents
def get_reagents_by_action(self, out_label='objectid'):
    ''' This combines a find technique with a return technique:
find = self.data['components-location']
        return = through the self.steps accessor and not through an objid accessor.
'''
self.verb_reagents = {}
for l in self.data['components-location']: # iterate over all step,action locations where there are components
components_per_cur_list = len(self.steps[l[1]]['actions'][l[2]][COMPONENT_KEY]) # iterate over reagents
verb = self.steps[l[1]]['actions'][l[2]]['verb']
verbid = self.steps[l[1]]['actions'][l[2]]['objectid']
if out_label == 'literal':
self.verb_reagents[verbid]=[]
if out_label == 'objectid':
self.verb_reagents[verbid]=[]
for r in range(0,components_per_cur_list):
reagent_name = self.steps[l[1]]['actions'][l[2]][COMPONENT_KEY][r]['name']
if 'total volume' in reagent_name.lower():
continue
objectid = self.steps[l[1]]['actions'][l[2]][COMPONENT_KEY][r]['objectid']
if out_label == 'literal':
self.verb_reagents[verbid].append(reagent_name)
if out_label == 'objectid':
self.verb_reagents[verbid].append(objectid)
return self.verb_reagents | [
2,
9880,
13,
9078,
198,
198,
4299,
2134,
312,
17,
3672,
7,
944,
11,
26181,
312,
11,
12429,
46265,
22046,
2599,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
705,
7061,
2163,
2753,
287,
257,
8435,
4554,
357,
944,... | 1.957704 | 4,303 |
# Copyright 2009 Wayne See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import server
import appuifw
import e32
import chm_filebrowser
import os
import e32dbm
CONF_FILE = u"E:\\Data\\chompy\\chompy.cfg"
INIT_FILE = u"E:\\Data\\chompy\\online.html"
LOCAL_FILE = u"E:\\Data\\chompy\\offline.html"
SEPARATOR = u"/"
INIT_HTML = u"""<html>
<body>
<script type="text/javascript">
location.replace("http://localhost:""" + unicode(server.PORT) + """/%s")
</script>
</body>
</html>
"""
ERROR_TEMPLATE = """<html>
<body>
%s
</body>
</html>
"""
ERR_READING = u"CHM File cannot be read"
ERR_NO_HHC = u"CHM File contains no HHC file"
if not os.path.exists("E:\\Data\\chompy"):
os.makedirs("E:\\Data\\chompy")
if __name__ == '__main__':
Chompy().show() | [
2,
15069,
3717,
13329,
4091,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
201,
19... | 2.629779 | 497 |
import random
import itertools
| [
11748,
4738,
198,
11748,
340,
861,
10141,
628
] | 4 | 8 |
from flask_restx import Api
from flask import Blueprint
from .main.controller.fileController import api as file_ns
from .main.controller.deepLearningController import api as dl_ns
from .main.controller.user_controller import api as user_ns
from .main.controller.auth_controller import api as auth_ns
from .main.controller.view_controller import api as view_ns
# -> Import the api namespaces created in each controller and register them on the blueprint below so they can be used
blueprint = Blueprint('api', __name__)
api = Api(blueprint, title='FLASK RESTPLUS(RESTX) API BOILER-PLATE WITH JWT',
          version='1.0', description='Python (flask) API server.')
api.add_namespace(file_ns, path='/file')
api.add_namespace(dl_ns, path='/dl')
api.add_namespace(user_ns, path='/user')
api.add_namespace(auth_ns)
api.add_namespace(view_ns)
| [
6738,
42903,
62,
2118,
87,
1330,
5949,
72,
198,
6738,
42903,
1330,
39932,
198,
198,
6738,
764,
12417,
13,
36500,
13,
7753,
22130,
1330,
40391,
355,
2393,
62,
5907,
198,
6738,
764,
12417,
13,
36500,
13,
22089,
41730,
22130,
1330,
40391,
... | 2.247839 | 347 |
# Author: Gustavo Martin Morcuende
#
# Copyright 2020 Gustavo Martin Morcuende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.types import StringType, StructField, StructType
| [
2,
6434,
25,
43715,
78,
5780,
3461,
27399,
38396,
198,
198,
2,
198,
2,
15069,
12131,
43715,
78,
5780,
3461,
27399,
38396,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
... | 3.81768 | 181 |
import datetime
import calendar
# name
name = raw_input("enter your name: ")
# date
date_string = datetime.datetime.now().strftime("%a %m/%d/%Y %H:%M:%S")
print('Hello, ' + str(name) + '!' + ' Today is ' + date_string)
| [
11748,
4818,
8079,
198,
11748,
11845,
198,
198,
2,
1438,
198,
3672,
796,
8246,
62,
15414,
7203,
9255,
534,
1438,
25,
366,
8,
198,
198,
2,
3128,
198,
4475,
62,
8841,
796,
4818,
8079,
13,
19608,
8079,
13,
2197,
22446,
2536,
31387,
720... | 2.534091 | 88 |
from flask_example.app import create_app
__all__ = ["create_app"]
| [
6738,
42903,
62,
20688,
13,
1324,
1330,
2251,
62,
1324,
628,
198,
834,
439,
834,
796,
14631,
17953,
62,
1324,
8973,
198
] | 3.090909 | 22 |
--- serial/tools/list_ports_posix.py.orig 2017-03-16 22:59:33 UTC
+++ serial/tools/list_ports_posix.py
@@ -47,7 +47,7 @@ elif plat[:7] == 'openbsd': # OpenBSD
devices.extend(list_ports_common.list_links(devices))
return [list_ports_common.ListPortInfo(d) for d in devices]
-elif plat[:3] == 'bsd' or plat[:7] == 'freebsd':
+elif plat[:3] == 'bsd' or plat[:7] == 'freebsd' or plat[:9] == 'dragonfly':
| [
6329,
11389,
14,
31391,
14,
4868,
62,
3742,
62,
1930,
844,
13,
9078,
13,
11612,
197,
5539,
12,
3070,
12,
1433,
2534,
25,
3270,
25,
2091,
18119,
198,
45340,
11389,
14,
31391,
14,
4868,
62,
3742,
62,
1930,
844,
13,
9078,
198,
12404,
... | 2.189744 | 195 |
from django.db import models
#------------------------------------------------------------------------------
class Product(models.Model):
""" One record per product. All product details held here. """
title = models.CharField(max_length=200)
price = models.FloatField()
discount_price = models.FloatField()
category = models.CharField(max_length=200)
description = models.TextField()
image = models.CharField(max_length=300)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class Shopper(models.Model):
""" Holds details of each registered user of the site. """
name = models.CharField(max_length=50)
email = models.CharField(max_length=100)
address = models.CharField(max_length=100)
district = models.CharField(max_length=100)
city = models.CharField(max_length=50)
county = models.CharField(max_length=50)
postcode = models.CharField(max_length=10)
userid = models.IntegerField(default=1)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class Order(models.Model):
""" A header record for an order - just needs an id and a link to the
Shopper record. """
orderdate = models.DateTimeField(auto_now=True)
userid = models.ForeignKey(Shopper, blank=True, null=True, on_delete=models.CASCADE)
#------------------------------------------------------------------------------
class Item(models.Model):
""" Main order item record, which links to the product record and the
order record. The order_ref field is used to link together all order
items for the same order. """
item = models.ForeignKey(Product, blank=True, null=True, on_delete=models.CASCADE)
quantity = models.IntegerField()
cost = models.FloatField()
order = models.ForeignKey(Order, blank=True, null=True, on_delete=models.CASCADE)
#------------------------------------------------------------------------------
class OrderRef(models.Model):
""" The order ref is incremented for each order. Just a single record is
stored on the database. """
order_ref = models.IntegerField()
#------------------------------------------------------------------------------
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
10097,
26171,
198,
4871,
8721,
7,
27530,
13,
17633,
2599,
198,
220,
220,
220,
37227,
1881,
1700,
583,
1720,
13,
1439,
1720,
3307,
2714,
994,
13,
37227,
628,
220,
220,
220,
3670,
... | 3.853226 | 620 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
# In[2]:
joint_hier = [
('head', 'neck', 'blue'),
('neck', 'root', 'darkred'),
('root', 'clavicle', 'brown'),
('neck', 'leftShoulder', 'red'),
('leftShoulder', 'leftElbow', 'darkred'),
('leftElbow', 'leftWrist', 'orange'),
('neck', 'rightShoulder', 'orange'),
('rightShoulder', 'rightElbow', 'lightgreen'),
('rightElbow', 'rightWrist', 'green'),
('clavicle', 'leftHip', 'green'),
('leftHip', 'leftKnee', 'lightgreen'),
('leftKnee', 'leftAnkle', 'lightblue'),
('clavicle', 'rightHip', 'lightblue'),
('rightHip', 'rightKnee', 'cyan'),
('rightKnee', 'rightAnkle', 'blue')
]
# In[3]:
# In[4]:
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
16,
5974,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
26... | 2.301676 | 358 |
""" nftfw pattern_reader
Used by the blacklist operation
Patterns are text files specifying rules to be applied to log files.
The main idea is to create a data structure indexed by the filename of the
log file, holding the list of regexes to be applied to each line plus the
other information needed to process the pattern and generate firewall entries.
Returns
-------
Dict[file : List[Dict]]
file : str
Main index is file name of log file to scan
The value is a list of dicts:
pattern : str
pattern name for reference
ports : str
comma separated ports to match (may be 'all')
file : str
logfile to scan
regex : List[compiled regex]
List of regexes to scan each line of the log file with
"""
import os
import re
from pathlib import Path
from collections import defaultdict
import logging
log = logging.getLogger('nftfw')
def pattern_reader(cf):
"""Read pattern files
Files have commands:
file = logfile path
Symbiosis doesn't include wildcards in paths, adding
this allows /srv log files to be processed by a single
set of patterns.
ports = port list.
Comma separated list of numeric ports to be denied if this match
succeeds.
Can be 'all' to mean block all ports.
          Can be the word 'update' to allow firewall database incident
update only from the IP.
Can be the word 'test' to indicate that this is a test
pattern and will only be used when requested by
the -p argument to the program, otherwise the
pattern file will be ignored
The file has comments - # followed by text.
Finally there's a list of regex patterns including
__IP__ which are used to find an ip address in the line.
"""
path = cf.etcpath('patterns')
files = (f for f in path.glob('*.patterns') if f.is_file())
patterns = ((f.stem, f.read_text()) for f in files)
recordlist = (parsefile(cf, f, c) for f, c in patterns)
# remove empty values
recordlist = (l for l in recordlist if l)
return filelist(recordlist)
def parsefile(cf, filename, contents):
"""Parse a single pattern file into record value
    Ignore any file with ports=test unless cf.selected_pattern_file is set,
    in which case ignore every pattern except the selected one.
    If cf.selected_pattern_file is set and the file contains ports=test,
    set the global flag selected_pattern_is_test so that special action
    can be taken when reading files.
Parameters
----------
filename : str
Source file name
contents : str
File contents
Returns
-------
Dict[file : List[Dict]]
file : str
Main index is file name of log file to scan
The value is a list of dicts:
pattern : str
pattern name for reference
ports : str
comma separated ports to match (may be 'all')
numeric values are checked here, but multiple
values and order is checked and normalised
in blacklist.py
file : str
logfile to scan
regex : List[compiled regex]
List of regexes to scan each line of the log file with
"""
# pylint: disable=too-many-return-statements
# should always have contents
if not any(contents):
return None
(file, ports, regex) = _pattern_scan(contents, filename)
# validate statements
# check for file=
if not file:
log.error('Pattern: missing file = statement in %s', filename)
return None
if not hasattr(cf, 'TESTING') \
and file[0] != '/':
log.error('Pattern: use full path to file in %s', filename)
return None
# validate port
if ports is None:
ports = 'all'
else:
pchk = re.compile(r'(all|update|test|(?:\d+(?:\s*,\s*\d+)*))$')
if not pchk.match(ports):
err = 'ports= must be all, test, ' \
+ 'update or comma separated numeric list'
log.error('Pattern: %s %s', filename, err)
return None
# check for selected pattern
if cf.selected_pattern_file is not None:
# if we have a selected_pattern_file, and it's not this file,
# then ignore it
# if it is and ports is test set global value this allows single
# patterns to be run normally
if filename != cf.selected_pattern_file:
return None
if ports == 'test':
cf.selected_pattern_is_test = True
else:
# no selected_pattern_file
# if the ports value is test ignore it
if ports == 'test':
return None
# if we have no regexes - then return None
if not any(regex):
log.error('Pattern: %s - no test expressions, file ignored', filename)
return None
# generate output
res = {'pattern':filename,
'ports':ports,
'file':file,
'regex':regex
}
return res
def _pattern_scan(contents, filename):
"""Scan a file
Parameters
----------
contents : str
File contents
filename : str
Filename for error messages
Returns
-------
tuple
file : str
file name
ports : str
comma separated ports list
regex : List[]
List of compiled regexes
"""
# pylint: disable=too-many-branches
# re used to pick out value setting lines
commandre = re.compile(r'^([a-z]*)\s*=\s*(.*)(:?#.*)?')
ports = None
file = None
regex = []
lineno = 0
# scan file
lines = (l.strip() for l in contents.split('\n'))
for line in lines:
lineno += 1
# ignore comments
# don't want to do global #
# checking in case re's contain #
if line == '' \
or line[0] == '#':
continue
# look for lines that look like rules
# accept file= and ports=
cm = commandre.match(line)
if cm:
if cm.group(1) == 'file':
if not file:
file = cm.group(2)
else:
log.info('Pattern: Repeated file statement in %s:%s %s',
filename, lineno, line)
elif cm.group(1) == 'ports':
if not ports:
ports = cm.group(2).strip()
else:
log.info('Repeated ports statement in %s:%s %s',
filename, lineno, line)
else:
log.error('Pattern: unknown command in %s:%s %s',
filename, lineno, line)
continue
# remaining lines are regexes
# replace __IP__ by address match
# add compiled re to regex list
# re stolen from symbiosis
# make this a little more robust
# the line must contain __IP__
if '__IP__' in line:
linere = line.replace(r'__IP__', r'(?:::ffff:)?([0-9a-fA-F:\.]+(?:/[0-9]+)?)', 1)
try:
cm = re.compile(linere, re.IGNORECASE)
if cm.groups != 1:
fmt = '%s, Line %s ignored ' \
+ ' - extra regex match groups found - use \\ before ( and )'
log.error(fmt, filename, lineno)
else:
regex.append(cm)
except re.error:
log.error('Pattern: invalid regex in %s: Line %s - line ignored',
filename, lineno)
else:
log.error('Pattern: Unknown line in %s: Line %s - line ignored',
filename, lineno)
return (file, ports, regex)
def filelist(recordlist):
"""Create dict From the raw data obtained from files
recordlist is an array of information from parsed files
    Output a dict indexed by the files to scan, where each entry
    is an array of the records from recordlist.
If the files don't exist, they will be omitted from the final list.
Expanding on symbiosis, we'll do a glob on the files so HTML logs in
the srv tree can be scanned by a single ruleset. Doing this uses
pathlib to expand the glob.
Files that don't exist are removed from the set of actions so it's
possible to have no actions.
"""
action = defaultdict(list)
for record in recordlist:
if not record['file']:
continue
# file=value
fname = record['file']
# glob glob characters
globchars = set('*[?')
if any((c in globchars) for c in fname):
pathlist = []
# for a glob there may be symlinks which will
# generate several paths to the same file
# build up a list using
# realpath to arrive at the canonical path
# samefile to eliminate duplicates
root = Path('/')
# remove initial / from glob lookup
glstr = fname if fname[0] != '/' else fname[1:]
for file in root.glob(glstr):
f = Path(os.path.realpath(str(file)))
if not any(pathlist):
pathlist.append(f)
elif not any(l for l in pathlist \
if l.samefile(f)):
pathlist.append(f)
else:
pathlist = [Path(fname)]
files = (str(f) for f in pathlist \
if f.is_file() and f.stat().st_size > 0)
for file in files:
action[file].append(record)
return action
| [
37811,
299,
701,
44482,
3912,
62,
46862,
198,
198,
38052,
416,
262,
38810,
4905,
198,
198,
47546,
82,
389,
2420,
3696,
31577,
3173,
284,
307,
5625,
284,
2604,
3696,
13,
198,
198,
13383,
2126,
318,
284,
2251,
257,
1366,
4645,
41497,
41... | 2.351705 | 4,046 |
#!/usr/bin/env python3
# https://stackoverflow.com/questions/34461987/python3-importerror-no-module-named-xxxx
from .config_src import *
from .yaml2json import load_yaml
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
18,
27260,
27301,
14,
29412,
18,
12,
320,
1819,
14007,
12,
3919,
12,
21412,
12,
13190,
12,
12343,
198,
6738,
... | 2.758065 | 62 |
""" Solução
A solução se baseia em achar a posição dos caracteres '+' e '=', e então a partir destas posições, achar os valores relacionados às incógnitas R, L e J. E após descobrir qual dos 3 é a incógnita da equação e tendo os outros 2 valores, resolvemos de forma simples e trivial o problema.
"""
# Loop over the test cases until EOF
while True:
try:
        # Read the equation
str = input()
        # Find the positions of the characters '+' and '='
pos_plus = str.find('+')
pos_equal = str.find('=')
        # Split the equation into R, L and J according to the positions of the characters '+' and '='
        # Everything before the '+' is the variable R, before the '=' is L, and everything after it is J
R = str[0:pos_plus]
L = str[pos_plus+1:pos_equal]
J = str[pos_equal+1:]
        # Check which part of the equation is undefined and is therefore the unknown of the equation
        # For each unknown, its value is computed differently from the other 2 values
if R == 'R':
# R = J - L
print(int(J) - int(L))
elif L == 'L':
# L = J - R
print(int(J) - int(R))
elif J == 'J':
# J = R + L
print(int(R) + int(L))
    # Catch EOF and finish the test cases
except EOFError:
break | [
37811,
4294,
84,
16175,
28749,
198,
32,
1540,
84,
16175,
28749,
384,
2779,
544,
795,
257,
10641,
257,
1426,
72,
16175,
28749,
23430,
1097,
529,
68,
411,
705,
10,
6,
304,
705,
28,
3256,
304,
920,
28749,
257,
636,
343,
2244,
292,
1426... | 1.917355 | 726 |
import collections
import difflib
from functools import lru_cache
import tvdb_api
SearchResult = collections.namedtuple("SearchResult", "show_title id search_difference")
class TVDB:
"""Handles all TVDB queries for the module."""
def get_ep_tvdb_info(self, file_info: dict) -> "obj, obj":
"""Returns the series and episode info as two objects."""
year = file_info["year"] if "year" in file_info else " "
tvdb_series_gen = self._best_match_series(file_info["title"], year)
while 1:
try:
tvdb_series = next(tvdb_series_gen)
except StopIteration:
# log: no non-garbage series data found matching x
break
except:
raise
else:
try:
tvdb_episode = tvdb_series[file_info["season"]][
file_info["episode"]
]
except:
# log: trying next series in list
continue
else:
return tvdb_series, tvdb_episode
return None, None
@lru_cache(maxsize=128)
def search_series_name(self, show_title: str, year: str = "") -> list:
"""Returns re-sorted list of series that match the query items.
Items are sorted by closeness to search query.
"""
search_term = show_title + year
try:
search_res = self.t.search(search_term)
except:
search_term = show_title
try:
search_res = self.t.search(show_title)
except:
search_res = []
finally:
return _sorted_by_diff(search_res, search_term)
| [
11748,
17268,
198,
11748,
814,
8019,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
11748,
31557,
9945,
62,
15042,
628,
198,
18243,
23004,
796,
17268,
13,
13190,
83,
29291,
7203,
18243,
23004,
1600,
366,
12860,
62,
7839,
4... | 2.020833 | 864 |
import socket, os
from cryptography.fernet import Fernet
if __name__ == "__main__":
custom_config = input("custom config? (s/n) >> ")[0] == "s"
client = Client() if custom_config == False else Client(input("enter server hostname >> "), input("enter server port >> "))
client.start() | [
11748,
17802,
11,
28686,
198,
6738,
45898,
13,
69,
1142,
316,
1330,
38982,
316,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
2183,
62,
11250,
796,
5128,
7203,
23144,
4566,
30,
357,
82,
14,
77,
8,
9609... | 3.247191 | 89 |
#!/usr/bin/env python
#
# aria2 - The high speed download utility
#
# Copyright (C) 2013 Tatsuhiro Tsujikawa
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
#
# Generates API reference from C++ source code.
import re, sys, argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate API reference")
parser.add_argument('--header', type=argparse.FileType('rb', 0),
help='header inserted at the top of the page')
parser.add_argument('files', nargs='+', type=argparse.FileType('rb', 0),
help='source file')
args = parser.parse_args()
if args.header:
print args.header.read()
for infile in args.files:
make_api_ref(args.files)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
257,
7496,
17,
532,
383,
1029,
2866,
4321,
10361,
198,
2,
198,
2,
15069,
357,
34,
8,
2211,
309,
19231,
49907,
23459,
73,
40398,
198,
2,
198,
2,
770,
1430,
318,
1479,
3... | 3.437803 | 619 |
#! /usr/bin/env python
# Copyright (c) 2017 Martin Rosellen
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import argparse
__author__ = 'Martin Rosellen'
__docformat__ = "restructuredtext en"
if __name__ == "__main__":
main(sys.argv) | [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
66,
8,
2177,
5780,
8049,
297,
268,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
3788,
290,
... | 3.717262 | 336 |
# run once to create table
# CREATE table
import sqlite3
conn = sqlite3.connect('jobslist.db')
cursor = conn.cursor()
sql="""CREATE TABLE jobslist (
date_pulled DATE,
job_post TEXT,
description TEXT,
link TEXT
)"""
cursor.execute(sql)
conn.commit()
cursor.close()
| [
2,
1057,
1752,
284,
2251,
3084,
198,
198,
2,
29244,
6158,
3084,
198,
11748,
44161,
578,
18,
198,
198,
37043,
796,
44161,
578,
18,
13,
8443,
10786,
43863,
4868,
13,
9945,
11537,
198,
198,
66,
21471,
796,
48260,
13,
66,
21471,
3419,
1... | 2.396825 | 126 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
from dummy import *
audit(assign('wizbank', 'http://60.247.86.31/')[1])
audit(assign('wizbank', 'http://demo.cyberwisdom.net.cn/')[1]) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
220,
220,
220,
220,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,... | 2.035088 | 114 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for export converters."""
from absl import app
from absl.testing import absltest
from grr_response_core.lib.rdfvalues import osquery as rdf_osquery
from grr_response_server.export_converters import base
from grr_response_server.export_converters import osquery
from grr.test_lib import test_lib
if __name__ == "__main__":
app.run(main)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
51,
3558,
329,
10784,
6718,
1010,
526,
15931,
198,
198,
6738,
2352,
75,
1330,
598,
198,
6738,
2352,
75,
13,
33... | 2.823944 | 142 |
import os
import sys
import pytest
import pickle
import numpy as np
import datetime as dt
from attrdict import AttrDict
from .pytest_utils import fp, assert_objects_equal, call_with_legacy_params
from SEIRcity import model, utils
HERE = os.path.dirname(os.path.abspath(__file__))
def test_model_can_import():
"""Can import the two functions in SEIR_main_publish
"""
assert hasattr(model, "SEIR_model_publish_w_risk")
assert hasattr(model, "compute_R0")
@pytest.mark.skip
@pytest.mark.parametrize("legacy_pickle", [
# Call from SEIR_main_publish.main on commit on commit eca0e3b4222476b87d45
fp("tests/data/SEIR_model_publish_w_risk_result0.pckl")
])
def test_model_publish_w_risk_legacy(legacy_pickle, tmp_path):
"""Compare results of legacy run of model.SEIR_model_publish_w_risk
with results from a new run. Args, kwargs, and results were written
to file path `legacy_pickle` by dev_utils.result_to_pickle. Can the
same function yield exactly the same outputs as the legacy run using
exactly the same inputs. Equality is determined by
pytest_utils.assert_objects_equal.
"""
assert os.path.isfile(legacy_pickle)
legacy_result, new_result = call_with_legacy_params(
legacy_pickle=legacy_pickle, func=model.SEIR_model_publish_w_risk)
assert isinstance(legacy_result, tuple)
assert isinstance(new_result, tuple)
assert_objects_equal(legacy_result, new_result, verbose=False)
def test_dt_to_dt64arr_and_back():
"""Can convert from dt64 to dt and back.
"""
# we use the legacy_reopen datetime from below test as fixture
as_dt = dt.datetime(2020, 4, 2)
print('as_dt: ', as_dt)
as_dt64 = utils.dt_to_dt64(as_dt)
print('as_dt64: ', as_dt64)
back_again = utils.dt64_to_dt(as_dt64)
assert back_again == as_dt
@pytest.mark.skip
@pytest.mark.parametrize("legacy_pickle", [
# Call from SEIR_main_publish.main on commit on commit eca0e3b4222476b87d45
fp("tests/data/SEIR_model_publish_w_risk_result0.pckl")
])
def test_model_can_return_dt64_array(legacy_pickle, tmp_path):
"""If the format of school event times returned by model is changed
to an ndarray with dtype datetime64, can we recapitulate the old
datetime.datetime return format?
"""
# run model with same kwargs
assert os.path.isfile(legacy_pickle)
legacy_result, new_result = call_with_legacy_params(
legacy_pickle=legacy_pickle,
func=model.SEIR_model_publish_w_risk)
legacy_reopen = legacy_result[-1]
legacy_close = legacy_result[-2]
print('legacy_reopen: ', legacy_reopen)
print('legacy_close: ', legacy_close)
new_reopen_arr = new_result[-1]
new_close_arr = new_result[-2]
assert isinstance(new_reopen_arr, np.ndarray)
assert isinstance(new_close_arr, np.ndarray)
assert new_reopen_arr.dtype == 'float64'
assert new_close_arr.dtype == 'float64'
new_reopen_dt = utils.bool_arr_to_dt(new_reopen_arr)
new_close_dt = utils.bool_arr_to_dt(new_close_arr)
assert new_reopen_dt == legacy_reopen
assert new_close_dt == legacy_close
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
12972,
9288,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
355,
288,
83,
198,
6738,
708,
4372,
713,
1330,
3460,
81,
35,
713,
198,
6738,
764,
9078,
9288,
... | 2.512924 | 1,238 |
from flask_wtf import FlaskForm
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
628
] | 3.666667 | 9 |
# Summarize text passages and extract keywords
from summa.keywords import keywords
from summa.summarizer import summarize
import re
| [
2,
5060,
3876,
1096,
2420,
22674,
290,
7925,
26286,
198,
198,
6738,
2160,
2611,
13,
2539,
10879,
1330,
26286,
198,
6738,
2160,
2611,
13,
16345,
3876,
7509,
1330,
35743,
198,
11748,
302,
198
] | 4.030303 | 33 |
#coding:utf8
"""
author: yqq
date: 2019-05-11 20:26
descriptions: USDP handling
"""
import logging
import json
from base_handler import BaseHandler
from utils import decimal_default,get_linenumber
from .proxy import USDPProxy
from constants import USDP_IP_ADDR, USDP_RPC_PORT
import sql
from decimal import Decimal
g_IP, g_PORT = USDP_IP_ADDR, USDP_RPC_PORT
#1. Two transactions with completely identical data broadcast at the same time: the first succeeds, the second broadcast fails with HTTP-500,
# {"error":"broadcast_tx_sync: Response error: RPC error -32603 - Internal error: Tx already exists in cache"}
#
#2. Two transactions with the same from, to, amount and sequence but different memos broadcast at the same time: the first broadcast succeeds, the second fails, HTTP 200
# {"height":"0","txhash":"DFBCB4155DEBAD0DDFA61FA648BC065B1B70267126309D514B43C5DD22301EDD",
# "code":4,"raw_log":"{\"codespace\":\"sdk\",\"code\":4,
# \"message\":\"signature verification failed; verify correct account sequence and chain-id\"}"}
#
#3. If the transfer amount equals the account balance, the broadcast still succeeds, but the transaction is invalid
# {"height":"0","txhash":"C2915385AE548F3F5680C12D3679D6C2BFA7A5C04FEAA2DBB2BDD12C40293EC5"}
#
#4. Attach an orderId to the broadcast to prevent resending a transaction
# If the previous broadcast failed and a new transaction is to be issued, should we simply return the txid stored in the database?
#
#5. When querying a transaction via the /transaction/txid endpoint, a non-existent txid returns HTTP 500
#
#6. If the sequence is wrong, the broadcast fails, HTTP 500
#{"height":"0","txhash":"150AAD45FF1637EE52A07A0961DBD6FC8F1A71017E458E743C27F6865DDC86F9",
# "code":4,"raw_log":"{\"codespace\":\"sdk\",\"code\":4,\"message\":\"signature verification failed;
# verify correct account sequence and chain-id\"}"}
#
#7. The /transaction/txid endpoint returns the on-chain transaction data [success, or failure (insufficient funds)]
#
#8. Which transactions make it onto the chain:
# 8.0 completely correct transaction data (transactions that succeed)
# 8.1 from, to, fee, sequence and signature all correct but the amount is insufficient
# e.g.: 406e9608bca45d67847a9e3f237f36818346daefc93ba0f300ed6b1b6ae8795c
#
#2019-05-11 yqq
# Endpoint for fetching a user's deposit information; reads transaction data directly from the database
#@staticmethod
# Sweep (consolidation) query endpoint
| [
2,
66,
7656,
25,
40477,
23,
198,
37811,
198,
9800,
25,
331,
38227,
198,
4475,
25,
13130,
12,
2713,
12,
1157,
1160,
25,
2075,
198,
20147,
1968,
507,
25,
1294,
6322,
13783,
226,
49426,
228,
198,
37811,
198,
11748,
18931,
198,
11748,
3... | 1.484414 | 1,187 |
import tweepy
__author__ = 'pvandepavoordt'
| [
11748,
4184,
538,
88,
198,
198,
834,
9800,
834,
796,
705,
79,
85,
392,
538,
615,
78,
585,
83,
6,
198
] | 2.142857 | 21 |
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import grasping.planning.antipodal as gpa
import math
import numpy as np
import basis.robot_math as rm
import robot_sim.robots.cobotta.cobotta as cbt
import manipulation.pick_place_planner as ppp
import motion.probabilistic.rrt_connect as rrtc
base = wd.World(cam_pos=[1.2, .7, 1], lookat_pos=[.0, 0, .15])
gm.gen_frame().attach_to(base)
# ground
ground = cm.gen_box(extent=[5, 5, 1], rgba=[.7, .7, .7, .3])
ground.set_pos(np.array([0, 0, -.51]))
ground.attach_to(base)
robot_s = cbt.Cobotta()
robot_s.gen_meshmodel(toggle_tcpcs=True).attach_to(base)
seed_jnt_values = None
for z in np.linspace(.1, .6, 5):
goal_pos = np.array([.25, -.1, z])
goal_rot = rm.rotmat_from_axangle(np.array([0, 1, 0]), math.pi * 1 / 2)
gm.gen_frame(goal_pos, goal_rot).attach_to(base)
jnt_values = robot_s.ik(tgt_pos=goal_pos, tgt_rotmat=goal_rot, seed_jnt_values=seed_jnt_values)
print(jnt_values)
if jnt_values is not None:
robot_s.fk(jnt_values=jnt_values)
seed_jnt_values = jnt_values
robot_s.gen_meshmodel(toggle_tcpcs=True).attach_to(base)
base.run()
| [
11748,
32704,
13,
79,
5282,
13,
6894,
355,
266,
67,
198,
11748,
21128,
13,
469,
16996,
62,
19849,
355,
308,
76,
198,
11748,
21128,
13,
26000,
1166,
62,
19849,
355,
12067,
198,
11748,
44787,
13,
11578,
768,
13,
415,
541,
375,
282,
35... | 2.284351 | 524 |
from pathlib import Path
import typing as t
import sqlalchemy
import psycopg2.extensions as pg2ext
def truncate_table(db_engine: sqlalchemy.engine.Engine, table: str, schema: str = 'public', cascade: bool = False) -> None:
"""Truncate given table"""
db_engine.execute(f"TRUNCATE TABLE {schema + '.' + table} {'CASCADE' if cascade else ''}")
def export_to_csv(db_engine: sqlalchemy.engine.Engine, table_or_view: str, output_file: t.Union[Path, str], schema: str = 'public') -> None:
"""Export table/view to a csv file"""
output_file = Path(output_file).resolve()
con: pg2ext.connection = db_engine.raw_connection()
cur: pg2ext.cursor = con.cursor()
query = f"COPY {schema + '.' + table_or_view} TO STDOUT WITH (FORMAT CSV, HEADER, DELIMITER ',')"
with output_file.open(mode="wb") as fd:
cur.copy_expert(sql=query, file=fd)
def import_from_csv(db_engine: sqlalchemy.engine.Engine, input_file: t.Union[Path, str], table: str, schema: str = 'public') -> None:
"""Import csv file into given table"""
input_file = Path(input_file).resolve()
con: pg2ext.connection = db_engine.raw_connection()
cur: pg2ext.cursor = con.cursor()
query = f"COPY {schema + '.' + table} FROM STDIN WITH (FORMAT CSV, HEADER, DELIMITER ',')"
with input_file.open(mode="rb") as fd:
cur.copy_expert(sql=query, file=fd)
con.commit()
def check_if_table_or_view_exists(db_engine: sqlalchemy.engine.Engine, table_or_view: str, schema: str = 'public') -> bool:
"""Check if table or view exists in the db
:param db_engine: PostgreSQL db engine
:param table_or_view: table_or_view to read from
:param schema: table_or_view schema
"""
query = f"""
SELECT 1 FROM INFORMATION_SCHEMA.tables WHERE table_schema = '{schema}' AND table_name = '{table_or_view}'
UNION ALL
SELECT 1 FROM INFORMATION_SCHEMA.views WHERE table_schema = '{schema}' AND table_name = '{table_or_view}';
"""
r = db_engine.execute(query)
if r.fetchall():
return True
else:
return False | [
6738,
3108,
8019,
1330,
10644,
198,
11748,
19720,
355,
256,
198,
11748,
44161,
282,
26599,
198,
11748,
17331,
22163,
70,
17,
13,
2302,
5736,
355,
23241,
17,
2302,
628,
198,
4299,
40122,
378,
62,
11487,
7,
9945,
62,
18392,
25,
44161,
2... | 2.54733 | 824 |
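A hedged usage sketch for the PostgreSQL helpers above; the connection URL, table name and file path are assumptions:

import sqlalchemy

engine = sqlalchemy.create_engine("postgresql+psycopg2://user:secret@localhost:5432/mydb")

if check_if_table_or_view_exists(engine, "events"):
    export_to_csv(engine, "events", "/tmp/events.csv")     # COPY ... TO STDOUT into a local CSV
    truncate_table(engine, "events", cascade=True)         # empty the table (and dependents)
    import_from_csv(engine, "/tmp/events.csv", "events")   # COPY ... FROM STDIN back into the table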
import sys
import os
import time
from src import mnlp, ca, gtps, dspl
| [
11748,
25064,
201,
198,
11748,
28686,
201,
198,
11748,
640,
201,
198,
6738,
12351,
1330,
285,
21283,
79,
11,
1275,
11,
308,
83,
862,
11,
288,
22018,
201,
198,
201,
198
] | 2.451613 | 31 |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 20:58:36 2019
@author: Sneha
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 13:26:27 2019
@author: Sneha
"""
import tkinter as tk
from tkinter import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import numpy as np
import matplotlib.pyplot as plt
import math
from collections import deque, namedtuple
import sys
from collections import defaultdict
from heapq import *
import matplotlib.animation as animation
from shapely.geometry import Point, Polygon
import time
title='Click point in map to select Initial/Final point.'
arr=[]
root= tk.Tk()
# Print the tree
init=[]
final=[]
resolution=1
radius=0
clearance=0
# we'll use infinity as a default distance to nodes.
inf = float('inf')
Edge = namedtuple('Edge', 'start, end, cost')
def pathAvailability(x,y,arr,pol,maxPx,minPx,maxPy,minPy):
"""
Box
"""
global radius,clearance,resolution
d=radius+clearance
if(((y-((112.5/resolution)+d))<=0) and ((x-((100/resolution)+d))<=0) and ((-y+((67.5/resolution)-d))<=0) and ((-x+((50/resolution)-d))<=0)):
maxBx=100
minBx=50
maxBy=112.5
minBy=67.5
updateMinMax(arr,minBx,minBy,maxBx,maxBy,d)
return 0
# xpolygon=[120,158, 165,188,168,145];
# % ypolygon=[55,51,89,51,14,14];
# % Line 2 with coordinates (125,56) and (150,15)
p2 = Point(x,y)
for i in pol:
coords = i
poly = Polygon(i)
inside2 = p2.within(poly)
if(inside2==True):
break
if(inside2==True):
updateMinMax(arr,minPx,minPy,maxPx,maxPy,d)
return 0
if((((math.pow((x-(140/resolution)),2)/math.pow(((15/resolution)+d),2))+(math.pow((y-(120/resolution)),2)/math.pow(((6/resolution)+d),2))-1)<=0)):
maxEx=140+15
minEx=140-15
maxEy=120+6
minEy=120-6
updateMinMax(arr,minEx,minEy,maxEx,maxEy,d)
return 0
if((((math.pow((x-(190/resolution)),2))+(math.pow((y-(130/resolution)),2))-(math.pow(((15/resolution)+d),2)))<=0)):
maxCx=190+15
minCx=190-15
maxCy=130+15
minCy=130-15
updateMinMax(arr,minCx,minCy,maxCx,maxCy,d)
return 0
else:
return 1
xdata=[]
ydata=[]
t = np.linspace(0, 2*np.pi, 100)
r = 15
n=190 #x-position of the center
m=130 #radius on the y-axis
u=140 #x-position of the center
v=120 #y-position of the center
a=15 #radius on the x-axis
b=6 #radius on the y-axis
p=n+r*np.cos(t)
q=m+r*np.sin(t)
r=u+a*np.cos(t)
s=v+b*np.sin(t)
x = [50, 100, 100, 50]
y = [112.5, 112.5, 67.5, 67.5]
px=[125,163,170,193,173,150]
py=[56,52,90,52,15,15]
fig, ax = plt.subplots()
ax.grid(color=(0,0,0), linestyle='-', linewidth=1)
test=[]
xs=[]
ys=[]
uboxx=[]
uboxy=[]
for i in range(4):
uboxx.append(x[i]+radius*np.cos(t))
uboxy.append(y[i]+radius*np.sin(t) )
upolx=[]
upoly=[]
for i in range(6):
upolx.append(px[i]+radius*np.cos(t))
upoly.append(py[i]+radius*np.sin(t) )
ucirx=[]
uciry=[]
for i in range(len(r)):
ucirx.append(p[i]+radius*np.cos(t))
uciry.append(q[i]+radius*np.sin(t))
uelpx=[]
uelpy=[]
for i in range(len(r)):
uelpx.append(r[i]+radius*np.cos(t))
uelpy.append(s[i]+radius*np.sin(t))
listPnts=animate([[uboxx, uboxy,'b'],[x, y,'r'],[upolx, upoly,'b'], [px, py,'r'],[ucirx, uciry,'b'],[p,q,'r'],[uelpx, uelpy,'b'],[r,s,'r']])
r = 15/resolution
n=190/resolution #x-position of the center
m=130/resolution #radius on the y-axis
u=140/resolution #x-position of the center
v=120/resolution #y-position of the center
a=15/resolution #radius on the x-axis
b=6/resolution #radius on the y-axis
#plt.gca().set_aspect('equal')
p=n+r*np.cos(t)
q=m+r*np.sin(t)
r=u+a*np.cos(t)
s=v+b*np.sin(t)
x = [50/resolution, 100/resolution, 100/resolution, 50/resolution]
y = [112.5/resolution, 112.5/resolution, 67.5/resolution, 67.5/resolution]
px=[125/resolution,163/resolution,170/resolution,193/resolution,173/resolution,150/resolution]
py=[56/resolution,52/resolution,90/resolution,52/resolution,15/resolution,15/resolution]
uboxx=[]
uboxy=[]
for i in range(4):
uboxx.append(x[i]+radius*np.cos(t))
uboxy.append(y[i]+radius*np.sin(t) )
upolx=[]
upoly=[]
in_x=[]
in_y=[]
for i in range(6):
temp_x=px[i]+radius*np.cos(t)
temp_y=py[i]+radius*np.sin(t)
for j in temp_x:
in_x.append(j)
for k in temp_y:
        in_y.append(k)
upolx.append(temp_x)
upoly.append(temp_y)
ucirx=[]
uciry=[]
for i in range(len(r)):
ucirx.append(p[i]+radius*np.cos(t))
uciry.append(q[i]+radius*np.sin(t))
uelpx=[]
uelpy=[]
for i in range(len(r)):
uelpx.append(r[i]+radius*np.cos(t))
uelpy.append(s[i]+radius*np.sin(t))
ax.fill(uboxx, uboxy,'b')
ax.fill(x, y,'r')
testing=ax.fill(upolx, upoly,'b')
ax.fill(px, py,'r')
ax.fill(ucirx, uciry,'b')
ax.fill(p,q,'r')
ax.fill(uelpx, uelpy,'b')
ax.fill(r,s,'r')
xs=[]
ys=[]
k=0
pol=[]
for i in testing:
array=i.get_xy()
polygon_vertices=[]
for j in array:
polygon_vertices.append((j[0],j[1]))
pol.append(polygon_vertices)
maxPx=0
minPx=250
maxPy=0
minPy=150
for i in pol:
coords = i
poly = Polygon(i)
for j in i:
if(minPx>j[0]):
minPx=j[0]
if(maxPx<j[0]):
maxPx=j[0]
if(minPy>j[1]):
minPy=j[1]
if(maxPy<j[1]):
maxPy=j[1]
print(minPx,minPy,maxPx,maxPy)
obstacles=[[uboxx, uboxy],[upolx, upoly],[ucirx, uciry],[uelpx, uelpy]]
weightx=[0,1,1,1,0,-1,-1,-1]
weighty=[1,1,0,-1,-1,-1,0,1]
cost=[1,np.sqrt(2),1,np.sqrt(2),1,np.sqrt(2),1,np.sqrt(2)]
graph=[]
tempx=init[0]
tempy=init[1]
pathx=[]
pathy=[]
paths_to_goal=[]
plt.tick_params(axis='both', which='major', labelsize=9)
print("Processing.....")
if(init and final):
nodes,node=astar(graph,str(init[0])+' '+str(init[1]),
str(final[0])+' '+str(final[1]),paths_to_goal,tempx,tempy,weightx,weighty,cost,final,pol)
if(node==0):
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= nodes)
label.pack()
test.mainloop()
else:
listPnts=[[uboxx, uboxy,'b'],[x, y,'r'],[upolx, upoly,'b'], [px, py,'r'],[ucirx, uciry,'b'],[p,q,'r'],[uelpx, uelpy,'b'],[r,s,'r']]
test=tk.Tk()
fig = plt.Figure(figsize=(5,4), dpi=100)
ax = fig.add_subplot(111)
line, = ax.plot([], [], 'y.',lw=0.3, alpha=0.2)
ax.grid()
scatter = FigureCanvasTkAgg(fig, test)
scatter.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH)
for i in (listPnts):
ax.fill(i[0],i[1], color = i[2])
ax.legend()
ax.grid(color=(0,0,0), linestyle='-', linewidth=1)
ax.set_title(title);
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ani = animation.FuncAnimation(fig, animated, nodes, fargs=(nodes, node,test), interval=10,repeat=False, blit=False)
test.mainloop()
else:
test=tk.Tk()
test.geometry('400x300')
label = Label(test, text= "Please check validity if Initial/Goal Coordinates, Resolution, Radius or Clearance.")
label.pack()
test.mainloop() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2758,
220,
352,
1160,
25,
3365,
25,
2623,
13130,
198,
198,
31,
9800,
25,
27065,
3099,
198,
37811,
198,
198,
2,
532,
9,
12,
19617,
25,
3... | 1.899443 | 3,948 |
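The obstacle test in pathAvailability above combines shapely's point-in-polygon check with an analytic clearance term; a compact sketch of the same idea, using shapely's buffer() to grow the obstacle by radius + clearance (the coordinates and clearance values are assumptions):

from shapely.geometry import Point, Polygon

radius, clearance = 2, 2
obstacle = Polygon([(125, 56), (163, 52), (170, 90), (193, 52), (173, 15), (150, 15)])
inflated = obstacle.buffer(radius + clearance)   # configuration-space obstacle

def is_free(x, y):
    # a node is usable only if it lies outside the inflated obstacle
    return not Point(x, y).within(inflated)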
# Copyright 2016 The Cebes Authors. All Rights Reserved.
#
# Licensed under the Apache License, version 2.0 (the "License").
# You may not use this work except in compliance with the License,
# which is available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import pandas as pd
import six
from pycebes.core import functions
from pycebes.core.column import Column
from pycebes.core.dataframe import Dataframe
from pycebes.core.exceptions import ServerException
from pycebes.core.sample import DataSample
from pycebes.core.schema import Schema, SchemaField, StorageTypes, VariableTypes
from pycebes.internal.responses import TaggedDataframeResponse
from tests import test_base
class TestDataframe(test_base.TestBase):
"""
SQL APIs
"""
"""
Data exploration
"""
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
1584,
383,
327,
1765,
274,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
2196,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
670,
2845,
287,
11846,
351... | 3.523677 | 359 |
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2014 Alterra, Wageningen-UR
# Allard de Wit (allard.dewit@wur.nl), April 2014
"""Tools for reading weather and parameter files.
For reading files in the CABO formats used by crop simulation models in FORTRAN and FST:
- CABOWeatherDataProvider reads CABOWE weather files for use in PCSE
- CABOFileReader reads CABO parameter files.
For reading the new PCSE format use:
- PCSEFileReader reads parameters files in the PCSE format
"""
from .cabo_reader import CABOFileReader
from .cabo_weather import CABOWeatherDataProvider
from .pcsefilereader import PCSEFileReader
from .xlsweatherdataprovider import ExcelWeatherDataProvider
from .yaml_agmt_loader import YAMLAgroManagementReader
from .csvweatherdataprovider import CSVWeatherDataProvider
from .yaml_cropdataprovider import YAMLCropDataProvider | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
5472,
12,
4967,
32770,
430,
11,
21309,
3101,
268,
12,
4261,
198,
2,
1439,
446,
390,
40648,
357,
439,
446,
13,
67,
413,
270,
31,
86,
333,
13,
... | 3.335968 | 253 |
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the AWSAccountAlias object."""
from unittest.mock import patch
from masu.database.account_alias_accessor import AccountAliasAccessor
from masu.external.accounts.labels.aws.aws_account_alias import AWSAccountAlias
from masu.test import MasuTestCase
class AWSAccountAliasTest(MasuTestCase):
"""Test Cases for the AWSAccountAlias object."""
def setUp(self):
"""Set up test case."""
super().setUp()
self.account_id = "111111111111"
def test_initializer(self):
"""Test AWSAccountAlias initializer."""
arn = "roleArn"
schema = "acct10001"
accessor = AWSAccountAlias(arn, schema)
self.assertEqual(accessor._role_arn, arn)
self.assertEqual(accessor._schema, schema)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization", return_value=[])
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_alias_no_alias(self, mock_get_alias, mock_get_account_names):
"""Test updating alias when none is set."""
mock_get_alias.return_value = (self.account_id, None)
role_arn = f"arn:aws:iam::{self.account_id}:role/CostManagement"
accessor = AWSAccountAlias(role_arn, "acct10001")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "acct10001")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertIsNone(db_access._obj.account_alias)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization", return_value=[])
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_alias_with_alias(self, mock_get_alias, mock_get_account_names):
"""Test updating alias."""
alias = "hccm-alias"
mock_get_alias.return_value = (self.account_id, alias)
role_arn = f"arn:aws:iam::{self.account_id}:role/CostManagement"
accessor = AWSAccountAlias(role_arn, "acct10001")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "acct10001")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertEqual(db_access._obj.account_alias, alias)
mock_get_alias.return_value = (self.account_id, None)
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "acct10001")
self.assertIsNone(db_access._obj.account_alias)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization")
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_via_orgs(self, mock_get_alias, mock_get_account_names):
"""Test update alias with org api response."""
alias = "hccm-alias"
mock_get_alias.return_value = (self.account_id, alias)
member_account_id = "1234598760"
member_account_name = "hccm-member"
account_names = [
{"id": self.account_id, "name": alias},
{"id": member_account_id, "name": member_account_name},
]
mock_get_account_names.return_value = account_names
role_arn = f"arn:aws:iam::{self.account_id}:role/CostManagement"
accessor = AWSAccountAlias(role_arn, "acct10001")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "acct10001")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertEqual(db_access._obj.account_alias, alias)
member_db_access = AccountAliasAccessor(member_account_id, "acct10001")
self.assertEqual(member_db_access._obj.account_id, member_account_id)
self.assertEqual(member_db_access._obj.account_alias, member_account_name)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization")
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_via_orgs_partial(self, mock_get_alias, mock_get_account_names):
"""Test update alias with org api with partial response."""
alias = "hccm-alias"
mock_get_alias.return_value = (self.account_id, alias)
member_account_id = "1234596750"
account_names = [{"id": self.account_id, "name": alias}, {"id": member_account_id}]
mock_get_account_names.return_value = account_names
role_arn = f"arn:aws:iam::{self.account_id}:role/CostManagement"
accessor = AWSAccountAlias(role_arn, "acct10001")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "acct10001")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertEqual(db_access._obj.account_alias, alias)
member_db_access = AccountAliasAccessor(member_account_id, "acct10001")
self.assertEqual(member_db_access._obj.account_id, member_account_id)
self.assertEqual(member_db_access._obj.account_alias, member_account_id)
| [
2,
198,
2,
15069,
33448,
2297,
10983,
3457,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
37811,
14402,
262,
14356,
4090,
535,
608,
40489,
2134,
526,
15931,
198,
6738,
555,
715,
395,
13... | 2.450606 | 2,146 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .... import operands
from .core import TensorRandomOperandMixin, handle_array
def f(random_state, dfnum, dfden, size=None, chunk_size=None, gpu=None, dtype=None):
"""
Draw samples from an F distribution.
Samples are drawn from an F distribution with specified parameters,
`dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
freedom in denominator), where both parameters should be greater than
zero.
The random variate of the F distribution (also known as the
Fisher distribution) is a continuous probability distribution
that arises in ANOVA tests, and is the ratio of two chi-square
variates.
Parameters
----------
dfnum : float or array_like of floats
Degrees of freedom in numerator, should be > 0.
dfden : float or array_like of float
Degrees of freedom in denominator, should be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
chunk_size : int or tuple of int or tuple of ints, optional
Desired chunk size on each dimension
gpu : bool, optional
Allocate the tensor on GPU if True, False as default
dtype : data-type, optional
Data-type of the returned tensor.
Returns
-------
out : Tensor or scalar
Drawn samples from the parameterized Fisher distribution.
See Also
--------
scipy.stats.f : probability density function, distribution or
cumulative density function, etc.
Notes
-----
The F statistic is used to compare in-group variances to between-group
variances. Calculating the distribution depends on the sampling, and
so it is a function of the respective degrees of freedom in the
problem. The variable `dfnum` is the number of samples minus one, the
between-groups degrees of freedom, while `dfden` is the within-groups
degrees of freedom, the sum of the number of samples in each group
minus the number of groups.
References
----------
.. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
Fifth Edition, 2002.
.. [2] Wikipedia, "F-distribution",
http://en.wikipedia.org/wiki/F-distribution
Examples
--------
An example from Glantz[1], pp 47-40:
Two groups, children of diabetics (25 people) and children from people
without diabetes (25 controls). Fasting blood glucose was measured,
case group had a mean value of 86.1, controls had a mean value of
82.2. Standard deviations were 2.09 and 2.49 respectively. Are these
data consistent with the null hypothesis that the parents diabetic
status does not affect their children's blood glucose levels?
Calculating the F statistic from the data gives a value of 36.01.
Draw samples from the distribution:
>>> import mars.tensor as mt
>>> dfnum = 1. # between group degrees of freedom
>>> dfden = 48. # within groups degrees of freedom
>>> s = mt.random.f(dfnum, dfden, 1000).execute()
The lower bound for the top 1% of the samples is :
>>> sorted(s)[-10]
7.61988120985
So there is about a 1% chance that the F statistic will exceed 7.62,
the measured value is 36, so the null hypothesis is rejected at the 1%
level.
"""
if dtype is None:
dtype = np.random.RandomState().f(
handle_array(dfnum), handle_array(dfden), size=(0,)).dtype
size = random_state._handle_size(size)
op = TensorF(state=random_state._state, size=size, gpu=gpu, dtype=dtype)
return op(dfnum, dfden, chunk_size=chunk_size)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
7358,
12,
7908,
41992,
4912,
31703,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
36... | 3.089102 | 1,459 |
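The docstring example above runs on mars.tensor; the same check with plain NumPy (the printed number will vary from run to run) is:

import numpy as np

dfnum, dfden = 1., 48.            # between- and within-group degrees of freedom
s = np.random.f(dfnum, dfden, 1000)
print(sorted(s)[-10])             # empirical cut-off for the top 1% of 1000 samples, around 7.6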
from . import envs
from gym.envs.registration import register
__version__ = "0.0.1"
register(
id='snake-v0',
entry_point='gym_snake.envs:SnakeEnv'
)
| [
6738,
764,
1330,
551,
14259,
198,
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
198,
834,
9641,
834,
796,
366,
15,
13,
15,
13,
16,
1,
198,
198,
30238,
7,
198,
220,
220,
220,
4686,
11639,
16184,
539,
12,
85,
15,
... | 2.304348 | 69 |
import datetime
import os
import plistlib
import subprocess
import time
from distutils.version import LooseVersion
factoid = 'gatekeeper_date'
def fact():
    '''Returns the install date of the most recent Gatekeeper package'''
result = 'None'
try:
gkpkgs = subprocess.check_output(['/usr/sbin/pkgutil',
'--pkgs=.*Gatekeeper.*'])
dates = []
for pkgid in gkpkgs.splitlines():
pkginfo_plist = subprocess.check_output(['/usr/sbin/pkgutil',
'--pkg-info-plist', pkgid])
pkginfo = plistlib.readPlistFromString(pkginfo_plist)
dates.append(pkginfo['install-time'])
result = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(max(dates)))
except (OSError, IOError, subprocess.CalledProcessError):
pass
return {factoid: result}
if __name__ == '__main__':
print '<result>%s</result>' % fact()[factoid]
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
458,
396,
8019,
198,
11748,
850,
14681,
198,
11748,
640,
198,
6738,
1233,
26791,
13,
9641,
1330,
6706,
577,
14815,
198,
198,
22584,
1868,
796,
705,
10494,
13884,
62,
4475,
6,
198,
198,
... | 2.065126 | 476 |
# -*- coding: utf-8 -*-
"""ThreatConnect Playbook App"""
from langdetect import detect_langs
# Import default Playbook Class (Required)
from playbook_app import PlaybookApp
class App(PlaybookApp):
"""Playbook App"""
def run(self):
"""Run the App main logic.
This method should contain the core logic of the App.
"""
text = self.tcex.playbook.read(self.args.text)
detected_language_code = detect_langs(text)[0].lang
detected_language_probability = detect_langs(text)[0].prob
self.tcex.playbook.create_output('detectedLanguageCode', detected_language_code, 'String')
self.tcex.playbook.create_output('detectedLanguageProbability', detected_language_probability, 'String')
self.exit_message = 'Detected the language as {} (with a probability of {})'.format(detected_language_code, detected_language_probability)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
817,
630,
13313,
3811,
2070,
2034,
37811,
198,
198,
6738,
42392,
15255,
478,
1330,
4886,
62,
17204,
82,
198,
198,
2,
17267,
4277,
3811,
2070,
5016,
357,
37374,
8,... | 2.785714 | 322 |
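A standalone sketch of the langdetect calls used by the playbook above; the sample sentence is made up and the seed is only there to make detection deterministic:

from langdetect import DetectorFactory, detect_langs

DetectorFactory.seed = 0
candidates = detect_langs("Bonjour tout le monde, ceci est un exemple.")
print(candidates[0].lang, candidates[0].prob)   # best guess, e.g. 'fr', with its probability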
from .consola import Consola
from .uiscreen import UIScreen
import datetime
| [
6738,
764,
5936,
5708,
1330,
3515,
5708,
198,
6738,
764,
84,
2304,
1361,
1330,
471,
1797,
32060,
198,
11748,
4818,
8079,
628
] | 3.5 | 22 |
from setuptools import setup, find_packages
setup(
name="discorddump",
packages=find_packages(),
zip_safe=False,
) | [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
15410,
585,
39455,
1600,
198,
220,
220,
220,
10392,
28,
19796,
62,
43789,
22784,
198,
220,
220,
220,
19974,
62,
21230,
28,
... | 2.76087 | 46 |
import typing # noqa: F401
import datetime as _datetime # noqa: F401
from kubernetes import client # noqa: F401
from kuber import kube_api as _kube_api # noqa: F401
from kuber import definitions as _kuber_definitions # noqa: F401
from kuber import _types # noqa: F401
from kuber.v1_18.meta_v1 import ListMeta # noqa: F401
from kuber.v1_18.meta_v1 import ObjectMeta # noqa: F401
from kuber.v1_18.meta_v1 import Status # noqa: F401
from kuber.v1_18.meta_v1 import StatusDetails # noqa: F401
class APIService(_kuber_definitions.Resource):
"""
APIService represents a server for a particular
GroupVersion. Name must be "version.group".
"""
def __init__(
self,
metadata: "ObjectMeta" = None,
spec: "APIServiceSpec" = None,
status: "APIServiceStatus" = None,
):
"""Create APIService instance."""
super(APIService, self).__init__(
api_version="apiregistration.k8s.io/v1", kind="APIService"
)
self._properties = {
"metadata": metadata if metadata is not None else ObjectMeta(),
"spec": spec if spec is not None else APIServiceSpec(),
"status": status if status is not None else APIServiceStatus(),
}
self._types = {
"apiVersion": (str, None),
"kind": (str, None),
"metadata": (ObjectMeta, None),
"spec": (APIServiceSpec, None),
"status": (APIServiceStatus, None),
}
@property
def metadata(self) -> "ObjectMeta":
""" """
return typing.cast(
"ObjectMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ObjectMeta", dict]):
""" """
if isinstance(value, dict):
value = typing.cast(
ObjectMeta,
ObjectMeta().from_dict(value),
)
self._properties["metadata"] = value
@property
def spec(self) -> "APIServiceSpec":
"""
Spec contains information for locating and communicating
with a server
"""
return typing.cast(
"APIServiceSpec",
self._properties.get("spec"),
)
@spec.setter
def spec(self, value: typing.Union["APIServiceSpec", dict]):
"""
Spec contains information for locating and communicating
with a server
"""
if isinstance(value, dict):
value = typing.cast(
APIServiceSpec,
APIServiceSpec().from_dict(value),
)
self._properties["spec"] = value
@property
def status(self) -> "APIServiceStatus":
"""
Status contains derived information about an API server
"""
return typing.cast(
"APIServiceStatus",
self._properties.get("status"),
)
@status.setter
def status(self, value: typing.Union["APIServiceStatus", dict]):
"""
Status contains derived information about an API server
"""
if isinstance(value, dict):
value = typing.cast(
APIServiceStatus,
APIServiceStatus().from_dict(value),
)
self._properties["status"] = value
def create_resource(self, namespace: "str" = None) -> "APIServiceStatus":
"""
Creates the APIService in the currently
configured Kubernetes cluster and returns the status information
returned by the Kubernetes API after the create is complete.
"""
names = ["create_namespaced_api_service", "create_api_service"]
response = _kube_api.execute(
action="create",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict()},
)
output = APIServiceStatus()
if response is not None:
output.from_dict(_kube_api.to_kuber_dict(response.status))
return output
def replace_resource(self, namespace: "str" = None) -> "APIServiceStatus":
"""
Replaces the APIService in the currently
configured Kubernetes cluster and returns the status information
returned by the Kubernetes API after the replace is complete.
"""
names = ["replace_namespaced_api_service", "replace_api_service"]
response = _kube_api.execute(
action="replace",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
output = APIServiceStatus()
if response is not None:
output.from_dict(_kube_api.to_kuber_dict(response.status))
return output
def patch_resource(self, namespace: "str" = None) -> "APIServiceStatus":
"""
Patches the APIService in the currently
configured Kubernetes cluster and returns the status information
returned by the Kubernetes API after the replace is complete.
"""
names = ["patch_namespaced_api_service", "patch_api_service"]
response = _kube_api.execute(
action="patch",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
output = APIServiceStatus()
if response is not None:
output.from_dict(_kube_api.to_kuber_dict(response.status))
return output
def get_resource_status(self, namespace: "str" = None) -> "APIServiceStatus":
"""
Returns status information about the given resource within the cluster.
"""
names = [
"read_namespaced_api_service",
"read_api_service",
]
response = _kube_api.execute(
action="read",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name},
)
output = APIServiceStatus()
if response is not None:
output.from_dict(_kube_api.to_kuber_dict(response.status))
return output
def read_resource(self, namespace: str = None):
"""
Reads the APIService from the currently configured
Kubernetes cluster and returns the low-level definition object.
"""
names = [
"read_namespaced_api_service",
"read_api_service",
]
return _kube_api.execute(
action="read",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name},
)
def delete_resource(
self,
namespace: str = None,
propagation_policy: str = "Foreground",
grace_period_seconds: int = 10,
):
"""
Deletes the APIService from the currently configured
Kubernetes cluster.
"""
names = [
"delete_namespaced_api_service",
"delete_api_service",
]
body = client.V1DeleteOptions(
propagation_policy=propagation_policy,
grace_period_seconds=grace_period_seconds,
)
_kube_api.execute(
action="delete",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name, "body": body},
)
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.ApiregistrationV1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.ApiregistrationV1Api(**kwargs)
class APIServiceCondition(_kuber_definitions.Definition):
"""
APIServiceCondition describes the state of an APIService at
a particular point
"""
def __init__(
self,
last_transition_time: str = None,
message: str = None,
reason: str = None,
status: str = None,
type_: str = None,
):
"""Create APIServiceCondition instance."""
super(APIServiceCondition, self).__init__(
api_version="apiregistration.k8s.io/v1", kind="APIServiceCondition"
)
self._properties = {
"lastTransitionTime": last_transition_time
if last_transition_time is not None
else None,
"message": message if message is not None else "",
"reason": reason if reason is not None else "",
"status": status if status is not None else "",
"type": type_ if type_ is not None else "",
}
self._types = {
"lastTransitionTime": (str, None),
"message": (str, None),
"reason": (str, None),
"status": (str, None),
"type": (str, None),
}
@property
def last_transition_time(self) -> str:
"""
Last time the condition transitioned from one status to
another.
"""
return typing.cast(
str,
self._properties.get("lastTransitionTime"),
)
@last_transition_time.setter
def last_transition_time(
self, value: typing.Union[str, _datetime.datetime, _datetime.date]
):
"""
Last time the condition transitioned from one status to
another.
"""
if isinstance(value, _datetime.datetime):
value = value.strftime("%Y-%m-%dT%H:%M:%SZ")
elif isinstance(value, _datetime.date):
value = value.strftime("%Y-%m-%dT00:00:00Z")
self._properties["lastTransitionTime"] = value
@property
def message(self) -> str:
"""
Human-readable message indicating details about last
transition.
"""
return typing.cast(
str,
self._properties.get("message"),
)
@message.setter
def message(self, value: str):
"""
Human-readable message indicating details about last
transition.
"""
self._properties["message"] = value
@property
def reason(self) -> str:
"""
Unique, one-word, CamelCase reason for the condition's last
transition.
"""
return typing.cast(
str,
self._properties.get("reason"),
)
@reason.setter
def reason(self, value: str):
"""
Unique, one-word, CamelCase reason for the condition's last
transition.
"""
self._properties["reason"] = value
@property
def status(self) -> str:
"""
Status is the status of the condition. Can be True, False,
Unknown.
"""
return typing.cast(
str,
self._properties.get("status"),
)
@status.setter
def status(self, value: str):
"""
Status is the status of the condition. Can be True, False,
Unknown.
"""
self._properties["status"] = value
@property
def type_(self) -> str:
"""
Type is the type of the condition.
"""
return typing.cast(
str,
self._properties.get("type"),
)
@type_.setter
def type_(self, value: str):
"""
Type is the type of the condition.
"""
self._properties["type"] = value
class APIServiceList(_kuber_definitions.Collection):
"""
APIServiceList is a list of APIService objects.
"""
def __init__(
self,
items: typing.List["APIService"] = None,
metadata: "ListMeta" = None,
):
"""Create APIServiceList instance."""
super(APIServiceList, self).__init__(
api_version="apiregistration.k8s.io/v1", kind="APIServiceList"
)
self._properties = {
"items": items if items is not None else [],
"metadata": metadata if metadata is not None else ListMeta(),
}
self._types = {
"apiVersion": (str, None),
"items": (list, APIService),
"kind": (str, None),
"metadata": (ListMeta, None),
}
@property
def items(self) -> typing.List["APIService"]:
""" """
return typing.cast(
typing.List["APIService"],
self._properties.get("items"),
)
@items.setter
def items(self, value: typing.Union[typing.List["APIService"], typing.List[dict]]):
""" """
cleaned: typing.List[APIService] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
APIService,
APIService().from_dict(item),
)
cleaned.append(typing.cast(APIService, item))
self._properties["items"] = cleaned
@property
def metadata(self) -> "ListMeta":
""" """
return typing.cast(
"ListMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ListMeta", dict]):
""" """
if isinstance(value, dict):
value = typing.cast(
ListMeta,
ListMeta().from_dict(value),
)
self._properties["metadata"] = value
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.ApiregistrationV1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.ApiregistrationV1Api(**kwargs)
class APIServiceSpec(_kuber_definitions.Definition):
"""
APIServiceSpec contains information for locating and
communicating with a server. Only https is supported, though
you are able to disable certificate verification.
"""
def __init__(
self,
ca_bundle: str = None,
group: str = None,
group_priority_minimum: int = None,
insecure_skip_tlsverify: bool = None,
service: "ServiceReference" = None,
version: str = None,
version_priority: int = None,
):
"""Create APIServiceSpec instance."""
super(APIServiceSpec, self).__init__(
api_version="apiregistration.k8s.io/v1", kind="APIServiceSpec"
)
self._properties = {
"caBundle": ca_bundle if ca_bundle is not None else "",
"group": group if group is not None else "",
"groupPriorityMinimum": group_priority_minimum
if group_priority_minimum is not None
else None,
"insecureSkipTLSVerify": insecure_skip_tlsverify
if insecure_skip_tlsverify is not None
else None,
"service": service if service is not None else ServiceReference(),
"version": version if version is not None else "",
"versionPriority": version_priority
if version_priority is not None
else None,
}
self._types = {
"caBundle": (str, None),
"group": (str, None),
"groupPriorityMinimum": (int, None),
"insecureSkipTLSVerify": (bool, None),
"service": (ServiceReference, None),
"version": (str, None),
"versionPriority": (int, None),
}
@property
def ca_bundle(self) -> str:
"""
CABundle is a PEM encoded CA bundle which will be used to
validate an API server's serving certificate. If
unspecified, system trust roots on the apiserver are used.
"""
return typing.cast(
str,
self._properties.get("caBundle"),
)
@ca_bundle.setter
def ca_bundle(self, value: str):
"""
CABundle is a PEM encoded CA bundle which will be used to
validate an API server's serving certificate. If
unspecified, system trust roots on the apiserver are used.
"""
self._properties["caBundle"] = value
@property
def group(self) -> str:
"""
Group is the API group name this server hosts
"""
return typing.cast(
str,
self._properties.get("group"),
)
@group.setter
def group(self, value: str):
"""
Group is the API group name this server hosts
"""
self._properties["group"] = value
@property
def group_priority_minimum(self) -> int:
"""
GroupPriorityMininum is the priority this group should have
at least. Higher priority means that the group is preferred
by clients over lower priority ones. Note that other
versions of this group might specify even higher
GroupPriorityMininum values such that the whole group gets a
higher priority. The primary sort is based on
GroupPriorityMinimum, ordered highest number to lowest (20
before 10). The secondary sort is based on the alphabetical
comparison of the name of the object. (v1.bar before
v1.foo) We'd recommend something like: *.k8s.io (except
extensions) at 18000 and PaaSes (OpenShift, Deis) are
recommended to be in the 2000s
"""
return typing.cast(
int,
self._properties.get("groupPriorityMinimum"),
)
@group_priority_minimum.setter
def group_priority_minimum(self, value: int):
"""
GroupPriorityMininum is the priority this group should have
at least. Higher priority means that the group is preferred
by clients over lower priority ones. Note that other
versions of this group might specify even higher
GroupPriorityMininum values such that the whole group gets a
higher priority. The primary sort is based on
GroupPriorityMinimum, ordered highest number to lowest (20
before 10). The secondary sort is based on the alphabetical
comparison of the name of the object. (v1.bar before
v1.foo) We'd recommend something like: *.k8s.io (except
extensions) at 18000 and PaaSes (OpenShift, Deis) are
recommended to be in the 2000s
"""
self._properties["groupPriorityMinimum"] = value
@property
def insecure_skip_tlsverify(self) -> bool:
"""
InsecureSkipTLSVerify disables TLS certificate verification
when communicating with this server. This is strongly
discouraged. You should use the CABundle instead.
"""
return typing.cast(
bool,
self._properties.get("insecureSkipTLSVerify"),
)
@insecure_skip_tlsverify.setter
def insecure_skip_tlsverify(self, value: bool):
"""
InsecureSkipTLSVerify disables TLS certificate verification
when communicating with this server. This is strongly
discouraged. You should use the CABundle instead.
"""
self._properties["insecureSkipTLSVerify"] = value
@property
def service(self) -> "ServiceReference":
"""
Service is a reference to the service for this API server.
It must communicate on port 443 If the Service is nil, that
means the handling for the API groupversion is handled
locally on this server. The call will simply delegate to the
normal handler chain to be fulfilled.
"""
return typing.cast(
"ServiceReference",
self._properties.get("service"),
)
@service.setter
def service(self, value: typing.Union["ServiceReference", dict]):
"""
Service is a reference to the service for this API server.
It must communicate on port 443 If the Service is nil, that
means the handling for the API groupversion is handled
locally on this server. The call will simply delegate to the
normal handler chain to be fulfilled.
"""
if isinstance(value, dict):
value = typing.cast(
ServiceReference,
ServiceReference().from_dict(value),
)
self._properties["service"] = value
@property
def version(self) -> str:
"""
Version is the API version this server hosts. For example,
"v1"
"""
return typing.cast(
str,
self._properties.get("version"),
)
@version.setter
def version(self, value: str):
"""
Version is the API version this server hosts. For example,
"v1"
"""
self._properties["version"] = value
@property
def version_priority(self) -> int:
"""
VersionPriority controls the ordering of this API version
inside of its group. Must be greater than zero. The primary
sort is based on VersionPriority, ordered highest to lowest
(20 before 10). Since it's inside of a group, the number can
be small, probably in the 10s. In case of equal version
priorities, the version string will be used to compute the
order inside a group. If the version string is "kube-like",
it will sort above non "kube-like" version strings, which
are ordered lexicographically. "Kube-like" versions start
with a "v", then are followed by a number (the major
version), then optionally the string "alpha" or "beta" and
another number (the minor version). These are sorted first
by GA > beta > alpha (where GA is a version with no suffix
such as beta or alpha), and then by comparing major version,
then minor version. An example sorted list of versions: v10,
v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2,
foo1, foo10.
"""
return typing.cast(
int,
self._properties.get("versionPriority"),
)
@version_priority.setter
def version_priority(self, value: int):
"""
VersionPriority controls the ordering of this API version
inside of its group. Must be greater than zero. The primary
sort is based on VersionPriority, ordered highest to lowest
(20 before 10). Since it's inside of a group, the number can
be small, probably in the 10s. In case of equal version
priorities, the version string will be used to compute the
order inside a group. If the version string is "kube-like",
it will sort above non "kube-like" version strings, which
are ordered lexicographically. "Kube-like" versions start
with a "v", then are followed by a number (the major
version), then optionally the string "alpha" or "beta" and
another number (the minor version). These are sorted first
by GA > beta > alpha (where GA is a version with no suffix
such as beta or alpha), and then by comparing major version,
then minor version. An example sorted list of versions: v10,
v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2,
foo1, foo10.
"""
self._properties["versionPriority"] = value
class APIServiceStatus(_kuber_definitions.Definition):
"""
APIServiceStatus contains derived information about an API
server
"""
def __init__(
self,
conditions: typing.List["APIServiceCondition"] = None,
):
"""Create APIServiceStatus instance."""
super(APIServiceStatus, self).__init__(
api_version="apiregistration.k8s.io/v1", kind="APIServiceStatus"
)
self._properties = {
"conditions": conditions if conditions is not None else [],
}
self._types = {
"conditions": (list, APIServiceCondition),
}
@property
def conditions(self) -> typing.List["APIServiceCondition"]:
"""
Current service state of apiService.
"""
return typing.cast(
typing.List["APIServiceCondition"],
self._properties.get("conditions"),
)
@conditions.setter
def conditions(
self, value: typing.Union[typing.List["APIServiceCondition"], typing.List[dict]]
):
"""
Current service state of apiService.
"""
cleaned: typing.List[APIServiceCondition] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
APIServiceCondition,
APIServiceCondition().from_dict(item),
)
cleaned.append(typing.cast(APIServiceCondition, item))
self._properties["conditions"] = cleaned
class ServiceReference(_kuber_definitions.Definition):
"""
ServiceReference holds a reference to Service.legacy.k8s.io
"""
def __init__(
self,
name: str = None,
namespace: str = None,
port: int = None,
):
"""Create ServiceReference instance."""
super(ServiceReference, self).__init__(
api_version="apiregistration.k8s.io/v1", kind="ServiceReference"
)
self._properties = {
"name": name if name is not None else "",
"namespace": namespace if namespace is not None else "",
"port": port if port is not None else None,
}
self._types = {
"name": (str, None),
"namespace": (str, None),
"port": (int, None),
}
@property
def name(self) -> str:
"""
Name is the name of the service
"""
return typing.cast(
str,
self._properties.get("name"),
)
@name.setter
def name(self, value: str):
"""
Name is the name of the service
"""
self._properties["name"] = value
@property
def namespace(self) -> str:
"""
Namespace is the namespace of the service
"""
return typing.cast(
str,
self._properties.get("namespace"),
)
@namespace.setter
def namespace(self, value: str):
"""
Namespace is the namespace of the service
"""
self._properties["namespace"] = value
@property
def port(self) -> int:
"""
If specified, the port on the service that hosting webhook.
Default to 443 for backward compatibility. `port` should be
a valid port number (1-65535, inclusive).
"""
return typing.cast(
int,
self._properties.get("port"),
)
@port.setter
def port(self, value: int):
"""
If specified, the port on the service that hosting webhook.
Default to 443 for backward compatibility. `port` should be
a valid port number (1-65535, inclusive).
"""
self._properties["port"] = value
| [
11748,
19720,
220,
1303,
645,
20402,
25,
376,
21844,
198,
11748,
4818,
8079,
355,
4808,
19608,
8079,
220,
1303,
645,
20402,
25,
376,
21844,
198,
198,
6738,
479,
18478,
3262,
274,
1330,
5456,
220,
1303,
645,
20402,
25,
376,
21844,
198,
... | 2.280867 | 11,995 |
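A hypothetical construction sketch using only the classes defined above, assuming ObjectMeta accepts a name keyword like the other generated classes; the group, version, priorities and service reference are made-up values:

svc = APIService(
    metadata=ObjectMeta(name="v1.example.io"),
    spec=APIServiceSpec(
        group="example.io",
        version="v1",
        group_priority_minimum=18000,
        version_priority=15,
        service=ServiceReference(name="example-api", namespace="default", port=443),
    ),
)
# svc.create_resource() would then submit it to the configured cluster.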
#!/usr/bin/env python
"""
_GetFilesForParentlessMerge_
MySQL implementation of Subscription.GetFilesForParentlessMerge
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetFilesForParentlessMerge(DBFormatter):
"""
    This query needs to return the following for any file that is deemed
mergeable:
WMBS ID (file_id)
Events (file_events)
Size (file_size)
LFN (file_lfn)
First event in file (file_first_event)
Runs in file (file_run)
Lumi sections in file (file_lumi)
PNN
"""
sql = """SELECT wmbs_file_details.id AS file_id,
wmbs_file_details.events AS file_events,
wmbs_file_details.filesize AS file_size,
wmbs_file_details.lfn AS file_lfn,
wmbs_file_details.first_event AS file_first_event,
MIN(wmbs_file_runlumi_map.run) AS file_run,
MIN(wmbs_file_runlumi_map.lumi) AS file_lumi,
wmbs_pnns.pnn,
wmbs_fileset_files.insert_time AS insert_time,
wmbs_workflow.injected AS injected
FROM wmbs_sub_files_available
INNER JOIN wmbs_file_details ON
wmbs_sub_files_available.fileid = wmbs_file_details.id
INNER JOIN wmbs_file_runlumi_map ON
wmbs_file_details.id = wmbs_file_runlumi_map.fileid
INNER JOIN wmbs_file_location ON
wmbs_file_details.id = wmbs_file_location.fileid
INNER JOIN wmbs_pnns ON
wmbs_file_location.pnn = wmbs_pnns.id
INNER JOIN wmbs_subscription ON
wmbs_subscription.id = wmbs_sub_files_available.subscription
INNER JOIN wmbs_fileset_files ON
wmbs_fileset_files.fileset = wmbs_subscription.fileset AND
wmbs_fileset_files.fileid = wmbs_sub_files_available.fileid
INNER JOIN wmbs_workflow ON
wmbs_workflow.id = wmbs_subscription.workflow
WHERE wmbs_sub_files_available.subscription = :p_1
GROUP BY wmbs_file_details.id,
wmbs_file_details.events,
wmbs_file_details.filesize,
wmbs_file_details.lfn,
wmbs_file_details.first_event,
wmbs_pnns.pnn,
wmbs_fileset_files.insert_time,
wmbs_workflow.injected
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
62,
3855,
25876,
1890,
24546,
1203,
13102,
469,
62,
198,
198,
3666,
17861,
7822,
286,
3834,
33584,
13,
3855,
25876,
1890,
24546,
1203,
13102,
469,
198,
37811,
198,
198,
6738,... | 1.829339 | 1,377 |
# Generated by Django 2.2.2 on 2019-11-07 12:49
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
17,
319,
13130,
12,
1157,
12,
2998,
1105,
25,
2920,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import json
import time
from p415_customerializer import to_json
from file_path_collect import output_json_path_dir as json_output
entry = {'title': 'Dive into history, 2009 edition',
         'article_link': 'http://diveintomark.org/archives/2009/03/27/dive-into-history-2009-edition',
'comments_link': None, 'internal_id': b'\xDE\xD5\xB4\xF8', 'tags': ('diveintopython', 'docbook', 'html'),
'published': True, 'published_date': time.strptime('Fri Mar 27 22:20:42 2009')}
with open(json_output + 'entry.json', 'w', encoding='utf-8') as f:
    # the time object can now be dumped; it is serialized as an array of ints
json.dump(entry, f, default=to_json)
"""
{'article_link': 'http://diveintomark.org/archives/2009/03/27/dive-into-history-2009-edition',
'comments_link': None,
'internal_id': b'\xde\xd5\xb4\xf8',
'published': True,
'published_date': time.struct_time(tm_year=2009, tm_mon=3, tm_mday=27, tm_hour=22, tm_min=20, tm_sec=42, tm_wday=4, tm_yday=86, tm_isdst=-1),
'tags': ('diveintopython', 'docbook', 'html'),
 'title': 'Dive into history, 2009 edition'}
"""
| [
11748,
33918,
198,
11748,
640,
198,
198,
6738,
279,
35038,
62,
23144,
48499,
7509,
1330,
284,
62,
17752,
198,
6738,
2393,
62,
6978,
62,
33327,
1330,
5072,
62,
17752,
62,
6978,
62,
15908,
355,
33918,
62,
22915,
198,
198,
13000,
796,
13... | 2.268657 | 469 |
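to_json is imported from p415_customerializer but not shown; a sketch consistent with the comment above (the struct_time becomes an array of ints) might look like this, where the bytes handling is an extra assumption:

import time

def to_json(obj):
    if isinstance(obj, time.struct_time):
        return list(obj)                      # struct_time -> list of nine ints
    if isinstance(obj, bytes):
        return obj.decode('latin-1')          # assumption: make the bytes field serializable
    raise TypeError(repr(obj) + ' is not JSON serializable')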
import socket
import threading
import socketserver
import logging
import Authorization
import Decoder as Decoder_File
import Localdatabase
import MongoDatabase_Global
import ServerDataOffer
import ServerDataReceiver
from Decoder import Decoder
from MongoDatabase_Global import MongoTB_Global
from ServerDataReceiver import ThreadedTCPRequestHandler
from ServerDataOffer import ThreadedTCPOfferHandler
import Localdatabase
from Localdatabase import Database
import settings
import os
from time import sleep
import _thread
from datetime import date, timedelta
mongo = MongoTB_Global()
dec = Decoder()
if __name__ == "__main__":
logging.basicConfig(filename='/home/over/deamon.log', level=logging.WARNING, format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S %p')
settings.init()
HOST = settings.HOST
PORT = settings.PORT
PORTMACTOWEBFILEDOWNLOAD = settings.PORTMACTOWEBFILEDOWNLOAD
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
server_file = ThreadedTCPServer((HOST,PORTMACTOWEBFILEDOWNLOAD), ThreadedTCPOfferHandler)
ip, port = server.server_address
ip_file, port_file = server_file.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
server_file_thread = threading.Thread(target=server_file.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = False
server_thread.start()
    server_file_thread.daemon = False
server_file_thread.start()
logging.info("Server loop running in thread: {}".format(server_thread.name))
logging.info("Server loop running in thread: {}".format(server_file_thread.name))
_thread.start_new_thread(parse_loop, ())
| [
11748,
17802,
198,
11748,
4704,
278,
198,
11748,
37037,
18497,
198,
198,
11748,
18931,
198,
198,
11748,
35263,
198,
11748,
34580,
355,
34580,
62,
8979,
198,
11748,
15181,
1940,
265,
5754,
198,
11748,
42591,
38105,
62,
22289,
198,
11748,
9... | 3.092593 | 594 |
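ThreadedTCPServer is used above but not defined in this excerpt; the usual socketserver recipe (an assumption about the original) is:

import socketserver

class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    allow_reuse_address = True   # let the daemon rebind its ports quickly after a restart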
import cv2
import os
import urllib.request
import numpy as np
from config import *
from lxml import etree
from random import randint
from telethon import TelegramClient, events, sync
from telethon.tl.functions.photos import UploadProfilePhotoRequest
### Log in Telegram
try:
proxy
except NameError:
proxy = None
print('Logging in Telegram')
client = TelegramClient('waifu_session', api_id, api_hash, proxy=proxy)
client.start()
print('Logged in as', client.get_me().username)
### Get number of pages
data = urllib.request.urlopen(search_url).read()
html = etree.HTML(data)
page_links = html.xpath('//*[@id="paginator"]/div/a')
pages = int(page_links[-2].text)
print('Got', page_links[-2].text, 'pages')
### Download image list
page = randint(1, pages)
print('Selected page', page)
search_url_page = search_url + '&page=' + str(page)
data = urllib.request.urlopen(search_url_page).read()
html = etree.HTML(data)
image_detail_urls = html.xpath("//ul[@id='post-list-posts']/li[count(a/span[2][substring-before(text() , 'x') >= substring-after(text() , 'x')]) > 0]//a[@class='thumb']/@href")
count = len(image_detail_urls)
print('Got', count, 'images')
### Face detection
### Download image
for _ in range(count):
image_detail_url = url_prefix + image_detail_urls[randint(0, count - 1)]
image_detail = urllib.request.urlopen(image_detail_url).read()
html = etree.HTML(image_detail)
image_url = html.xpath("//*[@id='highres']/@href")[0]
print('Image URL:', image_url)
image_data = urllib.request.urlopen(image_url).read()
arr = np.asarray(bytearray(image_data), dtype=np.uint8)
# Detect face
img = cv2.imdecode(arr, -1)
faces = detect(img)
if len(faces) == 0:
print('No face detected')
continue
if len(faces) > 1:
print('Too many faces:', len(faces))
continue
print('Detected', len(faces), 'faces')
face = faces[randint(0, len(faces) - 1)]
(x, y, w, h) = face
crop_img = img[y:y+h, x:x+w]
encoded = cv2.imencode('.png', crop_img)[1]
data_encoded = np.array(encoded)
str_encoded = data_encoded.tostring()
print(len(str_encoded))
### Set profile photo
uploaded = client.upload_file(str_encoded)
print('Uploaded ID:', uploaded.id)
client(UploadProfilePhotoRequest(uploaded))
print('Successfully changed profile photo')
### Log
with open('avatar.log', 'a') as f:
f.write(image_detail_url + '\n')
break
| [
11748,
269,
85,
17,
198,
11748,
28686,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4566,
1330,
1635,
198,
6738,
300,
19875,
1330,
2123,
631,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
5735,
... | 2.546584 | 966 |
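detect() is called above but never defined in this excerpt; a plausible OpenCV cascade-based sketch, where the cascade file name is an assumption (e.g. nagadomi's lbpcascade_animeface.xml):

import cv2

cascade = cv2.CascadeClassifier('lbpcascade_animeface.xml')

def detect(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    # returns (x, y, w, h) rectangles, matching how `faces` is consumed above
    return cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(24, 24))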
# -*- coding: utf-8 -*-
import os
from pathlib import Path
import pytest
from brainhacker.utils.download import _url_to_local_path, _fetch_file, mne_data_path
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
12972,
9288,
198,
198,
6738,
3632,
71,
10735,
13,
26791,
13,
15002,
1330,
4808,
6371,
62,
1462,
62,
12001,
... | 2.405405 | 74 |
from . import good, fail, semantic_fail
# Basic comparisons, unary syntax
good('=0', 0)
good('==0', 0)
fail('=0', 1)
fail('==0', 1)
good('!=0', 1)
fail('!=0', 0)
good('>0', 1)
fail('>0', 0)
fail('>0', -1)
good('>=0', 1)
good('>=0', 0)
fail('>=0', -1)
good('<0', -1)
fail('<0', 0)
fail('<0', +1)
good('<=0', -1)
good('<=0', 0)
fail('<=0', +1)
good('<=1', 1)
good('>=1', 1)
good('=1', 1)
good('<=1', 0)
# wrong types
good('=1', 1)
semantic_fail('=1', [1])
semantic_fail('=0', [0])
semantic_fail('>0', [])
# binary syntax
good('1>0', None)
fail('1>1', None)
good('0<1', None)
fail('1<1', None)
good('1>=0', None)
fail('1>=2', None)
good('0<=1', None)
fail('2<=1', None)
good('1=1', None)
fail('1=0', None)
good('1==1', None)
fail('1==0', None)
good('0!=1', None)
fail('0!=0', None)
good('1+1>=0', None)
fail('0>=1+1', None)
good('1-1=0', None)
fail('1-1=1', None)
good('-1<=1-1', None)
good('3*2>=2*1', None)
| [
6738,
764,
1330,
922,
11,
2038,
11,
37865,
62,
32165,
198,
198,
2,
14392,
17909,
11,
4326,
560,
15582,
198,
11274,
10786,
28,
15,
3256,
657,
8,
198,
11274,
10786,
855,
15,
3256,
657,
8,
198,
32165,
10786,
28,
15,
3256,
352,
8,
198... | 1.93038 | 474 |
import requests
import urllib2
import os, sys
import json
import anyjson
import traceback
from sh import git
from peyotl import convert_nexson_format, \
concatenate_collections, \
tree_is_in_collection
from peyotl.phylesystem.git_workflows import GitWorkflowError, \
validate_and_convert_nexson
from peyotl.collections_store import OWNER_ID_PATTERN, \
COLLECTION_ID_PATTERN
from peyotl.collections_store.validation import validate_collection
from peyotl.amendments import AMENDMENT_ID_PATTERN
from peyotl.amendments.validation import validate_amendment
from peyotl.nexson_syntax import get_empty_nexson, \
extract_supporting_file_messages, \
PhyloSchema, \
read_as_json, \
BY_ID_HONEY_BADGERFISH
from peyotl.external import import_nexson_from_treebase
from github import Github, BadCredentialsException
import api_utils
from gluon.tools import fetch
from urllib import urlencode, quote_plus
from gluon.html import web2pyHTMLParser
import re
from gluon.contrib.markdown.markdown2 import markdown
from gluon.http import HTTP
from ConfigParser import SafeConfigParser
import copy
import bleach
from bleach.sanitizer import Cleaner
from cStringIO import StringIO
_GLOG = api_utils.get_logger(None, 'ot_api.default.global')
try:
from open_tree_tasks import call_http_json
#_GLOG.debug('call_http_json imported')
except:
call_http_json = None
_GLOG.debug('call_http_json was not imported from open_tree_tasks')
_VALIDATING = True
# Cook up some reasonably strong regular expressions to detect bare
# URLs in Markdown and wrap them in hyperlinks. Adapted from
# http://stackoverflow.com/questions/1071191/detect-urls-in-a-string-and-wrap-with-a-href-tag
link_regex = re.compile(r'''
(?x)( # verbose identify URLs within text
(?<![>"]) # don't touch URLs that are already wrapped!
(http|https) # make sure we find a resource type
:// # ...needs to be followed by colon-slash-slash
(\w+[:.]?){2,} # at least two domain groups, e.g. (gnosis.)(cx)
(/?| # could be just the domain name (maybe w/ slash)
[^ \n\r"]+ # or stuff then space, newline, tab, quote
[\w/]) # resource name ends in alphanumeric or slash
(?=([\s\.,>)'"\]]|$)) # assert: followed by white or clause ending OR end of line
) # end of match group
''', re.UNICODE)
# this do-nothing version makes a sensible hyperlink
link_replace = r'\1'
# NOTE the funky constructor required to use this below
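# A rough sketch of how this pattern/replacement pair can be consumed (assuming the
# markdown2 'link-patterns' extra; the helper that actually does this is defined elsewhere):
#   html = markdown(md_src, extras=['link-patterns'], link_patterns=[(link_regex, link_replace)])
# There the replacement string becomes the href while the matched URL stays as the link
# text, which is why the "do-nothing" r'\1' still produces a sensible hyperlink.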
# Define a consistent cleaner to sanitize user input. We need a few
# elements that are common in our markdown but missing from the Bleach
# whitelist.
# N.B. HTML comments are stripped by default. Non-allowed tags will appear
# "naked" in output, so we can identify any bad actors.
allowed_curation_comment_tags = [u'p', u'br', u'h1', u'h2', u'h3', u'h4', u'h5', u'h6', u'hr', u'pre', u'code'] # any others?
ot_markdown_tags = list(set( bleach.sanitizer.ALLOWED_TAGS + allowed_curation_comment_tags))
ot_cleaner = Cleaner(tags=ot_markdown_tags)
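# Typical use, as a sketch: render untrusted markdown, then sanitize the result, e.g.
#   safe_html = ot_cleaner.clean(markdown(user_comment_text))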
def trees_in_synth(*valist, **kwargs):
"""Return an "artificial" collection that contains all trees (and
contributors) from all of the tree collections that contribute to
synthesis.
"""
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
if kwargs.get('jsoncallback', None) or kwargs.get('callback', None):
# support JSONP requests from another domain
response.view = 'generic.jsonp'
else:
response.view = 'generic.json'
coll_id_list = _get_synth_input_collection_ids()
coll_list = []
cds = api_utils.get_tree_collection_store(request)
for coll_id in coll_id_list:
try:
coll_list.append(cds.return_doc(coll_id, commit_sha=None, return_WIP_map=False)[0])
except:
msg = 'GET of collection {} failed'.format(coll_id)
# _LOG.exception(msg)
raise HTTP(404, json.dumps({"error": 1, "description": msg}))
try:
result = concatenate_collections(coll_list)
except:
# _LOG.exception('concatenation of collections failed')
e = sys.exc_info()[0]
_raise_HTTP_from_msg(e)
return json.dumps(result)
def _get_synth_input_collection_ids():
"""Return a list of all collection ids for the collections that contribute
to synthesis (based on the current propinquity configuration).
"""
# URL could be configurable, but I'm not sure we've ever changed this...
url_of_synth_config = 'https://raw.githubusercontent.com/mtholder/propinquity/master/config.opentree.synth'
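# For reference, the fetched config is expected to contain a [synthesis] section with a
# whitespace-separated list of collection ids (ids below are illustrative, not real):
#   [synthesis]
#   collections = opentreeoflife/default someuser/some-collection
# which is exactly what cfg.get('synthesis', 'collections').split() parses further down.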
try:
resp = requests.get(url_of_synth_config)
conf_fo = StringIO(resp.content)
except:
raise HTTP(504, 'Could not fetch synthesis list from {}'.format(url_of_synth_config))
cfg = SafeConfigParser()
try:
cfg.readfp(conf_fo)
except:
raise HTTP(500, 'Could not parse file from {}'.format(url_of_synth_config))
try:
coll_id_list = cfg.get('synthesis', 'collections').split()
except:
raise HTTP(500, 'Could not find a collection list in file from {}'.format(url_of_synth_config))
return coll_id_list
# Create a unique key with the URL and any vars (GET *or* POST) to its "query string"
# ALSO include the request method (HTTP verb) to respond to OPTIONS requests
@cache(key=build_general_cache_key(request),
time_expire=None,
cache_model=cache.ram)
def cached():
"""If no value was found (above) in the cache, proxy the request to its original destination"""
##from pprint import pprint
# let's restrict this to the api server, to avoid shenanigans
root_relative_url = request.env.request_uri.split('/cached/')[-1]
##pprint('ROOT-RELATIVE URL: ')
##pprint(root_relative_url)
#@TODO this is a hack
base_url = request.env.http_host
base_url = api_utils.get_collections_api_base_url(request)
fetch_url = '{}/{}'.format(base_url, root_relative_url)
# fetch_url = '%s://%s/%s' % (request.env.wsgi_url_scheme, request.env.http_host, root_relative_url)
##pprint('PROXYING TO SIMPLE URL: ')
##pprint(fetch_url)
# permissive CORS handling of requests from another domain (e.g. tree.opentreeoflife.org)
if request.env.request_method == 'OPTIONS':
if request.env.http_access_control_request_method:
response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method
if request.env.http_access_control_request_headers:
response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers
##pprint('RESPONDING TO OPTIONS')
raise HTTP(200, **(response.headers))
# N.B. This try/except block means we'll cache errors. For now, the fix is to clear the entire cache.
try:
# fetch the latest IDs as JSON from remote site
import simplejson
if fetch_url.startswith('//'):
# Prepend scheme to a scheme-relative URL
fetch_url = "http:%s" % fetch_url
fetch_args = request.vars # {'startingTaxonOTTId': ""}
# TODO: For more flexibility, we should examine and mimic the original request (HTTP verb, headers, etc)
# this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API
# N.B. that gluon.tools.fetch() can't be used here, since it won't send "raw" JSON data as treemachine expects
req = urllib2.Request(url=fetch_url, data=simplejson.dumps(fetch_args), headers={"Content-Type": "application/json"})
the_response = urllib2.urlopen(req).read()
##pprint('RESPONSE:')
##pprint(the_response)
#print '...returning a sensible urllib2 response...'
return the_response
except urllib2.URLError, e:
# throw a web2py exception (copying status code and message from urllib2.URLError) so we don't poison the cache!
# REMINDER: By default, Web2py's RAM cache will ignore any response whose code doesn't match 1xx, 2xx, or 3xx!
# NOTE that we won't cache this response, but we DO want to return its payload
raise HTTP(e.code, e.read())
except Exception, e:
#print '>>> some other Exception! TRYING TO RETURN ITS VALUES IN A WEB2PY EXCEPTION...'
raise HTTP(500, 'Unknown exception in cached call!')
def push_failure():
"""Return the contents of the push fail file if it exists.
adds a boolean `pushes_succeeding` flag (True if there is no fail file)
If this flag is False, there should also be:
`data` utc timestamp of the push event that first failed
`study` the study that triggered the first failing push event
`commit` the master commit SHA of the working dir at the time of the first failure
`stacktrace`: the stacktrace of the push_study_to_remote operation that failed.
If `pushes_succeeding` is False, but there is only a message field, then another
thread may have rectified the push problems while this operation was trying
to report the errors. In this case, you should call this function again.
Report a bug if it has not reverted to `pushes_succeeding=True`.
"""
response.view = 'generic.json'
fail_file = api_utils.get_failed_push_filepath(request)
if os.path.exists(fail_file):
try:
blob = read_as_json(fail_file)
except:
blob = {'message': 'could not read push fail file'}
blob['pushes_succeeding'] = False
else:
blob = {'pushes_succeeding': True}
blob['doc_type'] = request.vars.get('doc_type', 'nexson')
return json.dumps(blob)
def collections(*args, **kwargs):
"""Handle an incoming URL targeting /v2/collections/
This includes:
/v2/collections/find_collections
/v2/collections/find_trees
/v2/collections/properties
"""
if request.env.request_method == 'OPTIONS':
"A simple method for approving CORS preflight request"
if request.env.http_access_control_request_method:
response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method
if request.env.http_access_control_request_headers:
response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers
raise HTTP(200, T("OPTIONS!"), **(response.headers))
# N.B. other request methods don't really matter for these functions!
# extract and validate the intended API call
assert request.args[0].lower() == 'collections'
if len(request.args) < 2:
raise HTTP(404, T('No method specified! Try collections/find_collections, find_trees, or properties'))
api_call = request.args[1] # ignore anything later in the URL
if api_call == 'find_collections':
# TODO: proxy to oti? or drop 'collections' here and re-route this (in apache config)?
# For now, let's just return all collections (complete JSON)
response.view = 'generic.json'
docstore = api_utils.get_tree_collection_store(request)
# Convert these to more closely resemble the output of find_all_studies
collection_list = []
for id, props in docstore.iter_doc_objs():
# reckon and add 'lastModified' property, based on commit history?
latest_commit = docstore.get_version_history_for_doc_id(id)[0]
props.update({
'id': id,
'lastModified': {
'author_name': latest_commit.get('author_name'),
'relative_date': latest_commit.get('relative_date'),
'display_date': latest_commit.get('date'),
'ISO_date': latest_commit.get('date_ISO_8601'),
'sha': latest_commit.get('id') # this is the commit hash
}
})
collection_list.append(props)
return json.dumps(collection_list)
if api_call == 'find_trees':
# TODO: proxy to oti? see above, and also controllers/studies.py > find_trees()
raise HTTP(200, T("Now we'd list all collections holding trees that match the criteria provided!"))
elif api_call == 'collection_list':
response.view = 'generic.json'
docstore = api_utils.get_tree_collection_store(request)
ids = docstore.get_collection_ids()
return json.dumps(ids)
elif api_call == 'properties':
# TODO: proxy to oti? or drop 'collections' here and re-route this (in apache config)?
raise HTTP(200, T("Now we'd list all searchable properties in tree collections!"))
elif api_call == 'store_config':
response.view = 'generic.json'
docstore = api_utils.get_tree_collection_store(request)
cd = docstore.get_configuration_dict()
return json.dumps(cd)
elif api_call == 'push_failure':
# this should find a type-specific PUSH_FAILURE file
request.vars['doc_type'] = 'collection'
return push_failure()
raise HTTP(404, T('No such method as collections/{}'.format(api_call)))
def __extract_json_from_http_call(request, data_field_name='data', **kwargs):
"""Returns the json blob (as a deserialized object) from `kwargs` or the request.body"""
json_obj = None
try:
# check for kwarg data_field_name, or load the full request body
if data_field_name in kwargs:
json_obj = kwargs.get(data_field_name, {})
else:
json_obj = request.body.read()
if not isinstance(json_obj, dict):
json_obj = json.loads(json_obj)
if data_field_name in json_obj:
json_obj = json_obj[data_field_name]
except:
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
# _LOG.exception('Exception getting JSON content in __extract_json_from_http_call')
raise HTTP(400, json.dumps({"error": 1, "description": 'no collection JSON found in request'}))
return json_obj
def collection(*args, **kwargs):
"""Handle an incoming URL targeting /v2/collection/{COLLECTION_ID}
Use our typical mapping of HTTP verbs to (sort of) CRUD actions.
"""
# _LOG = api_utils.get_logger(request, 'ot_api.collection')
if request.env.request_method == 'OPTIONS':
"A simple method for approving CORS preflight request"
if request.env.http_access_control_request_method:
response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method
if request.env.http_access_control_request_headers:
response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers
raise HTTP(200, T("single-collection OPTIONS!"), **(response.headers))
assert request.args[0].lower() == 'collection'
# check for full or partial collection ID
owner_id = None
collection_id = None
if len(request.args) > 1:
# for a new collection, we might have just the owner's id (GitHub username)
owner_id = request.args[1]
if not OWNER_ID_PATTERN.match(owner_id):
raise HTTP(400, json.dumps({"error": 1, "description": "invalid owner ID ({}) provided".format(owner_id)}))
if len(request.args) > 2:
collection_id = ('/').join(request.args[1:3])
if not COLLECTION_ID_PATTERN.match(collection_id):
#raise HTTP(400, json.dumps({"error": 1, "description": 'invalid collection ID provided'}))
# ignore the submitted id and generate a new one
collection_id = None
elif request.env.request_method != 'POST':
# N.B. this id is optional when creating a new collection
raise HTTP(400, json.dumps({"error": 1, "description": 'collection ID expected after "collection/"'}))
# fetch and parse the JSON payload, if any
collection_obj, collection_errors, collection_adapter = __extract_and_validate_collection(request,
kwargs)
if (collection_obj is None) and request.env.request_method in ('POST','PUT'):
raise HTTP(400, json.dumps({"error": 1, "description": "collection JSON expected for HTTP method {}".format(request.env.request_method) }))
auth_info = None
if owner_id is None:
# set this explicitly to the logged-in userid (make sure the user is allowed!)
auth_info = api_utils.authenticate(**kwargs)
owner_id = auth_info.get('login', None)
if owner_id is None:
raise HTTP(400, json.dumps({"error": 1, "description": "no GitHub userid found for HTTP method {}".format(request.env.request_method) }))
if collection_id is None:
# try to extract a usable collection ID from the JSON payload (confirm owner_id against above)
url = collection_obj.get('url', None)
if url is None:
raise HTTP(400, json.dumps({"error": 1, "description": "no collection URL provided in query string or JSON payload"}))
try:
collection_id = url.split('/collection/')[1]
except:
# _LOG.exception('{} failed'.format(request.env.request_method))
raise HTTP(404, json.dumps({"error": 1, "description": "invalid URL, no collection id found: {}".format(url)}))
try:
assert collection_id.split('/')[0] == owner_id
except:
# _LOG.exception('{} failed'.format(request.env.request_method))
raise HTTP(404, json.dumps({"error": 1, "description": "collection URL in JSON doesn't match logged-in user: {}".format(url)}))
# some request types imply git commits; gather any user-provided commit message
try:
commit_msg = kwargs.get('commit_msg','')
if commit_msg.strip() == '':
# git rejects empty commit messages
commit_msg = None
except:
commit_msg = None
if kwargs.get('jsoncallback', None) or kwargs.get('callback', None):
# support JSONP requests from another domain
response.view = 'generic.jsonp'
if request.env.request_method == 'GET':
# fetch the current collection JSON
# _LOG.debug('GET /v2/collection/{}'.format(str(collection_id)))
version_history = None
comment_html = None
parent_sha = kwargs.get('starting_commit_SHA', None)
# _LOG.debug('parent_sha = {}'.format(parent_sha))
# return the correct nexson of study_id, using the specified view
collections = api_utils.get_tree_collection_store(request)
try:
r = collections.return_doc(collection_id, commit_sha=parent_sha, return_WIP_map=True)
except:
# _LOG.exception('GET failed')
raise HTTP(404, json.dumps({"error": 1, "description": "Collection '{}' GET failure".format(collection_id)}))
try:
collection_json, head_sha, wip_map = r
## if returning_full_study: # TODO: offer bare vs. full output (w/ history, etc)
version_history = collections.get_version_history_for_doc_id(collection_id)
try:
# pre-render internal description (assumes markdown!)
comment_html = _markdown_to_html(collection_json['description'], open_links_in_new_window=True )
except:
comment_html = ''
except:
# _LOG.exception('GET failed')
e = sys.exc_info()[0]
_raise_HTTP_from_msg(e)
if not collection_json:
raise HTTP(404, "Collection '{s}' has no JSON data!".format(s=collection_id))
# add/restore the url field (using the visible fetch URL)
base_url = api_utils.get_collections_api_base_url(request)
collection_json['url'] = '{b}/v2/collection/{i}'.format(b=base_url,
i=collection_id)
try:
external_url = collections.get_public_url(collection_id)
except:
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
# _LOG.exception('collection {} not found in external_url'.format(collection))
external_url = 'NOT FOUND'
result = {'sha': head_sha,
'data': collection_json,
'branch2sha': wip_map,
'commentHTML': comment_html,
'external_url': external_url,
}
if version_history:
result['versionHistory'] = version_history
# reckon and add 'lastModified' property, based on commit history?
latest_commit = version_history[0]
last_modified = {
'author_name': latest_commit.get('author_name'),
'relative_date': latest_commit.get('relative_date'),
'display_date': latest_commit.get('date'),
'ISO_date': latest_commit.get('date_ISO_8601'),
'sha': latest_commit.get('id') # this is the commit hash
}
result['lastModified'] = last_modified
return result
if request.env.request_method == 'PUT':
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
# update an existing collection with the data provided
# _LOG = api_utils.get_logger(request, 'ot_api.default.collections.PUT')
auth_info = auth_info or api_utils.authenticate(**kwargs)
# submit new json for this id, and read the results
parent_sha = kwargs.get('starting_commit_SHA', None)
merged_sha = None #TODO: kwargs.get('???', None)
docstore = api_utils.get_tree_collection_store(request)
try:
r = docstore.update_existing_collection(owner_id,
collection_id,
collection_obj,
auth_info,
parent_sha,
merged_sha,
commit_msg=commit_msg)
commit_return = r
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
raise HTTP(400, traceback.format_exc())
# check for 'merge needed'?
mn = commit_return.get('merge_needed')
if (mn is not None) and (not mn):
__deferred_push_to_gh_call(request, collection_id, doc_type='collection', **kwargs)
# Add updated commit history to the blob
commit_return['versionHistory'] = docstore.get_version_history_for_doc_id(collection_id)
return commit_return
#
# parent_sha = kwargs.get('starting_commit_SHA')
# if parent_sha is None:
# raise HTTP(400, 'Expecting a "starting_commit_SHA" argument with the SHA of the parent')
# try:
# commit_msg = kwargs.get('commit_msg','')
# if commit_msg.strip() == '':
# # git rejects empty commit messages
# commit_msg = None
# except:
# commit_msg = None
# master_file_blob_included = kwargs.get('merged_SHA')
# msg = 'PUT to collection {} for starting_commit_SHA = {} and merged_SHA = {}'
# _LOG.debug(msg.format(collection_id,
# parent_sha,
# str(master_file_blob_included)))
#
# try:
# gd = phylesystem.create_git_action(resource_id)
# except KeyError, err:
# _LOG.debug('PUT failed in create_git_action (probably a bad collection ID)')
# _raise_HTTP_from_msg("invalid collection ID, please check the URL")
# except GitWorkflowError, err:
# _LOG.debug('PUT failed in create_git_action: {}'.format(err.msg))
# _raise_HTTP_from_msg(err.msg)
# except:
# raise HTTP(400, traceback.format_exc())
if request.env.request_method == 'POST':
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
# Create a new collection with the data provided
# _LOG = api_utils.get_logger(request, 'ot_api.default.collections.POST')
auth_info = auth_info or api_utils.authenticate(**kwargs)
# submit the json and proposed id (if any), and read the results
docstore = api_utils.get_tree_collection_store(request)
try:
r = docstore.add_new_collection(owner_id,
collection_obj,
auth_info,
collection_id,
commit_msg=commit_msg)
new_collection_id, commit_return = r
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
raise HTTP(400, traceback.format_exc())
if commit_return['error'] != 0:
# _LOG.debug('add_new_collection failed with error code')
raise HTTP(400, json.dumps(commit_return))
__deferred_push_to_gh_call(request, new_collection_id, doc_type='collection', **kwargs)
return commit_return
if request.env.request_method == 'DELETE':
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
# remove this collection from the docstore
# _LOG = api_utils.get_logger(request, 'ot_api.default.collections.POST')
auth_info = auth_info or api_utils.authenticate(**kwargs)
docstore = api_utils.get_tree_collection_store(request)
parent_sha = kwargs.get('starting_commit_SHA')
if parent_sha is None:
raise HTTP(400, 'Expecting a "starting_commit_SHA" argument with the SHA of the parent')
try:
x = docstore.delete_collection(collection_id,
auth_info,
parent_sha,
commit_msg=commit_msg)
if x.get('error') == 0:
__deferred_push_to_gh_call(request, None, doc_type='collection', **kwargs)
return x
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
# _LOG.exception('Unknown error in collection deletion')
# raise HTTP(400, traceback.format_exc())
raise HTTP(400, json.dumps({"error": 1, "description": 'Unknown error in collection deletion'}))
raise HTTP(500, T("Unknown HTTP method '{}'".format(request.env.request_method)))
# TAXONOMIC AMENDMENTS
def amendments(*args, **kwargs):
"""Handle an incoming URL targeting /v3/amendments/
This includes:
/v3/amendments/list_all
/v3/amendments/store_config
/v3/amendments/push_failure
"""
if request.env.request_method == 'OPTIONS':
"A simple method for approving CORS preflight request"
if request.env.http_access_control_request_method:
response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method
if request.env.http_access_control_request_headers:
response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers
raise HTTP(200, T("OPTIONS!"), **(response.headers))
# N.B. other request methods don't really matter for these functions!
# extract and validate the intended API call
assert request.args[0].lower() == 'amendments'
if len(request.args) < 2:
raise HTTP(404, T('No method specified! Try amendments/list_all, store_config, or push_failure'))
api_call = request.args[1] # ignore anything later in the URL
if api_call == 'list_all':
# For now, let's just return all amendments (complete JSON)
response.view = 'generic.json'
docstore = api_utils.get_taxonomic_amendment_store(request)
# Convert these to more closely resemble the output of find_all_studies
amendment_list = []
for id, props in docstore.iter_doc_objs():
# reckon and add 'lastModified' property, based on commit history?
latest_commit = docstore.get_version_history_for_doc_id(id)[0]
props.update({
'id': id,
'lastModified': {
'author_name': latest_commit.get('author_name'),
'relative_date': latest_commit.get('relative_date'),
'display_date': latest_commit.get('date'),
'ISO_date': latest_commit.get('date_ISO_8601'),
'sha': latest_commit.get('id') # this is the commit hash
}
})
amendment_list.append(props)
return json.dumps(amendment_list)
elif api_call == 'amendment_list':
response.view = 'generic.json'
docstore = api_utils.get_taxonomic_amendment_store(request)
ids = docstore.get_amendment_ids()
return json.dumps(ids)
elif api_call == 'store_config':
response.view = 'generic.json'
docstore = api_utils.get_taxonomic_amendment_store(request)
cd = docstore.get_configuration_dict()
return json.dumps(cd)
elif api_call == 'push_failure':
# this should find a type-specific PUSH_FAILURE file
request.vars['doc_type'] = 'amendment'
return push_failure()
raise HTTP(404, T('No such method as amendments/{}'.format(api_call)))
def amendment(*args, **kwargs):
"""Handle an incoming URL targeting /v3/amendment/{AMENDMENT_ID}
Use our typical mapping of HTTP verbs to (sort of) CRUD actions.
"""
# _LOG = api_utils.get_logger(request, 'ot_api.amendment')
if request.env.request_method == 'OPTIONS':
"A simple method for approving CORS preflight request"
if request.env.http_access_control_request_method:
response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method
if request.env.http_access_control_request_headers:
response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers
raise HTTP(200, T("single-amendment OPTIONS!"), **(response.headers))
assert request.args[0].lower() == 'amendment'
# check for an existing amendment ID
amendment_id = None
if len(request.args) > 1:
amendment_id = request.args[1]
if not AMENDMENT_ID_PATTERN.match(amendment_id):
raise HTTP(400, json.dumps({"error": 1, "description": "invalid amendment ID ({}) provided".format(amendment_id)}))
# TODO: OR ignore the submitted id and generate a new one
#amendment_id = None
elif request.env.request_method != 'POST':
# N.B. this id is optional when creating a new amendment
raise HTTP(400, json.dumps({"error": 1, "description": 'amendment ID expected after "amendment/"'}))
# fetch and parse the JSON payload, if any
amendment_obj, amendment_errors, amendment_adapter = __extract_and_validate_amendment(request,
kwargs)
if (amendment_obj is None) and request.env.request_method in ('POST','PUT'):
raise HTTP(400, json.dumps({"error": 1, "description": "amendment JSON expected for HTTP method {}".format(request.env.request_method) }))
if request.env.request_method != 'GET':
# all other methods require authentication
auth_info = api_utils.authenticate(**kwargs)
# some request types imply git commits; gather any user-provided commit message
try:
commit_msg = kwargs.get('commit_msg','')
if commit_msg.strip() == '':
# git rejects empty commit messages
commit_msg = None
except:
commit_msg = None
if kwargs.get('jsoncallback', None) or kwargs.get('callback', None):
# support JSONP requests from another domain
response.view = 'generic.jsonp'
if request.env.request_method == 'GET':
# fetch the current amendment JSON
# _LOG.debug('GET /v2/amendment/{}'.format(str(amendment_id)))
version_history = None
comment_html = None
parent_sha = kwargs.get('starting_commit_SHA', None)
# _LOG.debug('parent_sha = {}'.format(parent_sha))
# return the correct nexson of study_id, using the specified view
amendments = api_utils.get_taxonomic_amendment_store(request)
try:
r = amendments.return_doc(amendment_id, commit_sha=parent_sha, return_WIP_map=True)
except:
# _LOG.exception('GET failed')
raise HTTP(404, json.dumps({"error": 1, "description": "Amendment '{}' GET failure".format(amendment_id)}))
try:
amendment_json, head_sha, wip_map = r
## if returning_full_study: # TODO: offer bare vs. full output (w/ history, etc)
version_history = amendments.get_version_history_for_doc_id(amendment_id)
except:
# _LOG.exception('GET failed')
e = sys.exc_info()[0]
_raise_HTTP_from_msg(e)
if not amendment_json:
raise HTTP(404, "Amendment '{s}' has no JSON data!".format(s=amendment_id))
try:
external_url = amendments.get_public_url(amendment_id)
except:
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
# _LOG.exception('amendment {} not found in external_url'.format(amendment))
external_url = 'NOT FOUND'
result = {'sha': head_sha,
'data': amendment_json,
'branch2sha': wip_map,
'external_url': external_url,
}
if version_history:
result['versionHistory'] = version_history
return result
if request.env.request_method == 'PUT':
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
# update an existing amendment with the data provided
# _LOG = api_utils.get_logger(request, 'ot_api.default.amendments.PUT')
# submit new json for this id, and read the results
parent_sha = kwargs.get('starting_commit_SHA', None)
merged_sha = None #TODO: kwargs.get('???', None)
docstore = api_utils.get_taxonomic_amendment_store(request)
try:
r = docstore.update_existing_amendment(amendment_id,
amendment_obj,
auth_info,
parent_sha,
merged_sha,
commit_msg=commit_msg)
commit_return = r
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
raise HTTP(400, traceback.format_exc())
# check for 'merge needed'?
mn = commit_return.get('merge_needed')
if (mn is not None) and (not mn):
__deferred_push_to_gh_call(request, amendment_id, doc_type='amendment', **kwargs)
return commit_return
if request.env.request_method == 'POST':
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
# Create a new amendment with the data provided
# _LOG = api_utils.get_logger(request, 'ot_api.default.amendments.POST')
# submit the json and proposed id (if any), and read the results
docstore = api_utils.get_taxonomic_amendment_store(request)
# N.B. add_new_amendment below takes care of minting new ottids,
# assigning them to new taxa, and returning a per-taxon mapping to the
# caller. It will assign the new amendment id accordingly!
try:
r = docstore.add_new_amendment(amendment_obj,
auth_info,
commit_msg=commit_msg)
new_amendment_id, commit_return = r
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
raise HTTP(400, traceback.format_exc())
if commit_return['error'] != 0:
# _LOG.debug('add_new_amendment failed with error code')
raise HTTP(400, json.dumps(commit_return))
__deferred_push_to_gh_call(request, new_amendment_id, doc_type='amendment', **kwargs)
return commit_return
if request.env.request_method == 'DELETE':
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
# remove this amendment from the docstore
# _LOG = api_utils.get_logger(request, 'ot_api.default.amendments.POST')
docstore = api_utils.get_taxonomic_amendment_store(request)
parent_sha = kwargs.get('starting_commit_SHA')
if parent_sha is None:
raise HTTP(400, 'Expecting a "starting_commit_SHA" argument with the SHA of the parent')
try:
x = docstore.delete_amendment(amendment_id,
auth_info,
parent_sha,
commit_msg=commit_msg)
if x.get('error') == 0:
__deferred_push_to_gh_call(request, None, doc_type='amendment', **kwargs)
return x
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
# _LOG.exception('Unknown error in amendment deletion')
raise HTTP(400, traceback.format_exc())
#raise HTTP(400, json.dumps({"error": 1, "description": 'Unknown error in amendment deletion'}))
raise HTTP(500, T("Unknown HTTP method '{}'".format(request.env.request_method)))
# Names here will intercept GET and POST requests to /v1/{METHOD_NAME}
# This allows us to normalize all API method URLs under v1/, even for
# non-RESTful methods.
_route_tag2func = {'index':index,
'trees_in_synth': trees_in_synth,
'include_tree_in_synth': include_tree_in_synth,
'exclude_tree_from_synth': exclude_tree_from_synth,
'study_list': study_list,
'phylesystem_config': phylesystem_config,
'unmerged_branches': unmerged_branches,
'external_url': external_url,
'push_failure': push_failure,
'repo_nexson_format': reponexsonformat,
'reponexsonformat': reponexsonformat,
'render_markdown': render_markdown,
# handle minor resource types based on identifying paths
# NOTE singular vs. plural forms
'collections': collections,
'collection': collection,
'amendments': amendments,
'amendment': amendment,
#TODO: 'following': following,
}
@request.restful()
def v1():
"The OpenTree API v1"
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
response.view = 'generic.json'
# CORS support for cross-domain API requests (from anywhere)
response.headers['Access-Control-Allow-Origin'] = "*"
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Max-Age'] = 86400 # cache for a day
phylesystem = api_utils.get_phylesystem(request)
repo_parent, repo_remote, git_ssh, pkey, git_hub_remote, max_filesize, max_num_trees, read_only_mode = api_utils.read_phylesystem_config(request)
#_LOG.debug('Max file size set to {}, max num trees set to {}'.format(max_filesize, max_num_trees))
repo_nexml2json = phylesystem.repo_nexml2json
#_LOG.debug("phylesystem created with repo_nexml2json={}".format(repo_nexml2json))
def __finish_write_verb(phylesystem,
git_data,
nexson,
resource_id,
auth_info,
adaptor,
annotation,
parent_sha,
commit_msg='',
master_file_blob_included=None):
'''Called by PUT and POST handlers to avoid code repetition.'''
# global TIMING
#TODO, need to make this spawn a thread to do the second commit rather than block
a = phylesystem.annotate_and_write(git_data,
nexson,
resource_id,
auth_info,
adaptor,
annotation,
parent_sha,
commit_msg,
master_file_blob_included)
annotated_commit = a
# TIMING = api_utils.log_time_diff(_LOG, 'annotated commit', TIMING)
if annotated_commit['error'] != 0:
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
# _LOG.debug('annotated_commit failed')
raise HTTP(400, json.dumps(annotated_commit))
return annotated_commit
def GET(resource,
resource_id=None,
subresource=None,
subresource_id=None,
jsoncallback=None,
callback=None,
_=None,
**kwargs):
"OpenTree API methods relating to reading"
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1.GET')
delegate = _route_tag2func.get(resource)
if delegate:
return delegate(**kwargs)
valid_resources = ('study', )
if not resource.lower() == 'study':
raise HTTP(400, json.dumps({"error": 1,
"description": 'resource requested not in list of valid resources: %s' % valid_resources }))
if resource_id is None:
raise HTTP(400, json.dumps({"error": 1, "description": 'study ID expected after "study/"'}))
valid_subresources = ('tree', 'meta', 'otus', 'otu', 'otumap')
# _LOG.debug('GET default/v1/{}/{}'.format(str(resource), str(resource_id)))
returning_full_study = False
returning_tree = False
content_id = None
version_history = None
comment_html = None
if request.extension not in('html', 'json'):
type_ext = '.' + request.extension
else:
type_ext = None
if subresource is None:
returning_full_study = True
return_type = 'study'
elif subresource == 'tree':
return_type = 'tree'
returning_tree = True
content_id = subresource_id
elif subresource == 'subtree':
subtree_id = kwargs.get('subtree_id')
if subtree_id is None:
raise HTTP(400, json.dumps({"error": 1,
"description": 'subtree resource requires a study_id and tree_id in the URL and a subtree_id parameter'}))
return_type = 'subtree'
returning_tree = True
content_id = (subresource_id, subtree_id)
elif subresource in ['file', 'meta', 'otus', 'otu', 'otumap']:
if subresource != 'meta':
content_id = subresource_id
return_type = subresource
else:
raise HTTP(400, json.dumps({"error": 1,
"description": 'subresource requested not in list of valid resources: %s' % ' '.join(valid_subresources)}))
out_schema = __validate_output_nexml2json(kwargs,
return_type,
type_ext,
content_id=content_id)
# support JSONP request from another domain
if jsoncallback or callback:
response.view = 'generic.jsonp'
parent_sha = kwargs.get('starting_commit_SHA')
# _LOG.debug('parent_sha = {}'.format(parent_sha))
# return the correct nexson of study_id, using the specified view
phylesystem = api_utils.get_phylesystem(request)
try:
r = phylesystem.return_study(resource_id, commit_sha=parent_sha, return_WIP_map=True)
except:
# _LOG.exception('GET failed')
raise HTTP(404, json.dumps({"error": 1, "description": 'Study #%s GET failure' % resource_id}))
try:
study_nexson, head_sha, wip_map = r
if returning_full_study:
blob_sha = phylesystem.get_blob_sha_for_study_id(resource_id, head_sha)
phylesystem.add_validation_annotation(study_nexson, blob_sha)
version_history = phylesystem.get_version_history_for_study_id(resource_id)
try:
comment_html = _markdown_to_html(study_nexson['nexml']['^ot:comment'], open_links_in_new_window=True )
except:
comment_html = ''
except:
# _LOG.exception('GET failed')
e = sys.exc_info()[0]
_raise_HTTP_from_msg(e)
if subresource == 'file':
m_list = extract_supporting_file_messages(study_nexson)
if subresource_id is None:
r = []
for m in m_list:
files = m.get('data', {}).get('files', {}).get('file', [])
for f in files:
if '@url' in f:
r.append({'id': m['@id'],
'filename': f.get('@filename', ''),
'url_fragment': f['@url']})
break
return json.dumps(r)
else:
try:
matching = None
for m in m_list:
if m['@id'] == subresource_id:
matching = m
break
if matching is None:
raise HTTP(404, 'No file with id="{f}" found in study="{s}"'.format(f=subresource_id, s=resource_id))
u = None
files = m.get('data', {}).get('files', {}).get('file', [])
for f in files:
if '@url' in f:
u = f['@url']
break
if u is None:
raise HTTP(404, 'No @url found in the message with id="{f}" found in study="{s}"'.format(f=subresource_id, s=resource_id))
#TEMPORARY HACK TODO
u = u.replace('uploadid=', 'uploadId=')
#TODO: should not hard-code this, I suppose... (but not doing so requires more config...)
if u.startswith('/curator'):
u = 'https://tree.opentreeoflife.org' + u
response.headers['Content-Type'] = 'text/plain'
fetched = requests.get(u)
fetched.raise_for_status()
return fetched.text
except Exception as x:
# _LOG.exception('file_get failed')
raise HTTP(404, 'Could not retrieve file. Exception: "{}"'.format(str(x)))
elif out_schema.format_str == 'nexson' and out_schema.version == repo_nexml2json:
result_data = study_nexson
else:
try:
serialize = not out_schema.is_json()
src_schema = PhyloSchema('nexson', version=repo_nexml2json)
result_data = out_schema.convert(study_nexson,
serialize=serialize,
src_schema=src_schema)
except:
msg = "Exception in coercing to the required NexSON version for validation. "
# _LOG.exception(msg)
raise HTTP(400, msg)
if not result_data:
raise HTTP(404, 'subresource "{r}/{t}" not found in study "{s}"'.format(r=subresource,
t=subresource_id,
s=resource_id))
if returning_full_study and out_schema.is_json():
try:
study_DOI = study_nexson['nexml']['^ot:studyPublication']['@href']
except KeyError:
study_DOI = None
try:
duplicate_study_ids = _fetch_duplicate_study_ids(study_DOI, resource_id)
except:
# _LOG.exception('call to OTI check for duplicate DOIs failed')
duplicate_study_ids = None
try:
shard_name = _fetch_shard_name(resource_id)
except:
# _LOG.exception('check for shard name failed')
shard_name = None
result = {'sha': head_sha,
'data': result_data,
'branch2sha': wip_map,
'commentHTML': comment_html,
}
if duplicate_study_ids is not None:
result['duplicateStudyIDs'] = duplicate_study_ids
if shard_name:
result['shardName'] = shard_name
if version_history:
result['versionHistory'] = version_history
return result
else:
return result_data
def POST(resource, resource_id=None, _method='POST', **kwargs):
"Open Tree API methods relating to creating (and importing) resources"
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
delegate = _route_tag2func.get(resource)
if delegate:
return delegate(**kwargs)
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1.POST')
# support JSONP request from another domain
if kwargs.get('jsoncallback', None) or kwargs.get('callback', None):
response.view = 'generic.jsonp'
# check for HTTP method override (passed on query string)
if _method == 'PUT':
PUT(resource, resource_id, kwargs)
elif _method == 'DELETE':
DELETE(resource, resource_id, kwargs)
if not resource == 'study':
raise HTTP(400, json.dumps({"error":1,
"description": "Only the creation of new studies is currently supported"}))
auth_info = api_utils.authenticate(**kwargs)
# Studies that were created in phylografter can be added by
# POSTing the content with resource_id
new_study_id = resource_id
if new_study_id is not None:
try:
int(new_study_id)
except:
new_study_id = 'pg_' + new_study_id
else:
try:
new_study_id.startswith('pg_')
except:
raise HTTP(400, 'Use of the resource_id to specify a study ID is limited to phylografter studies')
bundle = __extract_and_validate_nexson(request,
repo_nexml2json,
kwargs)
new_study_nexson = bundle[0]
else:
# we're creating a new study (possibly with import instructions in the payload)
import_from_location = kwargs.get('import_from_location', '')
treebase_id = kwargs.get('treebase_id', '')
nexml_fetch_url = kwargs.get('nexml_fetch_url', '')
nexml_pasted_string = kwargs.get('nexml_pasted_string', '')
publication_doi = kwargs.get('publication_DOI', '')
# if a URL or something other than a valid DOI was entered, don't submit it to crossref API
publication_doi_for_crossref = __make_valid_DOI(publication_doi) or None
publication_ref = kwargs.get('publication_reference', '')
# is the submitter explicitly applying the CC0 waiver to a new study?
cc0_agreement = (kwargs.get('chosen_license', '') == 'apply-new-CC0-waiver' and
kwargs.get('cc0_agreement', '') == 'true')
# look for the chosen import method, e.g,
# 'import-method-PUBLICATION_DOI' or 'import-method-MANUAL_ENTRY'
import_method = kwargs.get('import_method', '')
##dryad_DOI = kwargs.get('dryad_DOI', '')
app_name = request.application
# add known values for its metatags
meta_publication_reference = None
# Create initial study NexSON using the chosen import method.
#
# N.B. We're currently using a streamlined creation path with just
# two methods (TreeBASE ID and publication DOI). But let's keep the
# logic for others, just in case we revert based on user feedback.
importing_from_treebase_id = (import_method == 'import-method-TREEBASE_ID' and treebase_id)
importing_from_nexml_fetch = (import_method == 'import-method-NEXML' and nexml_fetch_url)
importing_from_post_arg = (import_method == 'import-method-POST')
importing_from_nexml_string = (import_method == 'import-method-NEXML' and nexml_pasted_string)
importing_from_crossref_API = (import_method == 'import-method-PUBLICATION_DOI' and publication_doi_for_crossref) or \
(import_method == 'import-method-PUBLICATION_REFERENCE' and publication_ref)
# Are they using an existing license or waiver (CC0, CC-BY, something else?)
using_existing_license = (kwargs.get('chosen_license', '') == 'study-data-has-existing-license')
# any of these methods should return a parsed NexSON dict (vs. string)
if importing_from_treebase_id:
# make sure the treebase ID is an integer
treebase_id = "".join(treebase_id.split()) # remove all whitespace
treebase_id = treebase_id.lstrip('S').lstrip('s') # allow for possible leading 'S'?
try:
treebase_id = int(treebase_id)
except ValueError, e:
raise HTTP(400, json.dumps({
"error": 1,
"description": "TreeBASE ID should be a simple integer, not '%s'! Details:\n%s" % (treebase_id, e.message)
}))
try:
new_study_nexson = import_nexson_from_treebase(treebase_id, nexson_syntax_version=BY_ID_HONEY_BADGERFISH)
except Exception as e:
raise HTTP(500, json.dumps({
"error": 1,
"description": "Unexpected error parsing the file obtained from TreeBASE. Please report this bug to the Open Tree of Life developers."
}))
# elif importing_from_nexml_fetch:
# if not (nexml_fetch_url.startswith('http://') or nexml_fetch_url.startswith('https://')):
# raise HTTP(400, json.dumps({
# "error": 1,
# "description": 'Expecting: "nexml_fetch_url" to startwith http:// or https://',
# }))
# new_study_nexson = get_ot_study_info_from_treebase_nexml(src=nexml_fetch_url,
# nexson_syntax_version=BY_ID_HONEY_BADGERFISH)
# elif importing_from_nexml_string:
# new_study_nexson = get_ot_study_info_from_treebase_nexml(nexml_content=nexml_pasted_string,
# nexson_syntax_version=BY_ID_HONEY_BADGERFISH)
elif importing_from_crossref_API:
new_study_nexson = _new_nexson_with_crossref_metadata(doi=publication_doi_for_crossref, ref_string=publication_ref, include_cc0=cc0_agreement)
elif importing_from_post_arg:
bundle = __extract_and_validate_nexson(request,
repo_nexml2json,
kwargs)
new_study_nexson = bundle[0]
else: # assumes 'import-method-MANUAL_ENTRY', or insufficient args above
new_study_nexson = get_empty_nexson(BY_ID_HONEY_BADGERFISH, include_cc0=cc0_agreement)
if publication_doi:
# submitter entered an invalid DOI (or other URL); add it now
new_study_nexson['nexml'][u'^ot:studyPublication'] = {'@href': publication_doi}
nexml = new_study_nexson['nexml']
if not importing_from_post_arg:
# If submitter requested the CC0 waiver or other waiver/license, make sure it's here
if cc0_agreement:
nexml['^xhtml:license'] = {'@href': 'http://creativecommons.org/publicdomain/zero/1.0/'}
elif using_existing_license:
existing_license = kwargs.get('alternate_license', '')
if existing_license == 'CC-0':
nexml['^xhtml:license'] = {'@name': 'CC0', '@href': 'http://creativecommons.org/publicdomain/zero/1.0/'}
pass
elif existing_license == 'CC-BY-2.0':
nexml['^xhtml:license'] = {'@name': 'CC-BY 2.0', '@href': 'http://creativecommons.org/licenses/by/2.0/'}
pass
elif existing_license == 'CC-BY-2.5':
nexml['^xhtml:license'] = {'@name': 'CC-BY 2.5', '@href': 'http://creativecommons.org/licenses/by/2.5/'}
pass
elif existing_license == 'CC-BY-3.0':
nexml['^xhtml:license'] = {'@name': 'CC-BY 3.0', '@href': 'http://creativecommons.org/licenses/by/3.0/'}
pass
# NOTE that we don't offer CC-BY 4.0, which is problematic for data
elif existing_license == 'CC-BY':
# default to version 3, if not specified.
nexml['^xhtml:license'] = {'@name': 'CC-BY 3.0', '@href': 'http://creativecommons.org/licenses/by/3.0/'}
pass
else: # assume it's something else
alt_license_name = kwargs.get('alt_license_name', '')
alt_license_url = kwargs.get('alt_license_URL', '')
# OK to add a name here? mainly to capture submitter's intent
nexml['^xhtml:license'] = {'@name': alt_license_name, '@href': alt_license_url}
nexml['^ot:curatorName'] = auth_info.get('name', '').decode('utf-8')
phylesystem = api_utils.get_phylesystem(request)
try:
r = phylesystem.ingest_new_study(new_study_nexson,
repo_nexml2json,
auth_info,
new_study_id)
new_resource_id, commit_return = r
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
raise HTTP(400, traceback.format_exc())
if commit_return['error'] != 0:
# _LOG.debug('ingest_new_study failed with error code')
raise HTTP(400, json.dumps(commit_return))
__deferred_push_to_gh_call(request, new_resource_id, doc_type='nexson', **kwargs)
return commit_return
def __coerce_nexson_format(nexson, dest_format, current_format=None):
'''Calls convert_nexson_format but does the appropriate logging and HTTP exceptions.
'''
try:
return convert_nexson_format(nexson, dest_format, current_format=current_format)
except:
msg = "Exception in coercing to the required NexSON version for validation!"
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
# _LOG.exception(msg)
raise HTTP(400, msg)
def __extract_nexson_from_http_call(request, **kwargs):
"""Returns the nexson blob from `kwargs` or the request.body"""
try:
# check for kwarg 'nexson', or load the full request body
if 'nexson' in kwargs:
nexson = kwargs.get('nexson', {})
else:
nexson = request.body.read()
if not isinstance(nexson, dict):
nexson = json.loads(nexson)
if 'nexson' in nexson:
nexson = nexson['nexson']
except:
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1')
# _LOG.exception('Exception getting nexson content in __extract_nexson_from_http_call')
raise HTTP(400, json.dumps({"error": 1, "description": 'NexSON must be valid JSON'}))
return nexson
def PUT(resource, resource_id=None, *args, **kwargs):
"Open Tree API methods relating to updating existing resources"
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1.PUT')
delegate = _route_tag2func.get(resource)
if delegate:
return delegate(**kwargs)
#global TIMING
# support JSONP request from another domain
if kwargs.get('jsoncallback',None) or kwargs.get('callback',None):
response.view = 'generic.jsonp'
if not resource=='study':
# _LOG.debug('resource must be "study"')
raise HTTP(400, 'resource != study')
if resource_id is None:
# _LOG.debug('resource id not provided')
raise HTTP(400, json.dumps({"error": 1, "description": 'study ID expected after "study/"'}))
parent_sha = kwargs.get('starting_commit_SHA')
if parent_sha is None:
raise HTTP(400, 'Expecting a "starting_commit_SHA" argument with the SHA of the parent')
try:
commit_msg = kwargs.get('commit_msg','')
if commit_msg.strip() == '':
# git rejects empty commit messages
commit_msg = None
except:
commit_msg = None
master_file_blob_included = kwargs.get('merged_SHA')
# _LOG.debug('PUT to study {} for starting_commit_SHA = {} and merged_SHA = {}'.format(resource_id,
# parent_sha,
# str(master_file_blob_included)))
#TIMING = api_utils.log_time_diff(_LOG)
auth_info = api_utils.authenticate(**kwargs)
#TIMING = api_utils.log_time_diff(_LOG, 'github authentication', TIMING)
bundle = __extract_and_validate_nexson(request,
repo_nexml2json,
kwargs)
nexson, annotation, nexson_adaptor = bundle
#TIMING = api_utils.log_time_diff(_LOG, 'validation and normalization', TIMING)
phylesystem = api_utils.get_phylesystem(request)
try:
gd = phylesystem.create_git_action(resource_id)
except KeyError, err:
# _LOG.debug('PUT failed in create_git_action (probably a bad study ID)')
_raise_HTTP_from_msg("invalid study ID, please check the URL")
try:
blob = __finish_write_verb(phylesystem,
gd,
nexson=nexson,
resource_id=resource_id,
auth_info=auth_info,
adaptor=nexson_adaptor,
annotation=annotation,
parent_sha=parent_sha,
commit_msg=commit_msg,
master_file_blob_included=master_file_blob_included)
except GitWorkflowError, err:
# _LOG.exception('PUT failed in __finish_write_verb')
_raise_HTTP_from_msg(err.msg)
#TIMING = api_utils.log_time_diff(_LOG, 'blob creation', TIMING)
mn = blob.get('merge_needed')
if (mn is not None) and (not mn):
__deferred_push_to_gh_call(request, resource_id, doc_type='nexson', **kwargs)
# Add updated commit history to the blob
blob['versionHistory'] = phylesystem.get_version_history_for_study_id(resource_id)
return blob
def DELETE(resource, resource_id=None, *args, **kwargs):
"Open Tree API methods relating to deleting existing resources"
if not check_not_read_only():
raise HTTP(500, "should raise from check_not_read_only")
delegate = _route_tag2func.get(resource)
if delegate:
return delegate(**kwargs)
# support JSONP request from another domain
# _LOG = api_utils.get_logger(request, 'ot_api.default.v1.DELETE')
if kwargs.get('jsoncallback',None) or kwargs.get('callback',None):
response.view = 'generic.jsonp'
if not resource=='study':
raise HTTP(400, 'resource != study')
if resource_id is None:
# _LOG.debug('resource id not provided')
raise HTTP(400, json.dumps({"error": 1, "description": 'study ID expected after "study/"'}))
parent_sha = kwargs.get('starting_commit_SHA')
if parent_sha is None:
raise HTTP(400, 'Expecting a "starting_commit_SHA" argument with the SHA of the parent')
try:
commit_msg = kwargs.get('commit_msg','')
if commit_msg.strip() == '':
# git rejects empty commit messages
commit_msg = None
except:
commit_msg = None
auth_info = api_utils.authenticate(**kwargs)
phylesystem = api_utils.get_phylesystem(request)
try:
x = phylesystem.delete_study(resource_id, auth_info, parent_sha, commit_msg=commit_msg)
if x.get('error') == 0:
__deferred_push_to_gh_call(request, None, doc_type='nexson', **kwargs)
return x
except GitWorkflowError, err:
_raise_HTTP_from_msg(err.msg)
except:
# _LOG.exception('Exception getting nexson content in phylesystem.delete_study')
raise HTTP(400, json.dumps({"error": 1, "description": 'Unknown error in study deletion'}))
def OPTIONS(*args, **kwargs):
"A simple method for approving CORS preflight request"
if request.env.http_access_control_request_method:
response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method
if request.env.http_access_control_request_headers:
response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers
raise HTTP(200, **(response.headers))
return locals()
| [
11748,
7007,
198,
11748,
2956,
297,
571,
17,
198,
11748,
28686,
11,
25064,
198,
198,
11748,
33918,
198,
11748,
597,
17752,
198,
11748,
12854,
1891,
198,
6738,
427,
1330,
17606,
198,
6738,
613,
88,
313,
75,
1330,
10385,
62,
12413,
1559,
... | 2.111899 | 32431 |
from blacksheep.contents import FormContent, JSONContent, TextContent
from blacksheep.testing.client import TestClient
from blacksheep.testing.messages import MockReceive, MockSend
from blacksheep.testing.simulator import AbstractTestSimulator
__all__ = [
"TestClient",
"AbstractTestSimulator",
"JSONContent",
"TextContent",
"FormContent",
"MockReceive",
"MockSend",
]
| [
6738,
2042,
7091,
538,
13,
3642,
658,
1330,
5178,
19746,
11,
19449,
19746,
11,
8255,
19746,
198,
6738,
2042,
7091,
538,
13,
33407,
13,
16366,
1330,
6208,
11792,
198,
6738,
2042,
7091,
538,
13,
33407,
13,
37348,
1095,
1330,
44123,
3041,
... | 3.038168 | 131 |
from pathlib import Path
import lingpy as lp
from clldutils.misc import slug
from pylexibank import Dataset as BaseDataset
from pylexibank.util import getEvoBibAsBibtex
from pylexibank import progressbar
from pylexibank import Concept, Language
import attr
@attr.s
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
18459,
9078,
355,
300,
79,
198,
198,
6738,
269,
297,
67,
26791,
13,
44374,
1330,
31065,
198,
6738,
279,
2349,
87,
571,
962,
1330,
16092,
292,
316,
355,
7308,
27354,
292,
316,
198,
6738,
279,
... | 2.966667 | 90 |