content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from os import stat
from callbacks import start
from util.serializers import EnrollmentPaymentSerializer
from util.errors import BackendError, catch_error
from util.api_service import ApiService
from util.telegram_service import TelegramService
from util.constants import EventInstance, Folder, FolderPermission, State, Enrollment, Payment
from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, user
from telegram.ext import CallbackContext
from http import HTTPStatus
# @catch_error
| [
6738,
28686,
1330,
1185,
198,
6738,
869,
10146,
1330,
923,
198,
6738,
7736,
13,
46911,
11341,
1330,
2039,
48108,
19197,
434,
32634,
7509,
198,
6738,
7736,
13,
48277,
1330,
5157,
437,
12331,
11,
4929,
62,
18224,
198,
6738,
7736,
13,
1504... | 3.886364 | 132 |
# -*- coding:utf-8 -*-
from .resnet import resnet18, resnet34
from .mobilenet import mobilenet_v2
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
220,
532,
9,
12,
198,
198,
6738,
764,
411,
3262,
1330,
581,
3262,
1507,
11,
581,
3262,
2682,
198,
6738,
764,
76,
25898,
268,
316,
1330,
17754,
268,
316,
62,
85,
17,
198
] | 2.380952 | 42 |
atan2(1,sqrt(3)) | [
39036,
17,
7,
16,
11,
31166,
17034,
7,
18,
4008
] | 1.6 | 10 |
#!python3
#encoding: utf-8
import htmlstr.HtmlWrapper
class HtmlBase(object):
"""
HTML文書文字列を生成する。
@param {str} bodyは<body>に含めるHTML文字列。
@param {str} headは<head>に含めるHTML文字列。
@param {dict} metaは<meta>に入れるデータ。次のキーを持ったdict型。'charset', 'description', 'author', 'viewport', 'title', 'icon_href', 'css_href'
"""
| [
2,
0,
29412,
18,
198,
2,
12685,
7656,
25,
3384,
69,
12,
23,
198,
11748,
27711,
2536,
13,
39,
20369,
36918,
2848,
198,
4871,
367,
20369,
14881,
7,
15252,
2599,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
11532,
23877,
229,
162,
... | 1.75 | 188 |
"""Chrome."""
from typing import List, Optional
import selenium.webdriver
from selenium.webdriver.chrome.options import Options
from surferrr.base.base_browser import BaseBrowser
ARGUMENT_HEADLESS = '--headless'
class Chrome(BaseBrowser):
"""Chrome."""
def __init__(self,
headless: bool = False,
binary_path: Optional[str] = None,
driver_path: Optional[str] = None,
arguments: Optional[List[str]] = None) -> None:
"""Initialize object.
Args:
headless (bool): Headless or not. Default to False.
binary_path (str, optional): Binary path.
driver_path (str, optional): Driver path.
arguments (List[str], optional): Arguments.
"""
super().__init__(binary_path=binary_path, arguments=arguments)
self.headless = headless
self.driver_path = driver_path if driver_path else 'chromedriver'
if self.headless:
self.arguments.append(ARGUMENT_HEADLESS)
def launch(self) -> None:
"""Launch browser."""
options = Options()
if self.binary_path:
options.binary_location = self.binary_path
for arg in self.arguments:
options.add_argument(arg)
self.driver = selenium.webdriver.Chrome(self.driver_path,
options=options)
if self.headless:
user_agent = self.get_user_agent().replace('Headless', '')
d = {
'userAgent': user_agent,
# 'platform': 'Windows',
}
self.driver.execute_cdp_cmd('Network.setUserAgentOverride', d)
| [
37811,
1925,
5998,
526,
15931,
198,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
11748,
384,
11925,
1505,
13,
12384,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
25811,
1330,
18634,
198,
198,
6738,
969,
223... | 2.167726 | 787 |
from models import StackedHourGlass, Linear
import torch
from tqdm import tqdm
import torchvision.transforms as transforms
from utils.data import ImageSequence, SurrealDataset, get_order_joint_human, Human36M
from utils import Config, get_2d_joints, get_all_32joints
from torch.utils.data import DataLoader
import os
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
import utils.viz as viz
from utils.eval import *
from matplotlib.animation import FuncAnimation
plt.ion()
config = Config('./config')
device_type = "cuda" if torch.cuda.is_available() and config.device_type == "cuda" else "cpu"
print("Using", device_type)
config_video_constraints = config.video_constraints
config_surreal = config.surreal
pretrained_path = os.path.abspath(os.path.join(os.curdir, config.hourglass.pretrained_path))
pretrained_path_linear = os.path.abspath(os.path.join(os.curdir, config.eval.linear_model))
device = torch.device(device_type)
# Stacked pre-trained model
hg_model = StackedHourGlass(config.hourglass.n_channels, config.hourglass.n_stack, config.hourglass.n_modules,
config.hourglass.n_reductions, config.hourglass.n_joints)
hg_model.to(device)
hg_model.load_state_dict(torch.load(pretrained_path, map_location=device)['model_state'])
hg_model.eval()
# 3D predictor
number_frames = 1
if config.eval.video_constraints.use:
number_frames = config.eval.video_constraints.frames_before + config.eval.video_constraints.frames_after + 1
model = Linear(input_size=32 * number_frames, hidden_size=1024, output_size=48, num_lin_block=3).to(device)
model.load_state_dict(torch.load(pretrained_path_linear, map_location=device))
model.eval()
model.to(device)
if config.eval.data.type == "sequence":
sequence = torch.utils.data.DataLoader(ImageSequence(config.eval.data.path), batch_size=config.eval.batch_size,
shuffle=False)
elif config.eval.data.type == "human":
human_dataset = Human36M(config.eval.data.path,
use_hourglass=True,
video_constraints=config.eval.video_constraints.use,
frames_before=config.eval.video_constraints.frames_before, frames_after=config.eval.video_constraints.frames_after,
test_subjects=[config.eval.data.subject], actions=[config.eval.data.action])
sequence = torch.utils.data.DataLoader(human_dataset.test_set, batch_size=config.eval.batch_size, shuffle=False)
else:
raise ValueError(config.eval.data.type + " type does not exist.")
images = []
joints_2d = []
joints_3d = []
for batch in tqdm(sequence): # size batch x 3 x 256 x 256
if config.eval.data.type == "sequence":
get_data_sequence(batch, device, hg_model, model, images, joints_2d, joints_3d, config)
elif config.eval.data.type == "human":
get_data_human(batch, device, human_dataset, model, images, joints_2d, joints_3d, config)
if len(images) > 0:
images = np.vstack(images)
joints_2d = np.vstack(joints_2d) # shape batch x 32 * 2
joints_3d = np.vstack(joints_3d) # shape batch x 32 * 3
"""
0 LFOOT, 1 RHIP, 2 RKNEE, 3 RFOOT, 4 -1, 5 -1, 6 0, 7 LHIP, 8 LKNEE, 9 -1, 10 -1, 11 -1, 12 HIP, 13 Thorax, 14 -1, 15 Neck, 16 -1, 17 RSHoulder
18 LShoulder, 19 LELBOX, 20 -1, 21 -1, 22 -1, 23 -1, 24 -1, 25 RELBOW, 26 RHAND, 27 TOP HEAD, 28 -1, 29 -1, 30 -1, 31 -1
"""
grid_dim = 3 if config.eval.data.type in ['sequence'] else 2
radius = 1 if config.eval.data.type in ['sequence'] else None
gs1 = gridspec.GridSpec(1, grid_dim)
gs1.update(wspace=-0.00, hspace=0.05) # set the spacing between axes.
# plt.axis('off')
fig = plt.gcf()
# print('Save gif...')
# anim = FuncAnimation(fig, update, frames=np.arange(0, 250), interval=200)
# anim.save('figs/sh-video-hm-2.gif', dpi=100, writer='imagemagick')
# print("Saved.")
for t in range(0, joints_2d.shape[0]):
update(t)
plt.draw()
plt.pause(0.0001)
| [
6738,
4981,
1330,
520,
6021,
43223,
47698,
11,
44800,
198,
11748,
28034,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
6738,
3384,
4487,
13,
7890,
1330,
7412,
44015,
594,
11... | 2.458591 | 1,618 |
from typing import Dict, Optional, Union, List
import datasets
from fewie.data.datasets import NER_DATASETS_ROOT
def load_dataset(
dataset_name: str,
split: str,
version: Optional[str] = None,
data_dir: Optional[str] = None,
) -> Union[datasets.Dataset, datasets.DatasetDict]:
"""Loads a dataset with a user-customized loading-script.
Args:
dataset_name: The name of the loading script, without extension name.
split: The name of the split, usually be "train", "test" or "validation".
version: The version of the dataset.
data_dir: The directory where the dataset is stored.
Returns:
The split of the given dataset, in HuggingFace format.
"""
dataset_script = (NER_DATASETS_ROOT / dataset_name).with_suffix(".py")
return datasets.load_dataset(
str(dataset_script), version, data_dir=data_dir, split=split
)
def get_label_list(labels: List[List[int]]) -> List[int]:
"""Gets a sorted list of all the unique labels from `labels`.
Args:
labels: A list of lists, each corresponding to the label-sequence of a text.
Returns:
All the unique labels the ever appear in `labels`, given in a sorted list.
Example:
Given `labels=[[0, 0, 3, 2, 5], [4, 0], [5, 2, 3]]`, returns `[0, 2, 3, 4, 5]`.
"""
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
def get_label_to_id(
dataset: datasets.Dataset, label_column_name: str
) -> Union[Dict[str, int], Dict[int, int]]:
"""Returns a dictionary the encodes labels to ids, namely integers starting from 0.
Args:
dataset: A HuggingFace-format dataset with column `label_column_name`.
label_column_name: The name of the column, in this case `ner_tags` usually.
Returns:
A label-to-id dictionary that maps labels to {0, 1, ..., #classes}.
"""
if isinstance(dataset.features[label_column_name].feature, datasets.ClassLabel):
label_list = dataset.features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(dataset[label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
return label_to_id
| [
6738,
19720,
1330,
360,
713,
11,
32233,
11,
4479,
11,
7343,
198,
198,
11748,
40522,
198,
6738,
1178,
494,
13,
7890,
13,
19608,
292,
1039,
1330,
399,
1137,
62,
35,
1404,
1921,
32716,
62,
13252,
2394,
628,
198,
4299,
3440,
62,
19608,
... | 2.610043 | 936 |
#!/usr/bin/python
import tensorflow as tf
from edward.models import Normal
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
1225,
904,
13,
27530,
1330,
14435,
198
] | 3.166667 | 24 |
from crawler.frontier import Frontier
from crawler.worker import Worker
| [
6738,
27784,
1754,
13,
8534,
959,
1330,
23281,
198,
6738,
27784,
1754,
13,
28816,
1330,
35412,
198
] | 4.235294 | 17 |
# coding: utf-8
"""
Gate API v4
Welcome to Gate.io API APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which needs authentication to trade on user's behalf. # noqa: E501
Contact: support@mail.gate.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from gate_api.api_client import ApiClient
from gate_api.exceptions import ApiTypeError, ApiValueError # noqa: F401
class SpotApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def list_currencies(self, **kwargs): # noqa: E501
"""List all currencies' details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_currencies(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.Currency]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_currencies_with_http_info(**kwargs) # noqa: E501
def list_currencies_with_http_info(self, **kwargs): # noqa: E501
"""List all currencies' details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_currencies_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: tuple(list[gate_api.Currency], status_code(int), headers(HTTPHeaderDict))
:return: If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = []
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for k, v in six.iteritems(local_var_params['kwargs']):
if k not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_currencies" % k)
local_var_params[k] = v
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/spot/currencies',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Currency]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
)
def get_currency(self, currency, **kwargs): # noqa: E501
"""Get details of a specific currency # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_currency(currency, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency: Currency name (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.Currency
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_currency_with_http_info(currency, **kwargs) # noqa: E501
def get_currency_with_http_info(self, currency, **kwargs): # noqa: E501
"""Get details of a specific currency # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_currency_with_http_info(currency, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency: Currency name (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: tuple(gate_api.Currency, status_code(int), headers(HTTPHeaderDict))
:return: If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['currency']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for k, v in six.iteritems(local_var_params['kwargs']):
if k not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method get_currency" % k)
local_var_params[k] = v
del local_var_params['kwargs']
# verify the required parameter 'currency' is set
if self.api_client.client_side_validation and (
'currency' not in local_var_params or local_var_params['currency'] is None # noqa: E501
): # noqa: E501
raise ApiValueError("Missing the required parameter `currency` when calling `get_currency`") # noqa: E501
collection_formats = {}
path_params = {}
if 'currency' in local_var_params:
path_params['currency'] = local_var_params['currency'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/spot/currencies/{currency}',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Currency', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
)
def list_currency_pairs(self, **kwargs): # noqa: E501
"""List all currency pairs supported # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_currency_pairs(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.CurrencyPair]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_currency_pairs_with_http_info(**kwargs) # noqa: E501
def list_currency_pairs_with_http_info(self, **kwargs): # noqa: E501
"""List all currency pairs supported # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_currency_pairs_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: tuple(list[gate_api.CurrencyPair], status_code(int), headers(HTTPHeaderDict))
:return: If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = []
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for k, v in six.iteritems(local_var_params['kwargs']):
if k not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_currency_pairs" % k)
local_var_params[k] = v
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/spot/currency_pairs',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[CurrencyPair]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
)
def get_currency_pair(self, currency_pair, **kwargs): # noqa: E501
"""Get details of a specifc order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_currency_pair(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.CurrencyPair
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_currency_pair_with_http_info(currency_pair, **kwargs) # noqa: E501
def get_currency_pair_with_http_info(self, currency_pair, **kwargs): # noqa: E501
"""Get details of a specifc order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_currency_pair_with_http_info(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: tuple(gate_api.CurrencyPair, status_code(int), headers(HTTPHeaderDict))
:return: If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['currency_pair']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for k, v in six.iteritems(local_var_params['kwargs']):
if k not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method get_currency_pair" % k)
local_var_params[k] = v
del local_var_params['kwargs']
# verify the required parameter 'currency_pair' is set
if self.api_client.client_side_validation and (
'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `currency_pair` when calling `get_currency_pair`"
) # noqa: E501
collection_formats = {}
path_params = {}
if 'currency_pair' in local_var_params:
path_params['currency_pair'] = local_var_params['currency_pair'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/spot/currency_pairs/{currency_pair}',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CurrencyPair', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
)
def list_tickers(self, **kwargs): # noqa: E501
"""Retrieve ticker information # noqa: E501
Return only related data if `currency_pair` is specified; otherwise return all of them # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tickers(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.Ticker]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_tickers_with_http_info(**kwargs) # noqa: E501
def list_tickers_with_http_info(self, **kwargs): # noqa: E501
"""Retrieve ticker information # noqa: E501
Return only related data if `currency_pair` is specified; otherwise return all of them # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tickers_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: tuple(list[gate_api.Ticker], status_code(int), headers(HTTPHeaderDict))
:return: If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['currency_pair']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for k, v in six.iteritems(local_var_params['kwargs']):
if k not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_tickers" % k)
local_var_params[k] = v
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None: # noqa: E501
query_params.append(('currency_pair', local_var_params['currency_pair'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/spot/tickers',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Ticker]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
)
def list_order_book(self, currency_pair, **kwargs): # noqa: E501
"""Retrieve order book # noqa: E501
Order book will be sorted by price from high to low on bids; low to high on asks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_order_book(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair (required)
:param str interval: Order depth. 0 means no aggregation is applied. default to 0
:param int limit: Maximum number of order depth data in asks or bids
:param bool with_id: Return order book ID
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.OrderBook
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_order_book_with_http_info(currency_pair, **kwargs) # noqa: E501
def list_order_book_with_http_info(self, currency_pair, **kwargs): # noqa: E501
"""Retrieve order book # noqa: E501
Order book will be sorted by price from high to low on bids; low to high on asks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_order_book_with_http_info(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair (required)
:param str interval: Order depth. 0 means no aggregation is applied. default to 0
:param int limit: Maximum number of order depth data in asks or bids
:param bool with_id: Return order book ID
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: tuple(gate_api.OrderBook, status_code(int), headers(HTTPHeaderDict))
:return: If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['currency_pair', 'interval', 'limit', 'with_id']
all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
for k, v in six.iteritems(local_var_params['kwargs']):
if k not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_order_book" % k)
local_var_params[k] = v
del local_var_params['kwargs']
# verify the required parameter 'currency_pair' is set
if self.api_client.client_side_validation and (
'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `currency_pair` when calling `list_order_book`"
) # noqa: E501
if (
self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1
): # noqa: E501
raise ApiValueError(
"Invalid value for parameter `limit` when calling `list_order_book`, must be a value greater than or equal to `1`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None: # noqa: E501
query_params.append(('currency_pair', local_var_params['currency_pair'])) # noqa: E501
if 'interval' in local_var_params and local_var_params['interval'] is not None: # noqa: E501
query_params.append(('interval', local_var_params['interval'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'with_id' in local_var_params and local_var_params['with_id'] is not None: # noqa: E501
query_params.append(('with_id', local_var_params['with_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/spot/order_book',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrderBook', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
)
def list_trades(self, currency_pair, **kwargs): # noqa: E501
"""Retrieve market trades # noqa: E501
You can use `from` and `to` to query by time range, or use `last_id` by scrolling page. The default behavior is by time range. Scrolling query using `last_id` is not recommended any more. If `last_id` is specified, time range query parameters will be ignored. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_trades(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair (required)
:param int limit: Maximum number of records to be returned in a single list
:param str last_id: Specify list staring point using the `id` of last record in previous list-query results
:param bool reverse: Whether the id of records to be retrieved should be smaller than the last_id specified- true: Retrieve records where id is smaller than the specified last_id- false: Retrieve records where id is larger than the specified last_idDefault to false. When `last_id` is specified. Set `reverse` to `true` to trace back trading history; `false` to retrieve latest tradings. No effect if `last_id` is not specified.
:param int _from: Start timestamp of the query
:param int to: Time range ending, default to current time
:param int page: Page number
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.Trade]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_trades_with_http_info(currency_pair, **kwargs) # noqa: E501
    def list_trades_with_http_info(self, currency_pair, **kwargs):  # noqa: E501
        """Retrieve market trades  # noqa: E501
        You can use `from` and `to` to query by time range, or use `last_id` by scrolling page. The default behavior is by time range. Scrolling query using `last_id` is not recommended any more. If `last_id` is specified, time range query parameters will be ignored.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_trades_with_http_info(currency_pair, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str currency_pair: Currency pair (required)
        :param int limit: Maximum number of records to be returned in a single list
        :param str last_id: Specify list staring point using the `id` of last record in previous list-query results
        :param bool reverse: Whether the id of records to be retrieved should be smaller than the last_id specified- true: Retrieve records where id is smaller than the specified last_id- false: Retrieve records where id is larger than the specified last_idDefault to false. When `last_id` is specified. Set `reverse` to `true` to trace back trading history; `false` to retrieve latest tradings. No effect if `last_id` is not specified.
        :param int _from: Start timestamp of the query
        :param int to: Time range ending, default to current time
        :param int page: Page number
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[gate_api.Trade], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots `self`, the positional arguments and `kwargs`;
        # recognised keyword arguments are merged into this dict below so the
        # rest of the method sees one flat name -> value mapping.
        local_var_params = locals()
        # Endpoint-specific keyword arguments ...
        all_params = ['currency_pair', 'limit', 'last_id', 'reverse', '_from', 'to', 'page']
        # ... plus the transport options common to every generated method.
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Reject unknown keyword arguments early, then flatten the known ones.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_trades" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'currency_pair' is set
        if self.api_client.client_side_validation and (
            'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `currency_pair` when calling `list_trades`"
            )  # noqa: E501
        # Client-side range checks mirroring the documented API constraints;
        # only applied when client_side_validation is enabled.
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 1000
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_trades`, must be a value less than or equal to `1000`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_trades`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `page` when calling `list_trades`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Build the query string from every recognised, non-None parameter.
        # Note that `_from` is sent on the wire as `from` (a Python keyword).
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'last_id' in local_var_params and local_var_params['last_id'] is not None:  # noqa: E501
            query_params.append(('last_id', local_var_params['last_id']))  # noqa: E501
        if 'reverse' in local_var_params and local_var_params['reverse'] is not None:  # noqa: E501
            query_params.append(('reverse', local_var_params['reverse']))  # noqa: E501
        if '_from' in local_var_params and local_var_params['_from'] is not None:  # noqa: E501
            query_params.append(('from', local_var_params['_from']))  # noqa: E501
        if 'to' in local_var_params and local_var_params['to'] is not None:  # noqa: E501
            query_params.append(('to', local_var_params['to']))  # noqa: E501
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting: empty list means no credentials are attached
        auth_settings = []  # noqa: E501
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/spot/trades',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Trade]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def list_candlesticks(self, currency_pair, **kwargs): # noqa: E501
"""Market candlesticks # noqa: E501
Maximum of 1000 points can be returned in a query. Be sure not to exceed the limit when specifying from, to and interval # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_candlesticks(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair (required)
:param int limit: Maximum recent data points to return. `limit` is conflicted with `from` and `to`. If either `from` or `to` is specified, request will be rejected.
:param int _from: Start time of candlesticks, formatted in Unix timestamp in seconds. Default to`to - 100 * interval` if not specified
:param int to: End time of candlesticks, formatted in Unix timestamp in seconds. Default to current time
:param str interval: Interval time between data points
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[list[str]]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_candlesticks_with_http_info(currency_pair, **kwargs) # noqa: E501
    def list_candlesticks_with_http_info(self, currency_pair, **kwargs):  # noqa: E501
        """Market candlesticks  # noqa: E501
        Maximum of 1000 points can be returned in a query. Be sure not to exceed the limit when specifying from, to and interval  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_candlesticks_with_http_info(currency_pair, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str currency_pair: Currency pair (required)
        :param int limit: Maximum recent data points to return. `limit` is conflicted with `from` and `to`. If either `from` or `to` is specified, request will be rejected.
        :param int _from: Start time of candlesticks, formatted in Unix timestamp in seconds. Default to`to - 100 * interval` if not specified
        :param int to: End time of candlesticks, formatted in Unix timestamp in seconds. Default to current time
        :param str interval: Interval time between data points
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[list[str]], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots `self`, the positional arguments and `kwargs`;
        # recognised keyword arguments are merged into this dict below so the
        # rest of the method sees one flat name -> value mapping.
        local_var_params = locals()
        # Endpoint-specific keyword arguments ...
        all_params = ['currency_pair', 'limit', '_from', 'to', 'interval']
        # ... plus the transport options common to every generated method.
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Reject unknown keyword arguments early, then flatten the known ones.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_candlesticks" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'currency_pair' is set
        if self.api_client.client_side_validation and (
            'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `currency_pair` when calling `list_candlesticks`"
            )  # noqa: E501
        # Client-side range check mirroring the documented 1000-point cap;
        # only applied when client_side_validation is enabled.
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 1000
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_candlesticks`, must be a value less than or equal to `1000`"
            )  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Build the query string from every recognised, non-None parameter.
        # Note that `_from` is sent on the wire as `from` (a Python keyword).
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if '_from' in local_var_params and local_var_params['_from'] is not None:  # noqa: E501
            query_params.append(('from', local_var_params['_from']))  # noqa: E501
        if 'to' in local_var_params and local_var_params['to'] is not None:  # noqa: E501
            query_params.append(('to', local_var_params['to']))  # noqa: E501
        if 'interval' in local_var_params and local_var_params['interval'] is not None:  # noqa: E501
            query_params.append(('interval', local_var_params['interval']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting: empty list means no credentials are attached
        auth_settings = []  # noqa: E501
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/spot/candlesticks',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[list[str]]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def get_fee(self, **kwargs): # noqa: E501
"""Query user trading fee rates # noqa: E501
This API is deprecated in favour of new fee retrieving API `/wallet/fee`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_fee(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Specify a currency pair to retrieve precise fee rate This field is optional. In most cases, the fee rate is identical among all currency pairs
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.TradeFee
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_fee_with_http_info(**kwargs) # noqa: E501
    def get_fee_with_http_info(self, **kwargs):  # noqa: E501
        """Query user trading fee rates  # noqa: E501
        This API is deprecated in favour of new fee retrieving API `/wallet/fee`.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_fee_with_http_info(async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str currency_pair: Specify a currency pair to retrieve precise fee rate This field is optional. In most cases, the fee rate is identical among all currency pairs
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(gate_api.TradeFee, status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots `self` and `kwargs`; recognised keyword
        # arguments are merged into this dict below so the rest of the
        # method sees one flat name -> value mapping.
        local_var_params = locals()
        # Endpoint-specific keyword arguments ...
        all_params = ['currency_pair']
        # ... plus the transport options common to every generated method.
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Reject unknown keyword arguments early, then flatten the known ones.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method get_fee" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        # Build the query string from every recognised, non-None parameter.
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting: this endpoint is signed with the `apiv4` scheme
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/spot/fee',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TradeFee',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def list_spot_accounts(self, **kwargs): # noqa: E501
"""List spot accounts # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_spot_accounts(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency: Retrieve data of the specified currency
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.SpotAccount]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_spot_accounts_with_http_info(**kwargs) # noqa: E501
    def list_spot_accounts_with_http_info(self, **kwargs):  # noqa: E501
        """List spot accounts  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_spot_accounts_with_http_info(async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str currency: Retrieve data of the specified currency
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[gate_api.SpotAccount], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots `self` and `kwargs`; recognised keyword
        # arguments are merged into this dict below so the rest of the
        # method sees one flat name -> value mapping.
        local_var_params = locals()
        # Endpoint-specific keyword arguments ...
        all_params = ['currency']
        # ... plus the transport options common to every generated method.
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Reject unknown keyword arguments early, then flatten the known ones.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_spot_accounts" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        # Build the query string from every recognised, non-None parameter.
        query_params = []
        if 'currency' in local_var_params and local_var_params['currency'] is not None:  # noqa: E501
            query_params.append(('currency', local_var_params['currency']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting: this endpoint is signed with the `apiv4` scheme
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/spot/accounts',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[SpotAccount]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def create_batch_orders(self, order, **kwargs): # noqa: E501
"""Create a batch of orders # noqa: E501
Batch orders requirements: 1. custom order field `text` is required 2. At most 4 currency pairs, maximum 10 orders each, are allowed in one request 3. No mixture of spot orders and margin orders, i.e. `account` must be identical for all orders # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_batch_orders(order, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param list[Order] order: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.BatchOrder]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_batch_orders_with_http_info(order, **kwargs) # noqa: E501
    def create_batch_orders_with_http_info(self, order, **kwargs):  # noqa: E501
        """Create a batch of orders  # noqa: E501
        Batch orders requirements: 1. custom order field `text` is required 2. At most 4 currency pairs, maximum 10 orders each, are allowed in one request 3. No mixture of spot orders and margin orders, i.e. `account` must be identical for all orders  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_batch_orders_with_http_info(order, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param list[Order] order: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[gate_api.BatchOrder], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots `self`, the positional arguments and `kwargs`;
        # recognised keyword arguments are merged into this dict below so the
        # rest of the method sees one flat name -> value mapping.
        local_var_params = locals()
        # Endpoint-specific keyword arguments ...
        all_params = ['order']
        # ... plus the transport options common to every generated method.
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Reject unknown keyword arguments early, then flatten the known ones.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method create_batch_orders" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'order' is set
        if self.api_client.client_side_validation and (
            'order' not in local_var_params or local_var_params['order'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `order` when calling `create_batch_orders`"
            )  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The order list is sent as the JSON request body, not as query/form data.
        body_params = None
        if 'order' in local_var_params:
            body_params = local_var_params['order']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']
        )  # noqa: E501
        # Authentication setting: this endpoint is signed with the `apiv4` scheme
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/spot/batch_orders',
            'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[BatchOrder]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def list_all_open_orders(self, **kwargs): # noqa: E501
"""List all open orders # noqa: E501
List open orders in all currency pairs. Note that pagination parameters affect record number in each currency pair's open order list. No pagination is applied to the number of currency pairs returned. All currency pairs with open orders will be returned. Spot and margin orders are returned by default. To list cross margin orders, `account` must be set to `cross_margin` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_all_open_orders(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param int page: Page number
:param int limit: Maximum number of records returned in one page in each currency pair
:param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.OpenOrders]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_all_open_orders_with_http_info(**kwargs) # noqa: E501
    def list_all_open_orders_with_http_info(self, **kwargs):  # noqa: E501
        """List all open orders  # noqa: E501
        List open orders in all currency pairs. Note that pagination parameters affect record number in each currency pair's open order list. No pagination is applied to the number of currency pairs returned. All currency pairs with open orders will be returned. Spot and margin orders are returned by default. To list cross margin orders, `account` must be set to `cross_margin`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_all_open_orders_with_http_info(async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param int page: Page number
        :param int limit: Maximum number of records returned in one page in each currency pair
        :param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[gate_api.OpenOrders], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots `self` and `kwargs`; recognised keyword
        # arguments are merged into this dict below so the rest of the
        # method sees one flat name -> value mapping.
        local_var_params = locals()
        # Endpoint-specific keyword arguments ...
        all_params = ['page', 'limit', 'account']
        # ... plus the transport options common to every generated method.
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Reject unknown keyword arguments early, then flatten the known ones.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_all_open_orders" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # Client-side range checks mirroring the documented API constraints;
        # only applied when client_side_validation is enabled.
        if (
            self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `page` when calling `list_all_open_orders`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 100
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_all_open_orders`, must be a value less than or equal to `100`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_all_open_orders`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Build the query string from every recognised, non-None parameter.
        query_params = []
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting: this endpoint is signed with the `apiv4` scheme
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/spot/open_orders',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[OpenOrders]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def list_orders(self, currency_pair, status, **kwargs): # noqa: E501
"""List orders # noqa: E501
Spot and margin orders are returned by default. If cross margin orders are needed, `account` must be set to `cross_margin` When `status` is `open`, i.e., listing open orders, only pagination parameters `page` and `limit` are supported and `limit` cannot be larger than 100. Query by `side` and time range parameters `from` and `to` are not supported. When `status` is `finished`, i.e., listing finished orders, pagination parameters, time range parameters `from` and `to`, and `side` parameters are all supported. Time range parameters are handled as order finish time. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_orders(currency_pair, status, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Retrieve results with specified currency pair. It is required for open orders, but optional for finished ones. (required)
:param str status: List orders based on status `open` - order is waiting to be filled `finished` - order has been filled or cancelled (required)
:param int page: Page number
:param int limit: Maximum number of records to be returned. If `status` is `open`, maximum of `limit` is 100
:param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
:param int _from: Start timestamp of the query
:param int to: Time range ending, default to current time
:param str side: All bids or asks. Both included if not specified
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.Order]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_orders_with_http_info(currency_pair, status, **kwargs) # noqa: E501
    def list_orders_with_http_info(self, currency_pair, status, **kwargs):  # noqa: E501
        """List orders  # noqa: E501
        Spot and margin orders are returned by default. If cross margin orders are needed, `account` must be set to `cross_margin` When `status` is `open`, i.e., listing open orders, only pagination parameters `page` and `limit` are supported and `limit` cannot be larger than 100. Query by `side` and time range parameters `from` and `to` are not supported. When `status` is `finished`, i.e., listing finished orders, pagination parameters, time range parameters `from` and `to`, and `side` parameters are all supported. Time range parameters are handled as order finish time.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_orders_with_http_info(currency_pair, status, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str currency_pair: Retrieve results with specified currency pair. It is required for open orders, but optional for finished ones. (required)
        :param str status: List orders based on status  `open` - order is waiting to be filled `finished` - order has been filled or cancelled  (required)
        :param int page: Page number
        :param int limit: Maximum number of records to be returned. If `status` is `open`, maximum of `limit` is 100
        :param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
        :param int _from: Start timestamp of the query
        :param int to: Time range ending, default to current time
        :param str side: All bids or asks. Both included if not specified
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[gate_api.Order], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots the named arguments plus the raw `kwargs` dict so
        # every parameter can be handled uniformly below.
        local_var_params = locals()
        all_params = ['currency_pair', 'status', 'page', 'limit', 'account', '_from', 'to', 'side']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Merge keyword arguments into the parameter map, rejecting any name
        # this endpoint does not recognize.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_orders" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'currency_pair' is set
        if self.api_client.client_side_validation and (
            'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `currency_pair` when calling `list_orders`"
            )  # noqa: E501
        # verify the required parameter 'status' is set
        if self.api_client.client_side_validation and (
            'status' not in local_var_params or local_var_params['status'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError("Missing the required parameter `status` when calling `list_orders`")  # noqa: E501
        # Range checks mirror the server-side constraints: page >= 1 and
        # 1 <= limit <= 1000.
        if (
            self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `page` when calling `list_orders`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 1000
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_orders`, must be a value less than or equal to `1000`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_orders`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        # Assemble the pieces of the HTTP request; only parameters that were
        # actually supplied become query parameters.
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        if 'status' in local_var_params and local_var_params['status'] is not None:  # noqa: E501
            query_params.append(('status', local_var_params['status']))  # noqa: E501
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        # `_from` is the Python-side name (avoids the `from` keyword); the
        # wire-level query key is `from`.
        if '_from' in local_var_params and local_var_params['_from'] is not None:  # noqa: E501
            query_params.append(('from', local_var_params['_from']))  # noqa: E501
        if 'to' in local_var_params and local_var_params['to'] is not None:  # noqa: E501
            query_params.append(('to', local_var_params['to']))  # noqa: E501
        if 'side' in local_var_params and local_var_params['side'] is not None:  # noqa: E501
            query_params.append(('side', local_var_params['side']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate execution (synchronous or async) to the shared API client.
        return self.api_client.call_api(
            '/spot/orders',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Order]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def create_order(self, order, **kwargs): # noqa: E501
"""Create an order # noqa: E501
You can place orders with spot, margin or cross margin account through setting the `account `field. It defaults to `spot`, which means spot account is used to place orders. When margin account is used, i.e., `account` is `margin`, `auto_borrow` field can be set to `true` to enable the server to borrow the amount lacked using `POST /margin/loans` when your account's balance is not enough. Whether margin orders' fill will be used to repay margin loans automatically is determined by the auto repayment setting in your **margin account**, which can be updated or queried using `/margin/auto_repay` API. When cross margin account is used, i.e., `account` is `cross_margin`, `auto_borrow` can also be enabled to achieve borrowing the insufficient amount automatically if cross account's balance is not enough. But it differs from margin account that automatic repayment is determined by order's `auto_repay` field and only current order's fill will be used to repay cross margin loans. Automatic repayment will be triggered when the order is finished, i.e., its status is either `cancelled` or `closed`. **Order status** An order waiting to be filled is `open`, and it stays `open` until it is filled totally. If fully filled, order is finished and its status turns to `closed`.If the order is cancelled before it is totally filled, whether or not partially filled, its status is `cancelled`. **Iceberg order** `iceberg` field can be used to set the amount shown. Set to `-1` to hide the order completely. Note that the hidden part's fee will be charged using taker's fee rate. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_order(order, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param Order order: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.Order
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_order_with_http_info(order, **kwargs) # noqa: E501
    def create_order_with_http_info(self, order, **kwargs):  # noqa: E501
        """Create an order  # noqa: E501
        You can place orders with spot, margin or cross margin account through setting the `account `field. It defaults to `spot`, which means spot account is used to place orders. When margin account is used, i.e., `account` is `margin`, `auto_borrow` field can be set to `true` to enable the server to borrow the amount lacked using `POST /margin/loans` when your account's balance is not enough. Whether margin orders' fill will be used to repay margin loans automatically is determined by the auto repayment setting in your **margin account**, which can be updated or queried using `/margin/auto_repay` API. When cross margin account is used, i.e., `account` is `cross_margin`, `auto_borrow` can also be enabled to achieve borrowing the insufficient amount automatically if cross account's balance is not enough. But it differs from margin account that automatic repayment is determined by order's `auto_repay` field and only current order's fill will be used to repay cross margin loans. Automatic repayment will be triggered when the order is finished, i.e., its status is either `cancelled` or `closed`. **Order status** An order waiting to be filled is `open`, and it stays `open` until it is filled totally. If fully filled, order is finished and its status turns to `closed`.If the order is cancelled before it is totally filled, whether or not partially filled, its status is `cancelled`. **Iceberg order** `iceberg` field can be used to set the amount shown. Set to `-1` to hide the order completely. Note that the hidden part's fee will be charged using taker's fee rate.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_order_with_http_info(order, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param Order order: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(gate_api.Order, status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots the named arguments plus the raw `kwargs` dict.
        local_var_params = locals()
        all_params = ['order']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Merge keyword arguments, rejecting any name this endpoint does not
        # recognize.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method create_order" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'order' is set
        if self.api_client.client_side_validation and (
            'order' not in local_var_params or local_var_params['order'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError("Missing the required parameter `order` when calling `create_order`")  # noqa: E501
        # Assemble the HTTP request: the order object is sent as the JSON
        # request body; there are no path or query parameters.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'order' in local_var_params:
            body_params = local_var_params['order']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']
        )  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate execution (synchronous or async) to the shared API client.
        return self.api_client.call_api(
            '/spot/orders',
            'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Order',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def cancel_orders(self, currency_pair, **kwargs): # noqa: E501
"""Cancel all `open` orders in specified currency pair # noqa: E501
If `account` is not set, all open orders, including spot, margin and cross margin ones, will be cancelled. You can set `account` to cancel only orders within the specified account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_orders(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Currency pair (required)
:param str side: All bids or asks. Both included if not specified
:param str account: Specify account type. Default to all account types being included
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.Order]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cancel_orders_with_http_info(currency_pair, **kwargs) # noqa: E501
    def cancel_orders_with_http_info(self, currency_pair, **kwargs):  # noqa: E501
        """Cancel all `open` orders in specified currency pair  # noqa: E501
        If `account` is not set, all open orders, including spot, margin and cross margin ones, will be cancelled. You can set `account` to cancel only orders within the specified account  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cancel_orders_with_http_info(currency_pair, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str currency_pair: Currency pair (required)
        :param str side: All bids or asks. Both included if not specified
        :param str account: Specify account type. Default to all account types being included
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[gate_api.Order], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots the named arguments plus the raw `kwargs` dict.
        local_var_params = locals()
        all_params = ['currency_pair', 'side', 'account']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Merge keyword arguments, rejecting any name this endpoint does not
        # recognize.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method cancel_orders" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'currency_pair' is set
        if self.api_client.client_side_validation and (
            'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `currency_pair` when calling `cancel_orders`"
            )  # noqa: E501
        # Assemble the HTTP request; only supplied parameters become query
        # parameters. Cancellation is expressed as DELETE on the collection.
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        if 'side' in local_var_params and local_var_params['side'] is not None:  # noqa: E501
            query_params.append(('side', local_var_params['side']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate execution (synchronous or async) to the shared API client.
        return self.api_client.call_api(
            '/spot/orders',
            'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Order]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def cancel_batch_orders(self, cancel_order, **kwargs): # noqa: E501
"""Cancel a batch of orders with an ID list # noqa: E501
Multiple currency pairs can be specified, but maximum 20 orders are allowed per request # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_batch_orders(cancel_order, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param list[CancelOrder] cancel_order: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.CancelOrderResult]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cancel_batch_orders_with_http_info(cancel_order, **kwargs) # noqa: E501
    def cancel_batch_orders_with_http_info(self, cancel_order, **kwargs):  # noqa: E501
        """Cancel a batch of orders with an ID list  # noqa: E501
        Multiple currency pairs can be specified, but maximum 20 orders are allowed per request  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cancel_batch_orders_with_http_info(cancel_order, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param list[CancelOrder] cancel_order: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(list[gate_api.CancelOrderResult], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots the named arguments plus the raw `kwargs` dict.
        local_var_params = locals()
        all_params = ['cancel_order']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Merge keyword arguments, rejecting any name this endpoint does not
        # recognize.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method cancel_batch_orders" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'cancel_order' is set
        if self.api_client.client_side_validation and (
            'cancel_order' not in local_var_params or local_var_params['cancel_order'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `cancel_order` when calling `cancel_batch_orders`"
            )  # noqa: E501
        # Assemble the HTTP request: the cancellation list travels as the
        # JSON request body; no path or query parameters.
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'cancel_order' in local_var_params:
            body_params = local_var_params['cancel_order']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']
        )  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate execution (synchronous or async) to the shared API client.
        return self.api_client.call_api(
            '/spot/cancel_batch_orders',
            'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[CancelOrderResult]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def get_order(self, order_id, currency_pair, **kwargs): # noqa: E501
"""Get a single order # noqa: E501
Spot and margin orders are queried by default. If cross margin orders are needed, `account` must be set to `cross_margin` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_order(order_id, currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str order_id: Order ID returned, or user custom ID(i.e., `text` field). Operations based on custom ID are accepted only in the first 30 minutes after order creation.After that, only order ID is accepted. (required)
:param str currency_pair: Currency pair (required)
:param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.Order
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_order_with_http_info(order_id, currency_pair, **kwargs) # noqa: E501
    def get_order_with_http_info(self, order_id, currency_pair, **kwargs):  # noqa: E501
        """Get a single order  # noqa: E501
        Spot and margin orders are queried by default. If cross margin orders are needed, `account` must be set to `cross_margin`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_order_with_http_info(order_id, currency_pair, async_req=True)
        >>> result = thread.get()
        :param bool async_req: execute request asynchronously
        :param str order_id: Order ID returned, or user custom ID(i.e., `text` field). Operations based on custom ID are accepted only in the first 30 minutes after order creation.After that, only order ID is accepted. (required)
        :param str currency_pair: Currency pair (required)
        :param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(gate_api.Order, status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() snapshots the named arguments plus the raw `kwargs` dict.
        local_var_params = locals()
        all_params = ['order_id', 'currency_pair', 'account']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Merge keyword arguments, rejecting any name this endpoint does not
        # recognize.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method get_order" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'order_id' is set
        if self.api_client.client_side_validation and (
            'order_id' not in local_var_params or local_var_params['order_id'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError("Missing the required parameter `order_id` when calling `get_order`")  # noqa: E501
        # verify the required parameter 'currency_pair' is set
        if self.api_client.client_side_validation and (
            'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError("Missing the required parameter `currency_pair` when calling `get_order`")  # noqa: E501
        # Assemble the HTTP request: order_id is interpolated into the URL
        # path, the remaining parameters go into the query string.
        collection_formats = {}
        path_params = {}
        if 'order_id' in local_var_params:
            path_params['order_id'] = local_var_params['order_id']  # noqa: E501
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate execution (synchronous or async) to the shared API client.
        return self.api_client.call_api(
            '/spot/orders/{order_id}',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Order',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def cancel_order(self, order_id, currency_pair, **kwargs): # noqa: E501
"""Cancel a single order # noqa: E501
Spot and margin orders are cancelled by default. If trying to cancel cross margin orders, `account` must be set to `cross_margin` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_order(order_id, currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str order_id: Order ID returned, or user custom ID(i.e., `text` field). Operations based on custom ID are accepted only in the first 30 minutes after order creation.After that, only order ID is accepted. (required)
:param str currency_pair: Currency pair (required)
:param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.Order
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cancel_order_with_http_info(order_id, currency_pair, **kwargs) # noqa: E501
    def cancel_order_with_http_info(self, order_id, currency_pair, **kwargs):  # noqa: E501
        """Cancel a single order  # noqa: E501

        Spot and margin orders are cancelled by default. If trying to cancel cross margin orders, `account` must be set to `cross_margin`  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.cancel_order_with_http_info(order_id, currency_pair, async_req=True)
        >>> result = thread.get()

        :param bool async_req: execute request asynchronously
        :param str order_id: Order ID returned, or user custom ID(i.e., `text` field). Operations based on custom ID are accepted only in the first 30 minutes after order creation.After that, only order ID is accepted. (required)
        :param str currency_pair: Currency pair (required)
        :param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response
            data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request
            timeout. It can also be a pair (tuple) of
            (connection, read) timeouts.
        :rtype: tuple(gate_api.Order, status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
            returns the request thread.
        """
        # locals() taken at entry captures self, the declared parameters and
        # the raw kwargs dict; recognised kwargs are flattened into it below.
        local_var_params = locals()
        all_params = ['order_id', 'currency_pair', 'account']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        # Reject any keyword argument that is neither an API parameter nor a
        # client transport option.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method cancel_order" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'order_id' is set
        if self.api_client.client_side_validation and (
            'order_id' not in local_var_params or local_var_params['order_id'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError("Missing the required parameter `order_id` when calling `cancel_order`")  # noqa: E501
        # verify the required parameter 'currency_pair' is set
        if self.api_client.client_side_validation and (
            'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `currency_pair` when calling `cancel_order`"
            )  # noqa: E501
        collection_formats = {}
        # order_id is interpolated into the URL path; the rest go on the query string.
        path_params = {}
        if 'order_id' in local_var_params:
            path_params['order_id'] = local_var_params['order_id']  # noqa: E501
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        # Delegate the actual DELETE request (and optional async dispatch)
        # to the shared ApiClient.
        return self.api_client.call_api(
            '/spot/orders/{order_id}',
            'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Order',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def list_my_trades(self, currency_pair, **kwargs): # noqa: E501
"""List personal trading history # noqa: E501
Spot and margin trades are queried by default. If cross margin trades are needed, `account` must be set to `cross_margin` You can also set `from` and(or) `to` to query by time range Time range parameters are handled as order finish time. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_my_trades(currency_pair, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str currency_pair: Retrieve results with specified currency pair. It is required for open orders, but optional for finished ones. (required)
:param int limit: Maximum number of records to be returned in a single list
:param int page: Page number
:param str order_id: Filter trades with specified order ID. `currency_pair` is also required if this field is present
:param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
:param int _from: Start timestamp of the query
:param int to: Time range ending, default to current time
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.Trade]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_my_trades_with_http_info(currency_pair, **kwargs) # noqa: E501
    def list_my_trades_with_http_info(self, currency_pair, **kwargs):  # noqa: E501
        """List personal trading history  # noqa: E501

        Spot and margin trades are queried by default. If cross margin trades are needed, `account` must be set to `cross_margin` You can also set `from` and(or) `to` to query by time range Time range parameters are handled as order finish time.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_my_trades_with_http_info(currency_pair, async_req=True)
        >>> result = thread.get()

        :param bool async_req: execute request asynchronously
        :param str currency_pair: Retrieve results with specified currency pair. It is required for open orders, but optional for finished ones. (required)
        :param int limit: Maximum number of records to be returned in a single list
        :param int page: Page number
        :param str order_id: Filter trades with specified order ID. `currency_pair` is also required if this field is present
        :param str account: Specify operation account. Default to spot and margin account if not specified. Set to `cross_margin` to operate against margin account
        :param int _from: Start timestamp of the query
        :param int to: Time range ending, default to current time
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response
            data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request
            timeout. It can also be a pair (tuple) of
            (connection, read) timeouts.
        :rtype: tuple(list[gate_api.Trade], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
            returns the request thread.
        """
        # locals() taken at entry captures self, the declared parameters and
        # the raw kwargs dict; recognised kwargs are flattened into it below.
        local_var_params = locals()
        all_params = ['currency_pair', 'limit', 'page', 'order_id', 'account', '_from', 'to']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method list_my_trades" % k)
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'currency_pair' is set
        if self.api_client.client_side_validation and (
            'currency_pair' not in local_var_params or local_var_params['currency_pair'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `currency_pair` when calling `list_my_trades`"
            )  # noqa: E501
        # Client-side range checks mirror the server's documented limits.
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 1000
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_my_trades`, must be a value less than or equal to `1000`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_my_trades`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'page' in local_var_params and local_var_params['page'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `page` when calling `list_my_trades`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'currency_pair' in local_var_params and local_var_params['currency_pair'] is not None:  # noqa: E501
            query_params.append(('currency_pair', local_var_params['currency_pair']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'order_id' in local_var_params and local_var_params['order_id'] is not None:  # noqa: E501
            query_params.append(('order_id', local_var_params['order_id']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        # Python parameter `_from` maps to wire query key `from`
        # (a reserved word in Python).
        if '_from' in local_var_params and local_var_params['_from'] is not None:  # noqa: E501
            query_params.append(('from', local_var_params['_from']))  # noqa: E501
        if 'to' in local_var_params and local_var_params['to'] is not None:  # noqa: E501
            query_params.append(('to', local_var_params['to']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        return self.api_client.call_api(
            '/spot/my_trades',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Trade]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def list_spot_price_triggered_orders(self, status, **kwargs): # noqa: E501
"""Retrieve running auto order list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_spot_price_triggered_orders(status, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str status: Only list the orders with this status (required)
:param str market: Currency pair
:param str account: Trading account
:param int limit: Maximum number of records to be returned in a single list
:param int offset: List offset, starting from 0
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.SpotPriceTriggeredOrder]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_spot_price_triggered_orders_with_http_info(status, **kwargs) # noqa: E501
    def list_spot_price_triggered_orders_with_http_info(self, status, **kwargs):  # noqa: E501
        """Retrieve running auto order list  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_spot_price_triggered_orders_with_http_info(status, async_req=True)
        >>> result = thread.get()

        :param bool async_req: execute request asynchronously
        :param str status: Only list the orders with this status (required)
        :param str market: Currency pair
        :param str account: Trading account
        :param int limit: Maximum number of records to be returned in a single list
        :param int offset: List offset, starting from 0
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response
            data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request
            timeout. It can also be a pair (tuple) of
            (connection, read) timeouts.
        :rtype: tuple(list[gate_api.SpotPriceTriggeredOrder], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
            returns the request thread.
        """
        # locals() taken at entry captures self, the declared parameters and
        # the raw kwargs dict; recognised kwargs are flattened into it below.
        local_var_params = locals()
        all_params = ['status', 'market', 'account', 'limit', 'offset']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'" " to method list_spot_price_triggered_orders" % k
                )
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'status' is set
        if self.api_client.client_side_validation and (
            'status' not in local_var_params or local_var_params['status'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `status` when calling `list_spot_price_triggered_orders`"
            )  # noqa: E501
        # Client-side range checks mirror the server's documented limits.
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 1000
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_spot_price_triggered_orders`, must be a value less than or equal to `1000`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `limit` when calling `list_spot_price_triggered_orders`, must be a value greater than or equal to `1`"
            )  # noqa: E501
        if (
            self.api_client.client_side_validation and 'offset' in local_var_params and local_var_params['offset'] < 0
        ):  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `offset` when calling `list_spot_price_triggered_orders`, must be a value greater than or equal to `0`"
            )  # noqa: E501
        collection_formats = {}
        path_params = {}
        # All filters travel on the query string for this GET endpoint.
        query_params = []
        if 'status' in local_var_params and local_var_params['status'] is not None:  # noqa: E501
            query_params.append(('status', local_var_params['status']))  # noqa: E501
        if 'market' in local_var_params and local_var_params['market'] is not None:  # noqa: E501
            query_params.append(('market', local_var_params['market']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'offset' in local_var_params and local_var_params['offset'] is not None:  # noqa: E501
            query_params.append(('offset', local_var_params['offset']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        return self.api_client.call_api(
            '/spot/price_orders',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[SpotPriceTriggeredOrder]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def create_spot_price_triggered_order(self, spot_price_triggered_order, **kwargs): # noqa: E501
"""Create a price-triggered order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_spot_price_triggered_order(spot_price_triggered_order, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param SpotPriceTriggeredOrder spot_price_triggered_order: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.TriggerOrderResponse
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_spot_price_triggered_order_with_http_info(spot_price_triggered_order, **kwargs) # noqa: E501
    def create_spot_price_triggered_order_with_http_info(self, spot_price_triggered_order, **kwargs):  # noqa: E501
        """Create a price-triggered order  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.create_spot_price_triggered_order_with_http_info(spot_price_triggered_order, async_req=True)
        >>> result = thread.get()

        :param bool async_req: execute request asynchronously
        :param SpotPriceTriggeredOrder spot_price_triggered_order: (required)
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response
            data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request
            timeout. It can also be a pair (tuple) of
            (connection, read) timeouts.
        :rtype: tuple(gate_api.TriggerOrderResponse, status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
            returns the request thread.
        """
        # locals() taken at entry captures self, the declared parameters and
        # the raw kwargs dict; recognised kwargs are flattened into it below.
        local_var_params = locals()
        all_params = ['spot_price_triggered_order']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'" " to method create_spot_price_triggered_order" % k
                )
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'spot_price_triggered_order' is set
        if self.api_client.client_side_validation and (
            'spot_price_triggered_order' not in local_var_params
            or local_var_params['spot_price_triggered_order'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `spot_price_triggered_order` when calling `create_spot_price_triggered_order`"
            )  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # The order model is serialized as the JSON request body.
        if 'spot_price_triggered_order' in local_var_params:
            body_params = local_var_params['spot_price_triggered_order']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']
        )  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        return self.api_client.call_api(
            '/spot/price_orders',
            'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TriggerOrderResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def cancel_spot_price_triggered_order_list(self, **kwargs): # noqa: E501
"""Cancel all open orders # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_spot_price_triggered_order_list(async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str market: Currency pair
:param str account: Trading account
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: list[gate_api.SpotPriceTriggeredOrder]
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cancel_spot_price_triggered_order_list_with_http_info(**kwargs) # noqa: E501
    def cancel_spot_price_triggered_order_list_with_http_info(self, **kwargs):  # noqa: E501
        """Cancel all open orders  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.cancel_spot_price_triggered_order_list_with_http_info(async_req=True)
        >>> result = thread.get()

        :param bool async_req: execute request asynchronously
        :param str market: Currency pair
        :param str account: Trading account
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response
            data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request
            timeout. It can also be a pair (tuple) of
            (connection, read) timeouts.
        :rtype: tuple(list[gate_api.SpotPriceTriggeredOrder], status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
            returns the request thread.
        """
        # locals() taken at entry captures self and the raw kwargs dict;
        # recognised kwargs are flattened into it below. Both API parameters
        # are optional, so no required-parameter validation is performed.
        local_var_params = locals()
        all_params = ['market', 'account']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'" " to method cancel_spot_price_triggered_order_list" % k
                )
            local_var_params[k] = v
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'market' in local_var_params and local_var_params['market'] is not None:  # noqa: E501
            query_params.append(('market', local_var_params['market']))  # noqa: E501
        if 'account' in local_var_params and local_var_params['account'] is not None:  # noqa: E501
            query_params.append(('account', local_var_params['account']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        return self.api_client.call_api(
            '/spot/price_orders',
            'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[SpotPriceTriggeredOrder]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def get_spot_price_triggered_order(self, order_id, **kwargs): # noqa: E501
"""Get a single order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_spot_price_triggered_order(order_id, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str order_id: Retrieve the data of the order with the specified ID (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.SpotPriceTriggeredOrder
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_spot_price_triggered_order_with_http_info(order_id, **kwargs) # noqa: E501
    def get_spot_price_triggered_order_with_http_info(self, order_id, **kwargs):  # noqa: E501
        """Get a single order  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_spot_price_triggered_order_with_http_info(order_id, async_req=True)
        >>> result = thread.get()

        :param bool async_req: execute request asynchronously
        :param str order_id: Retrieve the data of the order with the specified ID (required)
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
            be returned without reading/decoding response
            data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request
            timeout. It can also be a pair (tuple) of
            (connection, read) timeouts.
        :rtype: tuple(gate_api.SpotPriceTriggeredOrder, status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
            returns the request thread.
        """
        # locals() taken at entry captures self, order_id and the raw kwargs
        # dict; recognised kwargs are flattened into it below.
        local_var_params = locals()
        all_params = ['order_id']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'" " to method get_spot_price_triggered_order" % k
                )
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'order_id' is set
        if self.api_client.client_side_validation and (
            'order_id' not in local_var_params or local_var_params['order_id'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `order_id` when calling `get_spot_price_triggered_order`"
            )  # noqa: E501
        collection_formats = {}
        # order_id is interpolated into the URL path.
        path_params = {}
        if 'order_id' in local_var_params:
            path_params['order_id'] = local_var_params['order_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501
        return self.api_client.call_api(
            '/spot/price_orders/{order_id}',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SpotPriceTriggeredOrder',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
def cancel_spot_price_triggered_order(self, order_id, **kwargs): # noqa: E501
"""Cancel a single order # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_spot_price_triggered_order(order_id, async_req=True)
>>> result = thread.get()
:param bool async_req: execute request asynchronously
:param str order_id: Retrieve the data of the order with the specified ID (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:rtype: gate_api.SpotPriceTriggeredOrder
:return: If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cancel_spot_price_triggered_order_with_http_info(order_id, **kwargs) # noqa: E501
    def cancel_spot_price_triggered_order_with_http_info(self, order_id, **kwargs):  # noqa: E501
        """Cancel a single order  # noqa: E501

        Issues ``DELETE /spot/price_orders/{order_id}`` and returns the full
        HTTP triple (body, status code, headers).

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.cancel_spot_price_triggered_order_with_http_info(order_id, async_req=True)
        >>> result = thread.get()

        :param bool async_req: execute request asynchronously
        :param str order_id: Retrieve the data of the order with the specified ID (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :rtype: tuple(gate_api.SpotPriceTriggeredOrder, status_code(int), headers(HTTPHeaderDict))
        :return: If the method is called asynchronously,
            returns the request thread.
        """
        # Snapshot the arguments by name; kwargs are validated against
        # all_params below and merged into this dict before forwarding.
        local_var_params = locals()

        all_params = ['order_id']
        all_params.extend(['async_req', '_return_http_data_only', '_preload_content', '_request_timeout'])

        # Reject any keyword argument that is neither an API parameter nor one
        # of the client-level control options.
        for k, v in six.iteritems(local_var_params['kwargs']):
            if k not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'" " to method cancel_spot_price_triggered_order" % k
                )
            local_var_params[k] = v
        del local_var_params['kwargs']
        # verify the required parameter 'order_id' is set
        if self.api_client.client_side_validation and (
            'order_id' not in local_var_params or local_var_params['order_id'] is None  # noqa: E501
        ):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `order_id` when calling `cancel_spot_price_triggered_order`"
            )  # noqa: E501

        collection_formats = {}

        # order_id is interpolated into the URL path template below.
        path_params = {}
        if 'order_id' in local_var_params:
            path_params['order_id'] = local_var_params['order_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['apiv4']  # noqa: E501

        return self.api_client.call_api(
            '/spot/price_orders/{order_id}',
            'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SpotPriceTriggeredOrder',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
        )
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
12816,
7824,
410,
19,
628,
220,
220,
220,
19134,
284,
12816,
13,
952,
7824,
220,
7824,
85,
19,
3769,
4136,
11,
10330,
290,
25650,
7313,
4560,
13,
1318,
389,
1171... | 2.334373 | 62,816 |
#!/usr/bin/python
'''
This file is part of pyfuzz.
Copyright 2009 Frederic Morcos <fred.morcos@gmail.com>
pyfuzz is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyfuzz is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyfuzz. If not, see <http://www.gnu.org/licenses/>.
'''
from ui import UI
from flc import FLC
from variable import Variable
from function import Function
def _membership(name, points):
    """Build a fuzzy membership Function from a sequence of [x, y] vertices."""
    func = Function(name)
    for point in points:
        func.add_point(point)
    return func


def _variable(name, functions):
    """Build a fuzzy Variable holding the given membership functions, in order."""
    var = Variable(name)
    for func in functions:
        var.add_function(func)
    return var


# front side sensor (input)
fssLow = _membership("Low", [[0.0, 1.0], [10.0, 1.0], [15.0, 0.0]])
fssMed = _membership("Medium", [[10.0, 0.0], [15.0, 1.0], [25.0, 1.0], [30.0, 0.0]])
fssHig = _membership("High", [[25.0, 0.0], [30.0, 1.0]])
fss = _variable("Front Side Sensor", [fssLow, fssMed, fssHig])

# back side sensor (input)
bssLow = _membership("Low", [[0.0, 1.0], [20.0, 1.0], [40.0, 0.0]])
bssMed = _membership("Medium", [[20.0, 0.0], [40.0, 1.0], [60.0, 0.0]])
bssHig = _membership("High", [[40.0, 0.0], [60.0, 1.0]])
bss = _variable("Back Side Sensor", [bssLow, bssMed, bssHig])

# wheel speed (output)
wsLow = _membership("Low", [[20.0, 0.0], [30.0, 1.0], [40.0, 0.0]])
wsMed = _membership("Medium", [[40.0, 0.0], [50.0, 1.0], [55.0, 0.0]])
wsHig = _membership("High", [[55.0, 0.0], [60.0, 1.0], [70.0, 0.0]])
ws = _variable("Wheel Speed", [wsLow, wsMed, wsHig])

# steering (output)
sLef = _membership("Left", [[-180.0, 0.0], [-90.0, 1.0], [-40.0, 0.0]])
sZer = _membership("Zero", [[-40.0, 0.0], [-0.0, 1.0], [40.0, 0.0]])
sRig = _membership("Right", [[40.0, 0.0], [90.0, 1.0], [180.0, 0.0]])
s = _variable("Steering", [sLef, sZer, sRig])

# the fuzzy logic controller
flc = FLC("Right-Edge Following Behavior of Mobile Robot")
flc.add_input(fss)
flc.add_input(bss)
flc.add_output(ws)
flc.add_output(s)
flc.generate_rulebase()

# ui stuff
ui = UI(flc)
ui.showUI()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
7061,
6,
197,
198,
220,
197,
1212,
2393,
318,
636,
286,
12972,
69,
4715,
13,
198,
220,
198,
220,
197,
15269,
3717,
197,
30847,
35626,
3461,
6966,
1279,
39193,
13,
4491,
6966,
31,
14816,
13,... | 2.31717 | 1,258 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import argparse
from time import time
from datetime import datetime
import numpy as np
try:
import tensorflow_gpu as tf
except:
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Dense, ELU, Dropout, BatchNormalization
from keras.losses import binary_crossentropy
from keras.metrics import top_k_categorical_accuracy
from keras.models import Model, load_model
from keras.regularizers import l2
from keras.optimizers import Adam
from keras.utils import HDF5Matrix
from keras.callbacks import Callback, ProgbarLogger, TerminateOnNaN, ModelCheckpoint, LearningRateScheduler, LambdaCallback, ReduceLROnPlateau, EarlyStopping
from keras.applications import ResNet50
from tensorboard_logging import Logger
from thumb_from_sd09 import scan_dir
import h5py
from kerastoolbox.callbacks import TelegramMonitor, PrintMonitor
def binary_sparse_softmax_cross_entropy(target, output, from_logits=False):
    """
    Expects the output of a sigmoid layer, but computes the
    sparse softmax cross entropy.

    :param target: tensor of integer class indices (any shape; flattened here).
    :param output: tensor of sigmoid probabilities, or raw logits when
        ``from_logits`` is True. Last dimension is the class dimension.
    :param from_logits: if True, skip the probability-to-logit inversion.
    :return: per-element cross-entropy tensor; reshaped to drop the class
        dimension when the output has 3+ dimensions.
    """
    # TF expects logits, Keras expects probabilities.
    if not from_logits:
        # transform from sigmoid back to logits
        # Clip away exact 0/1 first so the logit (log-odds) stays finite.
        _epsilon = tf.convert_to_tensor(1E-7, output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
        # NOTE(review): tf.log is the TF 1.x name (tf.math.log in TF 2.x) —
        # confirm the pinned TensorFlow version.
        output = tf.log(output / (1 - output))
    output_shape = output.get_shape()
    # Flatten to (N,) labels and (N, num_classes) logits as the op requires.
    targets = tf.cast(tf.reshape(target, [-1]), 'int64')
    logits = tf.reshape(output, [-1, int(output_shape[-1])])
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=targets,
        logits=logits)
    if len(output_shape) >= 3:
        # if our output includes timestep dimension
        # or spatial dimensions we need to reshape
        return tf.reshape(res, tf.shape(output)[:-1])
    else:
        return res
if __name__ == '__main__':
    # Parsers for batch-run CLI values: "a/b/c" -> [fn(a), fn(b), fn(c)].
    tuplify = lambda fn: lambda str_: [fn(x) for x in str_.split('/')]
    # "64-32-16" -> (64, 32, 16); "128x128" -> (128, 128).
    checkArch = lambda strin: tuple(int(val) for val in strin.split('-'))
    checkImgSize = lambda strin: tuple(int(val) for val in strin.split('x'))

    # Parse command line arguments
    parser = argparse.ArgumentParser(description="Train a classifier for fingerprints")
    parser.add_argument("in", help="Full path to the input database file (HDF5)")
    parser.add_argument("out", help="Name of the output folder, created in the current directory")
    parser.add_argument("--load", default=None, help="Name of the folder containing the pre-trained model")
    parser.add_argument("--save-epochs", default=50, type=int, help="Save checkpoint every this many epochs")
    parser.add_argument("-E", "--epochs", default=200, type=int, help="Number of training steps")
    # Batch run support: each option accepts '/'-separated values and yields a
    # list; the nested loops below run one training per combination.
    parser.add_argument("-B", "--batch-size", default=[64], type=tuplify(int), help="Number of images to feed per iteration (support batch run through '/' separator)")
    parser.add_argument("-L", "--learning-rate", default=[1E-6], type=tuplify(float), help="Learning rate for Adam optimizer (support batch run through '/' separator)")
    parser.add_argument("-D", "--decay-rate", default=[None], type=tuplify(float), help="Decay rate for Adam optimizer (support batch run through '/' separator)")
    parser.add_argument("--ES-patience", default=[None], type=tuplify(int), help="Early stopping patience (support batch run through '/' separator)")
    parser.add_argument("--ES-mindelta", default=[None], type=tuplify(float), help="Early stopping minimum difference (support batch run through '/' separator)")
    parser.add_argument("--RLROP-patience", default=[None], type=tuplify(int), help="ReduceLROnPlateau patience (support batch run through '/' separator)")
    parser.add_argument("--RLROP-factor", default=[None], type=tuplify(float), help="ReduceLROnPlateau factor (support batch run through '/' separator)")

    print()
    args = vars(parser.parse_args())
    for key, val in args.items():
        print(key, val)
    print()

    # Set fixed parameters from cmd line arguments
    nb_epoch = args["epochs"]
    db_path = os.path.abspath(os.path.normpath(args["in"]))
    load_path = args["load"]

    # Load data
    with h5py.File(db_path, "r") as f:
        if 'training' in f.keys() and 'validation' in f.keys():
            train_db = f['training']
            valid_db = f['validation']
            if 'num_classes' in train_db.attrs and 'repetitions' in train_db.attrs:
                train_N, train_num_classes = train_db.attrs['repetitions'], train_db.attrs['num_classes']
            else: raise ValueError("The training dataset lacks 'num_classes' and 'repetitions' attributes")
            if 'num_classes' in valid_db.attrs and 'repetitions' in valid_db.attrs:
                valid_N, valid_num_classes = valid_db.attrs['repetitions'], valid_db.attrs['num_classes']
            else: raise ValueError("The validation dataset lacks 'num_classes' and 'repetitions' attributes")
            if train_num_classes != valid_num_classes:
                raise ValueError("The number of classes in training and validation databases differ")
            num_classes = train_num_classes
        else: raise ValueError("The input database lacks training and validation datasets")
    print("Training and validation data loaded")
    print("Training data:", num_classes, "classes repeated", train_N, "times")
    print("Validation data:", num_classes, "classes repeated", valid_N, "times")

    train_data = HDF5Matrix(db_path, 'training')
    valid_data = HDF5Matrix(db_path, 'validation')
    # Labels repeat 0..num_classes-1 once per stored repetition.
    train_labels = np.tile(np.arange(num_classes), (train_N,))
    valid_labels = np.tile(np.arange(num_classes), (valid_N,))
    print(train_data.shape, train_labels.shape)
    print(valid_data.shape, valid_labels.shape)
    if train_data.shape[1] != valid_data.shape[1]:
        # BUG FIX: the exception was constructed but never raised, so the
        # mismatch check silently did nothing.
        raise ValueError("Different model used for training and validation, not allowed")
    logits_length = train_data.shape[1]

    # Get info about loaded data
    additional_info = {
        'Logits length': logits_length,
        'Number of classes': num_classes,
        'DB training repetitions': train_N,
        'Training samples': train_data.shape[0],
        'DB validation repetitions': valid_N,
        'Validation samples': valid_data.shape[0]
    }

    # Set variable parameters from cmd line args
    tests = [len(list_) for list_ in args.values() if isinstance(list_, list)]
    n_tests = 1
    for t in tests: n_tests *= t
    print(n_tests, "total tests will be performed... be patient!")

    # One full training run per hyper-parameter combination.
    for batch_size in args["batch_size"]:
        for learning_rate in args["learning_rate"]:
            for decay_rate in args["decay_rate"]:
                for ES_patience in args["ES_patience"]:
                    for ES_mindelta in args["ES_mindelta"]:
                        for RLROP_patience in args["RLROP_patience"]:
                            for RLROP_factor in args["RLROP_factor"]:
                                # NOTE(review): runOnce is not defined in this
                                # excerpt — presumably defined elsewhere in the
                                # file; confirm before running.
                                runOnce(batch_size=batch_size,
                                        learning_rate=learning_rate,
                                        decay_rate=decay_rate,
                                        ES_patience=ES_patience,
                                        ES_mindelta=ES_mindelta,
                                        RLROP_patience=RLROP_patience,
                                        RLROP_factor=RLROP_factor,
                                        additional_info=additional_info)
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
6738,
640,
1330,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
299,
... | 2.879447 | 2,389 |
"""Application Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
| [
37811,
23416,
32329,
526,
15931,
198,
198,
6738,
22397,
42725,
1330,
7032,
11,
10011,
2611,
198,
6738,
22397,
42725,
13,
12102,
378,
1330,
1881,
5189,
198,
6738,
11485,
268,
5700,
1330,
1635,
198,
6738,
11485,
27530,
13,
14881,
27054,
261... | 3.283333 | 60 |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pkg_resources
# Discover sub-command implementations registered under the "repo.subcmds"
# entry-point group and index them by command name.
all_commands = {}
for entry_point in pkg_resources.iter_entry_points("repo.subcmds"):
    command = entry_point.resolve()
    command.NAME = entry_point.name
    all_commands[entry_point.name] = command

# The help command needs the full registry so it can enumerate every command.
if 'help' in all_commands:
    all_commands['help'].commands = all_commands
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
3648,
383,
5565,
4946,
8090,
4935,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
19... | 3.256318 | 277 |
"""Module with helpers to declare capabilities and plugin behavior."""
from enum import Enum, EnumMeta
from typing import Any, Optional
from warnings import warn
class DeprecatedEnum(Enum):
    """Enum base class whose members may carry a deprecation message."""

    def __new__(cls, value: Any, deprecation: Optional[str] = None) -> "DeprecatedEnum":
        """Create an enum member, optionally tagged as deprecated.

        Args:
            value: Enum member value.
            deprecation: Deprecation message, or ``None`` for live members.

        Returns:
            An enum member value.
        """
        member: "DeprecatedEnum" = object.__new__(cls)
        member._value_ = value
        # Stored per member; surfaced via the deprecation_message property.
        member._deprecation = deprecation
        return member

    @property
    def deprecation_message(self) -> Optional[str]:
        """Deprecation message for this member.

        Returns:
            The message, or ``None`` when the member is not deprecated.
        """
        self._deprecation: Optional[str]
        return self._deprecation

    def emit_warning(self) -> None:
        """Emit a DeprecationWarning naming this member."""
        message = f"{self.name} is deprecated. {self.deprecation_message}"
        warn(message, DeprecationWarning, stacklevel=3)
class DeprecatedEnumMeta(EnumMeta):
    """Enum metaclass that warns whenever a deprecated member is accessed.

    All three access paths are covered: ``Enum['NAME']`` lookups,
    ``Enum.NAME`` attribute access and ``Enum(value)`` calls.
    """

    def __getitem__(self, name: str) -> Any:
        """Look up a member by name, emitting its deprecation warning if any.

        Args:
            name: Member name.

        Returns:
            The enum member.
        """
        member: Enum = super().__getitem__(name)
        if isinstance(member, DeprecatedEnum) and member.deprecation_message:
            member.emit_warning()
        return member

    def __getattribute__(cls, name: str) -> Any:
        """Fetch an attribute, warning when it is a deprecated member.

        Args:
            name: Attribute name.

        Returns:
            The attribute value.
        """
        attr = super().__getattribute__(name)
        if isinstance(attr, DeprecatedEnum) and attr.deprecation_message:
            attr.emit_warning()
        return attr

    def __call__(self, *args: Any, **kwargs: Any) -> Enum:
        """Resolve a member by value, emitting its deprecation warning if any.

        Args:
            args: Positional arguments.
            kwargs: Keyword arguments.

        Returns:
            The enum member.
        """
        member: Enum = super().__call__(*args, **kwargs)
        if isinstance(member, DeprecatedEnum) and member.deprecation_message:
            member.emit_warning()
        return member
class CapabilitiesEnum(DeprecatedEnum, metaclass=DeprecatedEnumMeta):
    """Base capabilities enumeration.

    Members stringify as their plain value so they can be emitted directly in
    capability lists.
    """

    def __str__(self) -> str:
        """Return the member's raw value as a string."""
        return f"{self.value}"

    def __repr__(self) -> str:
        """Return the member's raw value as a string (same as ``__str__``)."""
        return f"{self.value}"
class PluginCapabilities(CapabilitiesEnum):
    """Core capabilities which can be supported by taps and targets."""

    # Members are plain strings; CapabilitiesEnum renders them via str(value).

    #: Support plugin capability and setting discovery.
    ABOUT = "about"

    #: Support :doc:`inline stream map transforms</stream_maps>`.
    STREAM_MAPS = "stream-maps"

    #: Support the
    #: `ACTIVATE_VERSION <https://hub.meltano.com/singer/docs#activate-version>`_
    #: extension.
    ACTIVATE_VERSION = "activate-version"

    #: Input and output from
    #: `batched files <https://hub.meltano.com/singer/docs#batch>`_.
    #: A.K.A ``FAST_SYNC``.
    BATCH = "batch"
class TapCapabilities(CapabilitiesEnum):
    """Tap-specific capabilities."""

    #: Generate a catalog with `--discover`.
    DISCOVER = "discover"

    #: Accept input catalog, apply metadata and selection rules.
    CATALOG = "catalog"

    #: Incremental refresh by means of state tracking.
    STATE = "state"

    #: Automatic connectivity and stream init test via :ref:`--test<Test connectivity>`.
    TEST = "test"

    #: Support for ``replication_method: LOG_BASED``. You can read more about this
    #: feature in `MeltanoHub <https://hub.meltano.com/singer/docs#log-based>`_.
    LOG_BASED = "log-based"

    #: Deprecated. Please use :attr:`~TapCapabilities.CATALOG` instead.
    #: Uses DeprecatedEnum's (value, message) tuple form, so any access emits
    #: a DeprecationWarning via DeprecatedEnumMeta.
    PROPERTIES = "properties", "Please use CATALOG instead."
class TargetCapabilities(CapabilitiesEnum):
    """Target-specific capabilities."""

    #: Allows a ``soft_delete=True`` config option.
    #: Requires a tap stream supporting :attr:`PluginCapabilities.ACTIVATE_VERSION`
    #: and/or :attr:`TapCapabilities.LOG_BASED`.
    SOFT_DELETE = "soft-delete"

    #: Allows a ``hard_delete=True`` config option.
    #: Requires a tap stream supporting :attr:`PluginCapabilities.ACTIVATE_VERSION`
    #: and/or :attr:`TapCapabilities.LOG_BASED`.
    HARD_DELETE = "hard-delete"

    #: Fail safe for unknown JSON Schema types.
    DATATYPE_FAILSAFE = "datatype-failsafe"

    #: Allow denesting complex properties.
    RECORD_FLATTENING = "record-flattening"
| [
37811,
26796,
351,
49385,
284,
13627,
9889,
290,
13877,
4069,
526,
15931,
198,
198,
6738,
33829,
1330,
2039,
388,
11,
2039,
388,
48526,
198,
6738,
19720,
1330,
4377,
11,
32233,
198,
6738,
14601,
1330,
9828,
628,
198,
4871,
2129,
31023,
... | 2.441545 | 1,993 |
from .resource import ResourceAPI
| [
6738,
764,
31092,
1330,
20857,
17614,
628
] | 5 | 7 |
"""ParameterMap class"""
from typing import List, Dict, Tuple
import hjson
import copy
import numpy as np
import tensorflow as tf
from c3.c3objs import Quantity
from c3.signal.gates import Instruction
from c3.signal.pulse import components as comp_lib
class ParameterMap:
    """
    Collects information about control and model parameters and provides different
    representations depending on use.

    Parameters are kept in the private ``self.__pars`` dict, mapping
    "-"-joined identifier keys to Quantity objects, while ``self.opt_map``
    lists groups of equivalent parameter ids that share one optimization
    value.
    """

    def load_values(self, init_point):
        """
        Load a previous parameter point to start the optimization from.

        Parameters
        ----------
        init_point : str
            File location of the initial point
        """
        with open(init_point) as init_file:
            best = hjson.load(init_file)
        # Stored opt_map entries are plain lists; ids are tuples internally.
        best_opt_map = [[tuple(par) for par in pset] for pset in best["opt_map"]]
        init_p = best["optim_status"]["params"]
        self.set_parameters(init_p, best_opt_map)

    def read_config(self, filepath: str) -> None:
        """
        Load a file and parse it to create a ParameterMap object.

        Parameters
        ----------
        filepath : str
            Location of the configuration file
        """
        with open(filepath, "r") as cfg_file:
            cfg = hjson.loads(cfg_file.read())
        for key, gate in cfg.items():
            if "mapto" in gate.keys():
                # Derived gate: clone the referenced instruction, then apply
                # the parameter overrides from the config.
                instr = copy.deepcopy(self.instructions[gate["mapto"]])
                instr.name = key
                for drive_chan, comps in gate["drive_channels"].items():
                    for comp, props in comps.items():
                        for par, val in props["params"].items():
                            instr.comps[drive_chan][comp].params[par].set_value(val)
            else:
                # Fresh gate: build the instruction and its pulse components.
                instr = Instruction(
                    name=key,
                    t_start=0.0,
                    t_end=gate["gate_length"],
                    channels=list(gate["drive_channels"].keys()),
                )
                for drive_chan, comps in gate["drive_channels"].items():
                    for comp, props in comps.items():
                        ctype = props.pop("c3type")
                        instr.add_component(
                            comp_lib[ctype](name=comp, **props), chan=drive_chan
                        )
            self.instructions[key] = instr
        # NOTE(review): not part of this excerpt; presumably rebuilds
        # self.__pars from self.instructions — confirm it exists in the class.
        self.__initialize_parameters()

    def write_config(self, filepath: str) -> None:
        """
        Write dictionary to a HJSON file.

        Parameters
        ----------
        filepath : str
            Destination file.
        """
        with open(filepath, "w") as cfg_file:
            hjson.dump(self.asdict(), cfg_file)

    def asdict(self) -> dict:
        """
        Return a dictionary compatible with config files.
        """
        instructions = {}
        for name, instr in self.instructions.items():
            instructions[name] = instr.asdict()
        return instructions

    def get_full_params(self) -> Dict[str, Quantity]:
        """
        Returns the full parameter vector, including model and control parameters.
        """
        return self.__pars

    def get_opt_units(self) -> List[str]:
        """
        Returns a list of the units of the optimized quantities.
        """
        units = []
        for equiv_ids in self.opt_map:
            # All ids in a group are equivalent; the first one is
            # representative.
            key = "-".join(equiv_ids[0])
            units.append(self.__pars[key].unit)
        return units

    def get_parameter(self, par_id: Tuple[str]) -> Quantity:
        """
        Return one the current parameters.

        Parameters
        ----------
        par_id: tuple
            Hierarchical identifier for parameter.

        Returns
        -------
        Quantity

        Raises
        ------
        Exception
            If the identifier is not a known parameter.
        """
        key = "-".join(par_id)
        try:
            value = self.__pars[key]
        except KeyError as ke:
            raise Exception(f"C3:ERROR:Parameter {key} not defined.") from ke
        return value

    def get_parameters(self) -> List[Quantity]:
        """
        Return the current parameters selected by ``self.opt_map``.

        Returns
        -------
        list of Quantity
            One Quantity per equivalence group.
        """
        values = []
        for equiv_ids in self.opt_map:
            key = "-".join(equiv_ids[0])
            values.append(self.__pars[key])
        return values

    def set_parameters(self, values: list, opt_map=None) -> None:
        """Set the values in the original instruction class.

        Parameters
        ----------
        values: list
            List of parameter values. Can be nested, if a parameter is matrix valued.
        opt_map: list
            Corresponding identifiers for the parameter values. Defaults to
            ``self.opt_map``.

        Raises
        ------
        Exception
            If an identifier is unknown or a value is out of the parameter's
            admissible range.
        """
        val_indx = 0
        if opt_map is None:
            opt_map = self.opt_map
        for equiv_ids in opt_map:
            for par_id in equiv_ids:
                key = "-".join(par_id)
                try:
                    par = self.__pars[key]
                # BUG FIX: dict lookup raises KeyError, not ValueError, so the
                # intended error message was unreachable (sibling
                # get_parameter correctly catches KeyError).
                except KeyError as ve:
                    raise Exception(f"C3:ERROR:{key} not defined.") from ve
                try:
                    par.set_value(values[val_indx])
                except ValueError as ve:
                    raise Exception(
                        f"C3:ERROR:Trying to set {'-'.join(par_id)} "
                        f"to value {values[val_indx]} "
                        f"but has to be within {par.offset:.3} .."
                        f" {(par.offset + par.scale):.3}."
                    ) from ve
            # One shared value per equivalence group.
            val_indx += 1

    def get_parameters_scaled(self) -> np.ndarray:
        """
        Return the current parameters. This fuction should only be called by an
        optimizer. Are you an optimizer?

        Returns
        -------
        np.ndarray
            Flattened vector of scaled (optimizer-space) values, one entry
            block per equivalence group.
        """
        values = []
        for equiv_ids in self.opt_map:
            key = "-".join(equiv_ids[0])
            par = self.__pars[key]
            values.append(par.get_opt_value())
        return np.array(values).flatten()

    def set_parameters_scaled(self, values: tf.Variable) -> None:
        """
        Set the values in the original instruction class. This fuction should only be
        called by an optimizer. Are you an optimizer?

        Parameters
        ----------
        values: list
            List of parameter values. Matrix valued parameters need to be flattened.
        """
        val_indx = 0
        for equiv_ids in self.opt_map:
            key = "-".join(equiv_ids[0])
            par_len = self.__pars[key].length
            # Every parameter in the group receives the same slice of values.
            for par_id in equiv_ids:
                key = "-".join(par_id)
                par = self.__pars[key]
                par.set_opt_value(values[val_indx : val_indx + par_len])
            val_indx += par_len

    def set_opt_map(self, opt_map) -> None:
        """
        Set the opt_map, i.e. which parameters will be optimized.

        Raises
        ------
        Exception
            If any identifier does not name a known parameter.
        """
        for equiv_ids in opt_map:
            for pid in equiv_ids:
                key = "-".join(pid)
                if key not in self.__pars:
                    par_strings = "\n".join(self.__pars.keys())
                    raise Exception(
                        f"C3:ERROR:Parameter {key} not defined in {par_strings}"
                    )
        self.opt_map = opt_map

    def str_parameters(self, opt_map: List[List[Tuple[str]]]) -> str:
        """
        Return a multi-line human-readable string of the optmization parameter names and
        current values.

        Parameters
        ----------
        opt_map: list
            Optionally use only the specified parameters.

        Returns
        -------
        str
            Parameters and their values
        """
        ret = []
        for equiv_ids in opt_map:
            par_id = equiv_ids[0]
            key = "-".join(par_id)
            par = self.__pars[key]
            ret.append(f"{key:38}: {par}\n")
            if len(equiv_ids) > 1:
                # List the other ids that share this value.
                for eid in equiv_ids[1:]:
                    ret.append("-".join(eid))
                    ret.append("\n")
                ret.append("\n")
        return "".join(ret)

    def print_parameters(self) -> None:
        """
        Print current parameters to stdout.
        """
        print(self.str_parameters(self.opt_map))
| [
37811,
36301,
13912,
1398,
37811,
198,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
309,
29291,
198,
11748,
289,
17752,
198,
11748,
4866,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
269,
... | 2.013092 | 4,201 |
import asyncio
import datetime
import json
import pathlib
import cleo
import pkg_resources
import structlog
from wraeblast import errors, insights, logging
from wraeblast.filtering.parsers.extended import config, loads, render
from wraeblast.filtering.serializers.standard import dumps
log = structlog.get_logger()
class RenderFilterCommand(BaseCommand):
    # NOTE(review): the docstring doubles as the CLI definition — presumably
    # parsed by the command framework (cleo) to derive the command name,
    # arguments and options. Treat it as code: do not reword or reflow it.
    """Render an item filter template

    render_filter
        {file : filter template file}
        {--O|options-file= : Options JSON file}
        {--d|output-directory=. : Output directory}
        {--i|keep-intermediate : Keep rendered intermediate template}
        {--o|output= : Output file}
        {--l|league=TEMP : Current league name}
        {--N|no-sync : Prevents automatic insights syncing}
        {--p|preset=default : Preset name}

    """
class SyncInsightsCommand(BaseCommand):
    # NOTE(review): docstring is the CLI spec (see RenderFilterCommand in this
    # file); treat it as code, not prose.
    """Fetch Path of Exile economy insights

    sync_insights
        {--l|league=TEMP : Current league name}

    """
| [
11748,
30351,
952,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
3108,
8019,
198,
198,
11748,
1190,
78,
198,
11748,
279,
10025,
62,
37540,
198,
11748,
2878,
6404,
198,
198,
6738,
7917,
1765,
12957,
1330,
8563,
11,
17218,
11,
18... | 2.880466 | 343 |
import numpy as np
import matplotlib.pyplot as plt
# Plot the empirical CDF of task response times read from a text file.
rt = np.loadtxt('./responseTime_t1rm.out')

fig1, ax1 = plt.subplots()
# Cumulative percentage: the i-th smallest sample covers i/N * 100 percent.
# FIX: np.float (a deprecated alias of the builtin) was removed in
# NumPy 1.24; use the builtin float instead.
n = np.arange(1, len(rt) + 1) / float(len(rt)) * 100
rtSorted = np.sort(rt)
ax1.step(rtSorted, n, color='k', label='task t1rm')
ax1.set_xlabel('Response Time (microseconds)')
ax1.set_ylabel('Percentage (%)')
plt.legend()
plt.tight_layout()
plt.show()
#plt.savefig('./responseTimeCDF.pdf',
#            format='pdf', dpi=1000, bbox_inches='tight')
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
17034,
796,
45941,
13,
2220,
14116,
7,
4458,
14,
26209,
7575,
62,
83,
16,
26224,
13,
448,
11537,
198,
198,
5647,
16,
11,
7877,
16... | 2.236967 | 211 |
from .... pyaz_utils import _call_az
def list(resource_group, workspace_name):
    '''
    List all firewall rules.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The workspace name.
    '''
    # locals() forwards the arguments by name to the az CLI wrapper, so
    # parameter names must stay exactly as generated (this also means the
    # builtin `list` shadowing cannot be renamed without breaking the API).
    return _call_az("az synapse workspace firewall-rule list", locals())
def show(name, resource_group, workspace_name):
    '''
    Get a firewall rule.

    Required Parameters:
    - name -- The IP firewall rule name
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The workspace name.
    '''
    # locals() forwards all arguments by name; do not rename parameters or
    # introduce local variables before this call.
    return _call_az("az synapse workspace firewall-rule show", locals())
def create(end_ip_address, name, resource_group, start_ip_address, workspace_name, no_wait=None):
    '''
    Create a firewall rule.

    Required Parameters:
    - end_ip_address -- The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress.
    - name -- The IP firewall rule name
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - start_ip_address -- The start IP address of the firewall rule. Must be IPv4 format.
    - workspace_name -- The workspace name.

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # locals() forwards all arguments by name; do not rename parameters or
    # introduce local variables before this call.
    return _call_az("az synapse workspace firewall-rule create", locals())
def update(name, resource_group, workspace_name, end_ip_address=None, no_wait=None, start_ip_address=None):
    '''
    Update a firewall rule.

    Required Parameters:
    - name -- The IP firewall rule name
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The workspace name.

    Optional Parameters:
    - end_ip_address -- The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress.
    - no_wait -- Do not wait for the long-running operation to finish.
    - start_ip_address -- The start IP address of the firewall rule. Must be IPv4 format.
    '''
    # locals() forwards all arguments by name; do not rename parameters or
    # introduce local variables before this call.
    return _call_az("az synapse workspace firewall-rule update", locals())
def delete(name, resource_group, workspace_name, no_wait=None, yes=None):
    '''
    Delete a firewall rule.

    Required Parameters:
    - name -- The IP firewall rule name
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The workspace name.

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    - yes -- Do not prompt for confirmation.
    '''
    # locals() forwards all arguments by name; do not rename parameters or
    # introduce local variables before this call.
    return _call_az("az synapse workspace firewall-rule delete", locals())
def wait(resource_group, rule_name, workspace_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
    '''
    Place the CLI in a waiting state until a condition of a firewall rule is met.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - rule_name -- The IP firewall rule name.
    - workspace_name -- The workspace name.

    Optional Parameters:
    - created -- wait until created with 'provisioningState' at 'Succeeded'
    - custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
    - deleted -- wait until deleted
    - exists -- wait until the resource exists
    - interval -- polling interval in seconds
    - timeout -- maximum wait in seconds
    - updated -- wait until updated with provisioningState at 'Succeeded'
    '''
    # locals() forwards all arguments by name; do not rename parameters or
    # introduce local variables before this call.
    return _call_az("az synapse workspace firewall-rule wait", locals())
| [
6738,
19424,
12972,
1031,
62,
26791,
1330,
4808,
13345,
62,
1031,
198,
198,
4299,
1351,
7,
31092,
62,
8094,
11,
44573,
62,
3672,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
7343,
477,
32928,
3173,
13,
628,
220,
220,
220... | 3.377517 | 1,192 |
"""
Author: Ryan Faulkner
Date: October 19th, 2014
Container for mashup logic.
"""
import json
import random
from sqlalchemy.orm.exc import UnmappedInstanceError
from flickipedia.redisio import DataIORedis
from flickipedia.model.articles import ArticleModel, ArticleContentModel
from flickipedia.config import log, settings
from flickipedia.model.likes import LikeModel
from flickipedia.model.exclude import ExcludeModel
from flickipedia.model.photos import PhotoModel
from flickipedia.parse import parse_strip_elements, parse_convert_links, \
handle_photo_integrate, format_title_link, add_formatting_generic
def get_article_count():
    """
    Fetch total article count, preferring the redis cache.

    The cached value is refreshed from the database when it is missing, on a
    random 1-in-ARTICLE_COUNT_REFRESH_RATE draw, or while the table is still
    below its configured maximum size.

    :return: int; total count of articles
    """
    DataIORedis().connect()
    # Fetch article count from redis (query from DB if not present)
    # Refresh according to config for rate
    article_count = DataIORedis().read(settings.ARTICLE_COUNT_KEY)
    # NOTE(review): redis reads return strings, so the '<' comparison below
    # mixes str and int — confirm the intended coercion before relying on it.
    if not article_count \
        or random.randint(1, settings.ARTICLE_COUNT_REFRESH_RATE) == 1 \
            or article_count < settings.MYSQL_MAX_ROWS:
        with ArticleModel() as am:
            article_count = am.get_article_count()
        DataIORedis().write(settings.ARTICLE_COUNT_KEY, article_count)
    return int(article_count)
def get_max_article_id():
    """
    Fetch the maximum article ID, preferring the redis cache.

    Refreshed from the database when missing or on a random
    1-in-ARTICLE_MAXID_REFRESH_RATE draw.

    :return: int; maximum id from article meta
    """
    max_aid = DataIORedis().read(settings.MAX_ARTICLE_ID_KEY)
    if not max_aid \
            or random.randint(1, settings.ARTICLE_MAXID_REFRESH_RATE) == 1:
        with ArticleModel() as am:
            max_aid = am.get_max_id()
        DataIORedis().write(settings.MAX_ARTICLE_ID_KEY, max_aid)
    # NOTE(review): unlike get_article_count, the cached (string) value is
    # returned without an int() cast — confirm callers tolerate a str here.
    return max_aid
def get_article_stored_body(article):
    """
    Fetch the stored page markup for an article.

    :param article: str; article name

    :return: str; stored page content markup, or '' when no content row
        exists for the article
    """
    with ArticleModel() as am:
        article_obj = am.get_article_by_name(article)
    try:
        with ArticleContentModel() as acm:
            body = acm.get_article_content(article_obj._id).markup
    except Exception as e:
        # Missing markup is expected for never-rendered articles; log at
        # info level and fall back to an empty body rather than propagating.
        log.info('Article markup not found: "%s"' % e.message)
        body = ''
    return body
def get_wiki_content(article):
    """
    Retrieve the wiki content from the mediawiki API

    :param article: str; article name

    :return: Wikipedia; mediawiki api response object
    """
    # TODO: stub — not implemented; callers currently fetch wiki content
    # through other paths.
    pass
def get_flickr_photos(flickr_json):
    """
    Extract photo descriptors from a Flickr API search response.

    :param flickr_json: dict; parsed JSON response from the Flickr API

    :return: list; one dict per photo with the fields needed downstream
    """
    photos = []
    for index in xrange(settings.NUM_PHOTOS_TO_FETCH):
        try:
            # Hoist the deep lookup so each field is read off one entry.
            entry = flickr_json['photos']['photo'][index]
            photos.append(
                {
                    'owner': entry['owner'],
                    'photo_id': entry['id'],
                    'farm': entry['farm'],
                    'server': entry['server'],
                    'title': entry['title'],
                    'secret': entry['secret'],
                },
            )
        except (IndexError, KeyError) as e:
            # Fewer results than requested, or a malformed entry.
            log.error('No more photos to process for: - "%s"' % (e.message))
    log.debug('Photo info: %s' % (str(photos)))
    return photos
def manage_article_storage(max_article_id, article_count):
    """
    Evict a randomly chosen article when the table is at capacity.

    :param max_article_id: int; current maximum article id
    :param article_count: int; total count of stored articles

    :return: bool; always True (failures are logged, not raised)
    """
    if article_count >= settings.MYSQL_MAX_ROWS:
        if max_article_id:
            # TODO - CHANGE THIS be careful, could iterate many times
            article_removed = False
            attempts = 0
            # BUGFIX: the original condition was
            #   "not article_removed OR attempts > MAX_RETRIES_FOR_REMOVE",
            # which loops forever while deletion keeps failing and also keeps
            # looping after a success once the retry budget is exceeded.
            # Retry until a row is removed or the budget runs out.
            while not article_removed \
                    and attempts < settings.MAX_RETRIES_FOR_REMOVE:
                attempts += 1
                article_id = random.randint(0, int(max_article_id))
                with ArticleModel() as am:
                    log.info('Removing article id: ' + str(article_id))
                    try:
                        am.delete_article(article_id)
                        article_removed = True
                    except UnmappedInstanceError:
                        # Randomly chosen id does not exist; try another.
                        continue
        else:
            log.error('Could not determine a max article id.')
    return True
def handle_article_insert(article, wiki_page_id):
    """
    Insert article metadata and report the resulting id.

    :param article: str; article name
    :param wiki_page_id: int; mediawiki page id

    :return: (int, bool); new article id (-1 on failure) and success flag
    """
    with ArticleModel() as am:
        if not am.insert_article(article, wiki_page_id):
            log.error('Couldn\'t insert article: "%s"' % article)
            return -1, False
        # Re-read to obtain the generated primary key.
        stored = am.get_article_by_name(article)
        return stored._id, True
def handle_article_content_insert(article_id, page_content, is_new_article):
    """
    Persist rendered article content, inserting or updating as appropriate.

    :param article_id: int; article id
    :param page_content: json-serializable; page content
    :param is_new_article: bool; True inserts a new row, False updates

    :return: None
    """
    serialized = json.dumps(page_content)
    with ArticleContentModel() as acm:
        # Select the model operation once, then apply it.
        writer = acm.insert_article if is_new_article else acm.update_article
        writer(article_id, serialized)
def prep_page_content(article_id, article, wiki, photos, user_obj):
    """
    Prepare the formatted article content

    :param article_id: int; article id
    :param article: str; article name
    :param wiki: wikipedia; mediawiki api response
    :param photos: list; list of photo json
    :param user_obj: User; user object for request

    :return: dict; formatted page response passed to jinja template
    """
    # Order matters: strip unwanted elements first, rewrite links, apply
    # generic formatting, and only then integrate the photo markup.
    html = parse_strip_elements(wiki.html())
    html = parse_convert_links(html)
    html = add_formatting_generic(html)
    # Side effect: records photos/likes in the DB and annotates the photo
    # dicts in-place before they are woven into the page.
    photo_ids = process_photos(article_id, photos, user_obj)
    html = handle_photo_integrate(photos, html, article)
    page_content = {
        'title': format_title_link(wiki.title, article),
        'content': html,
        'section_img_class': settings.SECTION_IMG_CLASS,
        'num_photos': len(photos),
        'article_id': article_id,
        'user_id': user_obj.get_id(),
        'photo_ids': photo_ids
    }
    return page_content
def update_last_access(article_id):
    """
    Update article last access

    :param article_id: int; article id

    :return: bool; success
    """
    # TODO: stub — not implemented; callers currently ignore the result.
    pass
def order_photos_by_rank(article_id, photos):
    """
    Reorder photos by score (endorsements minus exclusions), descending.

    :param article_id: int; article id
    :param photos: list; photo dicts, annotated in-place with 'score'

    :return: list; photos sorted by descending score (stable)
    """
    # Compute scores
    for i in xrange(len(photos)):
        # Get Exclusions & Endorsements
        with ExcludeModel() as em:
            exclusions = em.get_excludes_article_photo(article_id,
                                                       photos[i]['photo_id'])
        with LikeModel() as lm:
            endorsements = lm.get_likes_article_photo(article_id,
                                                      photos[i]['photo_id'])
        photos[i]['score'] = len(endorsements) - len(exclusions)
    # BUGFIX: the old call passed a cmp function positionally
    # (sorted(photos, f)), which only Python 2 accepts. A key function with
    # reverse=True yields the same stable, descending-by-score order and
    # works on both Python 2 and 3.
    return sorted(photos, key=lambda photo: photo['score'], reverse=True)
def process_photos(article_id, photos, user_obj):
    """
    Handles linking photo results with the model and returns a list of
    Flickr photo ids to pass to templating.

    Side effects: inserts previously unseen photos into the Photo model and
    annotates each photo dict in-place with 'id', 'votes' and 'like'.

    :param article_id: int; article id
    :param photos: list of photos
    :param user_obj: User; user object for request

    :return: List of Flickr photo ids
    """
    photo_ids = []
    for photo in photos:
        # Ensure that each photo is modeled
        with PhotoModel() as pm:
            photo_obj = pm.get_photo(photo['photo_id'], article_id)
            if not photo_obj:
                log.info('Processing photo: "%s"' % str(photo))
                if pm.insert_photo(photo['photo_id'], article_id):
                    # Re-read to obtain the generated primary key.
                    photo_obj = pm.get_photo(
                        photo['photo_id'], article_id)
                    if not photo_obj:
                        log.error('DB Error: Could not retrieve or '
                                  'insert: "%s"' % str(photo))
                        continue
                else:
                    log.error('Couldn\'t insert photo: "%s"' % (
                        photo['photo_id']))
        # NOTE(review): on the failed-insert path above photo_obj is still
        # None, so these attribute reads would raise AttributeError —
        # confirm whether that branch should also `continue`.
        photo['id'] = photo_obj._id
        photo['votes'] = photo_obj.votes
        # Retrieve like data
        with LikeModel() as lm:
            if lm.get_like(article_id, photo_obj._id,
                           user_obj.get_id()):
                photo['like'] = True
            else:
                photo['like'] = False
        photo_ids.append(photo['photo_id'])
    return photo_ids
37811,
198,
220,
220,
220,
6434,
25,
6047,
44760,
74,
1008,
198,
220,
220,
220,
7536,
25,
220,
220,
3267,
678,
400,
11,
1946,
628,
220,
220,
220,
43101,
329,
30407,
929,
9156,
13,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
4738... | 2.251296 | 4,051 |
# coding: utf-8
# # Chart Presentation (6) - Adding a source annotation
# In this lesson we'll put into practise what we've learnt so far about creating and positioning annotations and increasing the margins of our charts to add a source annotation to some charts that we've previously created.
#
# Citing and referencing your sources is a vital part of producing any data visualisation as it's important for people to be able to replicate your work.
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the clouds
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# In[2]:
# lets us see the charts in an iPython Notebook
pyo.offline.init_notebook_mode()  # run at the start of every ipython

# ## Getting the charts

# We're going to add source annotations to two charts that we've previously
# produced; the Gapminder plot, and the plot which shows Life Expectancy
# against cigarette prices.

gapMinder = py.get_figure("rmuir", 225)
lifeExp = py.get_figure("rmuir", 223)


def add_source_annotation(figure, source_text):
    """Attach a source citation annotation below a figure's plotting area.

    The annotation uses 'paper' coordinates so it can sit outside the axes:
    bottom-left, small grey font, caller supplies the (italic) HTML text.

    :param figure: plotly figure object (as returned by py.get_figure)
    :param source_text: str; HTML-formatted citation text
    """
    figure['layout'].update({'annotations': [{'text': source_text,
                                              'xref': 'paper',
                                              'yref': 'paper',
                                              'x': 0,
                                              'y': -0.4,
                                              'font': {'size': 12,
                                                       'color': 'grey'},
                                              'xanchor': 'left',
                                              'showarrow': False}]})


# The annotation dict used to be duplicated verbatim for each chart; the
# helper keeps the styling identical and in one place.

# ### Setting the source for the Gapminder plot
add_source_annotation(
    gapMinder, "<i>Source: https://www.gapminder.org/data/</i>")
pyo.iplot(gapMinder)
py.image.save_as(gapMinder, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(05) Chart Presentation 2\Notebooks\images\Chart Presentation (6) - Adding a source annotation\pyo.iplot-0.png")

# ## Changing the source for the Life Expectancy plot (data from the WHO)
add_source_annotation(
    lifeExp, "<i>Source: The World Health Organisation (WHO)</i>")
pyo.iplot(lifeExp)
py.image.save_as(lifeExp, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(05) Chart Presentation 2\Notebooks\images\Chart Presentation (6) - Adding a source annotation\pyo.iplot-1.png")

# Increase the height and bottom margin so the source remains visible.
lifeExp['layout'].update({'height': 500,
                          'margin': {'b': 130}})
pyo.iplot(lifeExp)
py.image.save_as(lifeExp, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(05) Chart Presentation 2\Notebooks\images\Chart Presentation (6) - Adding a source annotation\pyo.iplot-2.png")

# Publish both finished charts to the Plotly cloud.
py.plot(gapMinder, filename="Life expectancy and GPD per capita", fileopt="overwrite")
py.image.save_as(gapMinder, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(05) Chart Presentation 2\Notebooks\images\Chart Presentation (6) - Adding a source annotation\py.plot-0.png")

py.plot(lifeExp, filename="Life expectancy against cost of cigarettes (Male & Female regressions)", fileopt="overwrite")
py.image.save_as(lifeExp, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(05) Chart Presentation 2\Notebooks\images\Chart Presentation (6) - Adding a source annotation\py.plot-1.png")

# ### What have we learnt this lesson?

# In this lesson we've practised adding, positioning and styling annotations,
# and modifying the chart margins and size to allow the annotations to be
# seen. From now on we'll use this knowledge in almost every lesson to add a
# source to every chart that we create.

# If you have any questions, please ask in the comments section or email
# <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
22086,
21662,
341,
357,
21,
8,
532,
18247,
257,
2723,
23025,
198,
198,
2,
554,
428,
11483,
356,
1183,
1234,
656,
44811,
644,
356,
1053,
26338,
523,
1290,
546,
4441,
290,
2209... | 2.417652 | 2,198 |
# Copyright 2021 Jonas Hallqvist
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the base parsing components for use in the various submodules."""
from pyisc.shared.parsing import BaseParser, Token
from pyisc.shared.nodes import RootNode, Node, PropertyNode
from pyisc.bind.utils import BindSplitter
class BindParser(BaseParser):
    """A parser for ISC BIND9 configs.

    The constants are the various RegEx patterns that is used in the
    tokens variable. Token variable is a list of tuples that contains
    previously mentioned RegEx patterns as well as lambda functions
    that are meant to be used by the re.Scanner in tokenize function.
    """

    # A word followed by arbitrary same-line text up to an opening brace,
    # e.g. 'zone "example.com" {' -- starts a nested section.
    DECLARATION_GENERAL = r"[\w]+\s*?[^\n]*?{"
    # A single 'key value ...;' statement terminated by a semicolon.
    PARAMETER_GENERAL = r"[\w\"!]+\s*?[^\n]*?;"
    # The '};' that closes a section.
    SECTION_END = r"\};"
    # NOTE(review): the names look swapped -- COMMENT_CPLUS matches the
    # C-style '/* ... */' block comment and COMMENT_C the C++-style '//'
    # line comment. Behaviour is unaffected; renaming would touch callers.
    COMMENT_CPLUS = r"/\*.*?\*/"
    COMMENT_C = r"//.*?\n"

    # Order matters: re.Scanner tries patterns top-down, so these specific
    # BIND patterns must precede the generic BaseParser fallbacks.
    tokens = [
        (DECLARATION_GENERAL, lambda scanner, token: Token(
            type='declaration_general', value=token)),
        (PARAMETER_GENERAL, lambda scanner, token: Token(
            type='parameter_general', value=token)),
        (COMMENT_CPLUS, lambda scanner, token: Token(
            type='comment_cplus', value=token)),
        (COMMENT_C, lambda scanner, token: Token(
            type='comment_c', value=token)),
        (SECTION_END, lambda scanner, token: Token(
            type='section_end', value=token)),
    ] + BaseParser.tokens

    def build_tree(self, content):
        """
        Return a tree like structure of token objects.

        Args:
            content (str): A supplied string to supply to the tokenize
                method.

        Returns:
            pyisc.shared.nodes.RootNode: A tree like representation of the
                supplied string.

        Examples:
            >>> isc_string = 'zone "berry.home" {'
            >>> parser = bind.parsing.BindParser()
            >>> parser.build_tree(isc_string)
            RootNode(Root)

        """
        node = RootNode()
        node_stack = []  # ancestors of the section currently being filled
        splitter = BindSplitter()
        next_comment = None  # comment text buffered for the next node
        for token in self.tokenize(content):
            if token.type in ['whitespace', 'newline']:
                continue
            if token.type == 'section_end':
                # Close the current section and resume at its parent.
                node = node_stack.pop()
            if token.type.startswith('comment'):
                # Accumulate consecutive comment tokens into one string.
                if not next_comment:
                    next_comment = ''
                else:
                    next_comment += '\n'
                next_comment += token.value.strip()
            if token.type.startswith('parameter'):
                key, value, parameters, *_ = splitter.switch(token)
                prop = PropertyNode(
                    type=key, value=value, parameters=parameters)
                # Attach any buffered comment, then reset the buffer.
                prop.comment = next_comment
                next_comment = None
                node.children.append(prop)
            if token.type.startswith('declaration'):
                key, value, parameters, *_ = splitter.switch(token)
                section = Node(type=key, value=value, parameters=parameters)
                section.comment = next_comment
                next_comment = None
                node.children.append(section)
                # Descend: subsequent tokens belong to this new section.
                node_stack += [node]
                node = section
        return node
| [
2,
15069,
33448,
40458,
4789,
44179,
396,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
... | 2.332716 | 1,620 |
from great_expectations.core.usage_statistics.anonymizers.action_anonymizer import (
ActionAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer
from great_expectations.validation_operators import (
ActionListValidationOperator,
ValidationOperator,
WarningAndFailureExpectationSuitesValidationOperator,
)
| [
6738,
1049,
62,
1069,
806,
602,
13,
7295,
13,
26060,
62,
14269,
3969,
13,
272,
5177,
11341,
13,
2673,
62,
272,
5177,
7509,
1330,
357,
198,
220,
220,
220,
7561,
2025,
5177,
7509,
11,
198,
8,
198,
6738,
1049,
62,
1069,
806,
602,
13,... | 3.153846 | 117 |
from aoc_input import get_input
import aoc_helpers as ah
import re
try:
    from tabulate import tabulate
except ImportError:
    # Fallback shim so the rest of the script still runs without the package.
    # (PEP 8 E731: a def is preferred over assigning a lambda — it also gets
    # a proper name in tracebacks.)
    def tabulate(*_args, **_kwargs):
        """Stand-in for tabulate.tabulate when the package is unavailable."""
        print("The tabulate module seems to be missing")

# Matches a puzzle rule line: "<field>: <lo1>-<hi1> or <lo2>-<hi2>".
RE_TICKET = re.compile(r"^(.*): (\d+)-(\d+) or (\d+)-(\d+)$")

DAY = 16
YEAR = 2020
| [
6738,
257,
420,
62,
15414,
1330,
651,
62,
15414,
198,
11748,
257,
420,
62,
16794,
364,
355,
29042,
198,
198,
11748,
302,
198,
28311,
25,
198,
197,
6738,
7400,
5039,
1330,
7400,
5039,
198,
16341,
17267,
12331,
25,
198,
197,
8658,
5039,... | 2.626168 | 107 |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# firefox plugin
# https://askubuntu.com/questions/870530/how-to-install-geckodriver-in-ubuntu
# hide browser window
chrome_options = Options()
chrome_options.add_argument("--headless") # define headless
# add the option when creating driver
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get("https://mofanpy.com/")
driver.find_element_by_xpath(u"//img[@alt='强化学习 (Reinforcement Learning)']").click()
driver.find_element_by_link_text("About").click()
driver.find_element_by_link_text(u"赞助").click()
driver.find_element_by_link_text(u"教程 ▾").click()
driver.find_element_by_link_text(u"数据处理 ▾").click()
driver.find_element_by_link_text(u"网页爬虫").click()
print(driver.page_source[:200])
driver.get_screenshot_as_file("./img/sreenshot2.png")
driver.close()
print('finish') | [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
25811,
1330,
18634,
198,
198,
2,
2046,
12792,
13877,
198,
2,
3740,
1378,
2093,
32230,
13,
785,
14,
6138,
507,
14,
23,
34801,
1270... | 2.573099 | 342 |
import unittest
from textwrap import dedent
import trafaret as T
from .util import get_err
| [
11748,
555,
715,
395,
198,
6738,
2420,
37150,
1330,
4648,
298,
198,
198,
11748,
1291,
69,
8984,
355,
309,
198,
6738,
764,
22602,
1330,
651,
62,
8056,
628,
628
] | 3.275862 | 29 |
# 根据每日 气温 列表,请重新生成一个列表,对应位置的输入是你需要再等待多久温度才会升高超过该日的天数。如果之后都不会升高,请在该位置用 0 来代替。
#
# 例如,给定一个列表 temperatures = [73, 74, 75, 71, 69, 72, 76, 73],你的输出应该是 [1, 1, 4, 2, 1, 1, 0, 0]。
#
# 提示:气温 列表长度的范围是 [1, 30000]。每个气温的值的均为华氏度,都是在 [30, 100] 范围内的整数。
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/daily-temperatures
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
from typing import List
if __name__ == '__main__':
    # NOTE(review): Solution is not defined in this file as shown — the class
    # implementing dailyTemperatures appears to have been omitted; confirm
    # before running. Expected output for this sample: [1, 1, 4, 2, 1, 1, 0, 0].
    s = Solution()
    print(s.dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))
2,
10545,
254,
117,
162,
235,
106,
162,
107,
237,
33768,
98,
10545,
108,
242,
162,
116,
102,
10263,
230,
245,
26193,
101,
171,
120,
234,
46237,
115,
34932,
235,
23877,
108,
37955,
22755,
238,
31660,
10310,
103,
26344,
245,
26193,
101,... | 0.942197 | 519 |
from pip._internal.utils.appdirs import user_cache_dir
# The user_cache_dir helper comes straight from pip itself
# Per-user directory where pip-tools persists its cache between runs.
CACHE_DIR = user_cache_dir("pip-tools")
| [
6738,
7347,
13557,
32538,
13,
26791,
13,
1324,
15908,
82,
1330,
2836,
62,
23870,
62,
15908,
198,
198,
2,
383,
2836,
62,
23870,
62,
15908,
31904,
2058,
3892,
422,
7347,
2346,
198,
34,
2246,
13909,
62,
34720,
796,
2836,
62,
23870,
62,
... | 3.1 | 50 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
import os
import sys
import time
import traceback
from os.path import join
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QWidget, QMainWindow, QApplication
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from ExcelToWord_109 import ExcelToWord_doc, ExcelToWord_first
from ExcelToWord_109 import ExcelToWord_second
if __name__ == "__main__":
    # NOTE(review): MainWindow is not defined in this file as shown —
    # presumably a subclass of QMainWindow built from the generated UI;
    # confirm where it is defined before running.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    # exec_() runs the Qt event loop; its exit code becomes the process's.
    sys.exit(app.exec_())
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
687,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
17301,
642,
13,
24,
13,
... | 2.781893 | 243 |
import argparse, librosa
import numpy as np
import torch
import torch.nn as nn
import torchaudio
from model import Tacotron as Tacotron
from collate_fn import collate_fn
from dataset import *
from util import *
from griffin_lim import GLA
from audio_processing import *
parser = argparse.ArgumentParser(description='training script')
# data load
parser.add_argument('--data', type=str, default='vctk', help='vctk')
parser.add_argument('--batch_size', type=int, default=6, help='batch size')
# generation option
parser.add_argument('--out_dir', type=str, default='generated', help='')
parser.add_argument('--init_from', type=str, default='./model_545th.pt', help='load parameters from...')
parser.add_argument('--caption', type=str, default='', help='text to generate speech')
parser.add_argument('--speaker_id', type=str, default='0', help='speaker id to generate speech, seperate by comma for mixing id')
parser.add_argument('--teacher_forcing_ratio', type=float, default=0, help='value between 0~1, use this for scheduled sampling')
# audio related option
parser.add_argument('--n_fft', type=int, default=2048, help='fft bin size')
parser.add_argument('--sample_rate', type=int, default=16000, help='sampling rate')
parser.add_argument('--frame_len_inMS', type=int, default=50, help='used to determine window size of fft')
# NOTE(review): declared type=int but the default is the float 12.5 — a
# user-supplied value is truncated to int while the default stays float;
# confirm which is intended.
parser.add_argument('--frame_shift_inMS', type=int, default=12.5, help='used to determine stride in sfft')
parser.add_argument('--num_recon_iters', type=int, default=50, help='# of iteration in griffin-lim recon')
# misc
parser.add_argument('--gpu', type=int, nargs='+', help='index of gpu machines to run')
parser.add_argument('--seed', type=int, default=0, help='random seed')
new_args = vars(parser.parse_args())

# load and override some arguments
# map_location keeps checkpoints loadable on CPU-only machines.
checkpoint = torch.load(new_args['init_from'], map_location=lambda storage, loc: storage)
args = checkpoint['args']
# Command-line values take precedence over those stored in the checkpoint.
for i in new_args:
    args.__dict__[i] = new_args[i]

torch.manual_seed(args.seed)
if args.gpu is None:
    args.use_gpu = False
    args.gpu = []
    device = torch.device('cpu')
    print("[*] Run in CPU mode")
else:
    args.use_gpu = True
    torch.cuda.manual_seed(args.seed)
    torch.cuda.set_device(args.gpu[0])
    device = torch.device('cuda:{}'.format(args.gpu[0]))
    print("[*] Run in GPU mode")

model = Tacotron(args)
if args.init_from:
    model.load_state_dict(checkpoint['state_dict'])
    model.reset_decoder_states()
    print('loaded checkpoint %s' % (args.init_from))
model = model.eval()

# NOTE(review): Griffin_Lim is not defined anywhere in this file — the
# import above binds GLA from griffin_lim and this line rebinds that name.
# As written this raises NameError at runtime; confirm the intended class.
GLA = Griffin_Lim(filter_length=args.n_fft)

if args.use_gpu:
    model = model.cuda()
    # NOTE(review): 'stft' is never defined in this script — NameError on
    # the GPU path; presumably it should come from audio_processing.
    stft = stft.cuda()

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file as shown — confirm it
    # is provided elsewhere or was lost during extraction.
    main()
| [
11748,
1822,
29572,
11,
9195,
4951,
64,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
24051,
198,
198,
6738,
2746,
1330,
26075,
313,
1313,
355,
26075,
313,
131... | 2.868676 | 929 |
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn_all_layer_SE_low_nms_score.py', '../_base_/datasets/voc0712_with_ClassBalancedDataset_config_and_data_augumentation.py',
    '../_base_/default_runtime.py'
]
# Override the base model head: this dataset has 4 foreground classes.
model = dict(roi_head=dict(bbox_head=dict(num_classes=4)))
# optimizer
# lr scaled down per the linear scaling rule; the base recipe assumes
# batchsize=16,lr=0.02
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
    type='EpochBasedRunner', max_epochs=48)  # NOTE(review): stale comment
# previously claimed "actual epoch = 4 * 3 = 12"; with RepeatDataset(times=3)
# the 48 configured epochs correspond to 48 * 3 dataset passes — confirm
# which value was intended.
# actual epoch explanation: In file configs/_base_/datasets/voc0712.py line 34 & 35,
# voc uses RepeatDataset ( time=3 ) as default,
# which means 1 epoch equals to 3 epochs
62,
8692,
62,
796,
685,
198,
220,
220,
220,
705,
40720,
62,
8692,
62,
14,
27530,
14,
69,
1603,
62,
6015,
20471,
62,
81,
1120,
62,
69,
21999,
62,
439,
62,
29289,
62,
5188,
62,
9319,
62,
77,
907,
62,
26675,
13,
9078,
3256,
705,
... | 2.545161 | 310 |
import asyncio
import vaex.jupyter
import time
import pytest
@pytest.mark.asyncio
@pytest.mark.parametrize("reentrant", [False, True])
@pytest.mark.parametrize("as_coroutine", [False, True])
@pytest.mark.parametrize("as_method", [False, True])
| [
11748,
30351,
952,
198,
11748,
46935,
1069,
13,
73,
929,
88,
353,
198,
11748,
640,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,... | 2.545455 | 99 |
from math import pi, sin, cos
import os
from OpenGL.GL import *
from OpenGL.GLUT import *
from PIL import Image
def set_img(name, path, imgID,
            img_root='C:/Users/willi/Desktop/projects/chess/img'):
    '''Load name.png and bind it to an OpenGL texture.

    Parameters
    ----------
    name : str
        Image file name (without the .png extension).
    path : str > 'W', 'B'
        Sub-folder holding the image (piece colour).
    imgID : int
        Texture id to bind the image to.
    img_root : str, optional
        Base directory containing the image folders. Parameterized so the
        module is no longer hard-wired to one machine's absolute path; the
        default preserves the original behaviour.
    '''
    img = Image.open(f'{img_root}/{path}/{name}.png')  # load the image
    w, h = img.size  # image width and height in pixels
    glBindTexture(GL_TEXTURE_2D, imgID)  # make imgID the active texture
    # Upload the RGBA pixel data into the bound texture.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, img.tobytes())
    # Linear filtering for both minification and magnification.
    glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
def draw_img(x, y, imgID):
    '''Draw a textured unit quad centred on the given board square.

    Parameters
    ----------
    x, y : float
        Centre of the quad in world coordinates.
    imgID : int
        Texture id previously filled by set_img.
    '''
    # (texture coordinate, vertex) pairs, counter-clockwise from bottom-left.
    corners = (
        ((0.0, 1.0), (-0.5, -0.5)),
        ((1.0, 1.0), (0.5, -0.5)),
        ((1.0, 0.0), (0.5, 0.5)),
        ((0.0, 0.0), (-0.5, 0.5)),
    )
    glPushMatrix()  # begin a local transform
    glBindTexture(GL_TEXTURE_2D, imgID)  # activate the piece's texture
    glTranslate(x, y, 0)  # move to the target square
    glColor(1, 1, 1)  # white: texture colours pass through unmodified
    glBegin(GL_QUADS)
    for (tex_s, tex_t), (vert_x, vert_y) in corners:
        glTexCoord(tex_s, tex_t)
        glVertex(vert_x, vert_y)
    glEnd()
    glPopMatrix()  # end the local transform
def window2world(x, y, wsize):
    '''Convert window (pixel) coordinates to world (board) coordinates.

    Parameters
    ----------
    x, y : int
        Window coordinates to convert.
    wsize : int
        Window size in pixels (the window is square).

    Returns
    -------
    list > [float, float]
        Converted coordinates; y is flipped because the window's y axis
        grows downwards while the world's grows upwards.
    '''
    world_x = 9 * x / wsize - 1
    world_y = 7 - (9 * y / wsize - 1)
    return [world_x, world_y]
def draw_pieces(gameboard, imgID_dict, size=8):
    '''Draw every piece currently placed on the board.

    Parameters
    ----------
    gameboard : dict > {(int, int): obj, ...}
        Mapping from board coordinates to piece objects.
    imgID_dict : dict
        Mapping from piece name to its texture id.
    size : int, default 8
        Board dimension (squares per side).
    '''
    glEnable(GL_TEXTURE_2D)  # texture mapping on while pieces are drawn
    for col in range(size):
        for row in range(size):
            occupant = gameboard.get((col, row))  # piece at (col, row), if any
            if occupant:
                draw_img(col, row, imgID_dict[occupant.name])
    glDisable(GL_TEXTURE_2D)  # texture mapping back off
def draw_str(x, y, string, font=GLUT_BITMAP_HELVETICA_18, gap=0.25):
    '''Draw a string one bitmap glyph at a time.

    Parameters
    ----------
    x, y : float
        World coordinates of the first glyph.
    string : str
        Text to draw.
    font : GLUT bitmap font, default GLUT_BITMAP_HELVETICA_18
        One of GLUT_BITMAP_8_BY_13, GLUT_BITMAP_9_BY_15,
        GLUT_BITMAP_TIMES_ROMAN_10, GLUT_BITMAP_TIMES_ROMAN_24,
        GLUT_BITMAP_HELVETICA_10, GLUT_BITMAP_HELVETICA_12,
        GLUT_BITMAP_HELVETICA_18.
    gap : float, default 0.25
        Horizontal spacing between glyph origins.
    '''
    for offset, char in enumerate(string):
        glRasterPos2f(x + gap * offset, y)  # position this glyph
        glutBitmapCharacter(font, ord(char))  # draw it
def square(x, y):
    '''Draw a filled unit square centred on (x, y).

    Parameters
    ----------
    x, y : float
        Centre of the square in world coordinates.
    '''
    corners = ((-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5))
    glPushMatrix()  # begin a local transform
    glTranslate(x, y, 0)  # move to the square's centre
    glBegin(GL_QUADS)
    for corner_x, corner_y in corners:
        glVertex(corner_x, corner_y)
    glEnd()
    glPopMatrix()  # end the local transform
def circle(x, y, opponent, r=0.25):
    '''Draw a filled 12-sided circle used to highlight a square.

    Parameters
    ----------
    x, y : float
        Centre of the circle.
    opponent : bool
        When truthy the circle is drawn red, otherwise blue.
    r : float, default 0.25
        Radius.
    '''
    fill = (1.0, 0.5, 0.5, 0.7) if opponent else (0.5, 0.5, 1.0, 0.7)
    glPushMatrix()
    glTranslate(x, y, 0)
    glColor(*fill)
    glBegin(GL_POLYGON)
    for step in range(12):
        glVertex(r * cos(2 * pi * step / 12), r * sin(2 * pi * step / 12), 0)
    glEnd()
    glPopMatrix()
def draw_balloon(x, y):
    '''
    Draw the speech balloon shown while a pawn is being promoted.

    Parameters
    ----------
    x, y : int
        Board coordinates of the promoting piece; the balloon's tail
        points at this square.
    '''
    glColor(0.5, 0.5, 0.5)  # grey balloon
    glBegin(GL_QUADS)  # rectangular balloon body
    glVertex(-0.5, 2.5)
    glVertex(-0.5, 4.5)
    glVertex(7.5, 4.5)
    glVertex(7.5, 2.5)
    glEnd()
    glBegin(GL_TRIANGLES)  # tail pointing at the piece
    glVertex(3.0, 3.5)
    glVertex(4.0, 3.5)
    glVertex(x, y)
    glEnd()
def on_square(x, y, left, right, bottom, top):
    '''Return True iff (x, y) lies strictly inside the given rectangle.

    Parameters
    ----------
    x, y : float
        Point to test.
    left, right, bottom, top : float
        Rectangle edge coordinates.

    Returns
    -------
    bool
    '''
    # BUGFIX: the original only returned True on a hit and fell through to
    # an implicit None otherwise, relying on falsiness; return an explicit
    # bool in every case.
    return left < x < right and bottom < y < top
# Board squares drawn in the darker colour: both coordinates even, plus
# both coordinates odd (i.e. coordinates of equal parity).
dark_squares_list = (
    [(col, row) for col in range(0, 8, 2) for row in range(0, 8, 2)]
    + [(col, row) for col in range(1, 8, 2) for row in range(1, 8, 2)]
)
def draw_squares():
    '''Draw the 8x8 board, alternating dark and light square colours.'''
    for i in range(8):
        for j in range(8):
            # Only the colour depends on the square's parity; the identical
            # square(i, j) call was previously duplicated in both branches.
            if (i, j) in dark_squares_list:
                glColor(0.82, 0.55, 0.28)  # dark (brown) squares
            else:
                glColor(1.00, 0.81, 0.62)  # light (tan) squares
            square(i, j)
def draw_file():
    '''Draw the file letters (a-h) beneath the board.'''
    glColor(1.0, 1.0, 1.0)  # white text
    for file_index, letter in enumerate('abcdefgh'):
        draw_str(file_index, -0.75, letter)
def draw_rank():
    '''Draw the rank numbers (1-8) along the board's left edge.'''
    glColor(1.0, 1.0, 1.0)  # white text
    for rank_index, label in enumerate(str(number) for number in range(1, 9)):
        draw_str(-0.75, rank_index, label)
def draw_available_moves(poslist, opponent=None):
    '''Highlight every square the selected piece may move to.

    Parameters
    ----------
    poslist : list > [(int, int), ...]
        Destination coordinates to highlight.
    opponent : bool or None, default None
        When truthy the highlights are drawn in red instead of blue.
    '''
    for move_x, move_y in poslist:
        circle(move_x, move_y, opponent)
| [
6738,
10688,
1330,
31028,
11,
7813,
11,
8615,
201,
198,
11748,
28686,
201,
198,
6738,
30672,
13,
8763,
1330,
1635,
201,
198,
6738,
30672,
13,
8763,
3843,
1330,
1635,
201,
198,
201,
198,
6738,
350,
4146,
1330,
7412,
201,
198,
201,
198,... | 1.443213 | 4,059 |
from collections import OrderedDict
import json
import base64
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
33918,
198,
11748,
2779,
2414,
628,
220,
220,
220,
220,
220,
220,
220,
220,
628,
198
] | 2.96 | 25 |
from abc import abstractmethod
from typing import List, Match
import datetime
from recognizers_text.extractor import ExtractResult
from recognizers_date_time.date_time.date_extractor import DateExtractor
from recognizers_date_time.date_time.constants import Constants
from recognizers_text.utilities import RegExpUtility
| [
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
19720,
1330,
7343,
11,
13225,
198,
11748,
4818,
8079,
198,
6738,
3018,
11341,
62,
5239,
13,
2302,
40450,
1330,
29677,
23004,
198,
198,
6738,
3018,
11341,
62,
4475,
62,
2435,
13,
4475,
62,
... | 3.845238 | 84 |
import sys
from automlstreams.streams import KafkaStream
from automlstreams.meta import MetaClassifier
from skmultiflow.trees import HoeffdingTree
from skmultiflow.evaluation import EvaluatePrequential
from kafka import KafkaConsumer, KafkaProducer
from io import StringIO
import pandas as pd
import numpy as np
from config import *
if __name__ == "__main__":
    try:
        input_topic = sys.argv[1]
        output_topic = sys.argv[2]
        target_index = int(sys.argv[3])
    except (IndexError, ValueError):
        # IndexError: too few arguments; ValueError: non-integer target_index
        # (previously an uncaught traceback).
        raise SystemExit(f"Usage: {sys.argv[0]} input_topic output_topic target_index")
    else:
        # BUGFIX: run outside the try block so an IndexError raised inside
        # run_indefinetly() is no longer masked by the usage message.
        run_indefinetly(input_topic, output_topic, target_index)
11748,
25064,
198,
6738,
3557,
75,
5532,
82,
13,
5532,
82,
1330,
46906,
12124,
198,
6738,
3557,
75,
5532,
82,
13,
28961,
1330,
30277,
9487,
7483,
198,
6738,
1341,
16680,
361,
9319,
13,
83,
6037,
1330,
367,
2577,
487,
12083,
27660,
198... | 2.738589 | 241 |
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import requests
import re
import urllib
import binascii
from Config.config_requests import headers
# Suppress the InsecureRequestWarning emitted for unverified HTTPS requests.
requests.packages.urllib3.disable_warnings()
########################################################################################################################
# Script information
NAME = 'CVE_2018_1000861'
AUTHOR = "RabbitMask"
# REMARK (verbatim, user-facing): Jenkins remote command execution vulnerability.
REMARK = 'Jenkins远程命令执行漏洞'
FOFA_RULE = 'app="Jenkins"'
########################################################################################################################
# Vulnerability detection module
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
4808,
9,
62,
19617,
25,
40477,
12,
23,
4808,
9,
62,
198,
198,
11748,
7007,
198,
11748,
302,
198,
11748,
2956,
297,
571,
198,
11748,
9874,
292,
979,
72,
198,
6738,
17056,
13,... | 3.164773 | 176 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compare the artifacts from two builds."""
import ast
import difflib
import json
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def get_files_to_compare(build_dir, recursive=False):
  """Get the list of files to compare.

  Args:
    build_dir: path of the build output directory to scan.
    recursive: when False, only '*_apk' sub-directories are descended into;
        when True the whole tree is walked.

  Returns:
    set of file paths relative to build_dir.
  """
  # TODO(maruel): Add '.pdb'.
  allowed = frozenset(
      ('', '.apk', '.app', '.dll', '.dylib', '.exe', '.nexe', '.so'))
  non_x_ok_exts = frozenset(('.apk', '.isolated'))

  def check(f):
    """True for build artifacts worth comparing.

    NOTE(review): reconstructed helper — the original called an undefined
    'check' (NameError at runtime) while 'allowed'/'non_x_ok_exts' were
    never used; this predicate wires them up the way the code implies.
    """
    if not os.path.isfile(f):
      return False
    ext = os.path.splitext(f)[1]
    if ext not in allowed and ext not in non_x_ok_exts:
      return False
    # Executables must carry the x bit; .apk/.isolated files need not.
    return ext in non_x_ok_exts or os.access(f, os.X_OK)

  ret_files = set()
  for root, dirs, files in os.walk(build_dir):
    if not recursive:
      dirs[:] = [d for d in dirs if d.endswith('_apk')]
    for f in (f for f in files if check(os.path.join(root, f))):
      ret_files.add(os.path.relpath(os.path.join(root, f), build_dir))
  return ret_files
def diff_dict(a, b):
  """Returns a yaml-like textural diff of two dict.
  It is currently optimized for the .isolated format.
  """
  chunks = []
  for key in set(a) | set(b):
    lhs = a.get(key)
    rhs = b.get(key)
    if lhs.__class__ != rhs.__class__:
      # Different types (including one side missing): show repr() of both.
      chunks.append('- %s: %r != %r\n' % (key, lhs, rhs))
    elif isinstance(lhs, dict):
      # Recurse and indent the nested diff by two spaces.
      nested = diff_dict(lhs, rhs)
      if nested:
        indented = '\n'.join('  ' + line for line in nested.splitlines())
        chunks.append('- %s:\n%s\n' % (key, indented))
    elif lhs != rhs:
      chunks.append('- %s: %s != %s\n' % (key, lhs, rhs))
  return ''.join(chunks).rstrip()
def diff_binary(first_filepath, second_filepath, file_len):
  """Returns a compact binary diff if the diff is small enough."""
  # NOTE(review): Python 2 code -- xrange, char-wise str iteration and
  # .encode('hex') below do not exist on Python 3.
  BLOCK_SIZE = 8192
  CHUNK_SIZE = 32
  NUM_CHUNKS_IN_BLOCK = BLOCK_SIZE / CHUNK_SIZE
  MAX_STREAMS = 10
  diffs = 0
  # `streams` collects up to MAX_STREAMS differing CHUNK_SIZE-byte windows
  # for display; it becomes None once that budget is exceeded.
  streams = []
  offset = 0
  with open(first_filepath, 'rb') as lhs:
    with open(second_filepath, 'rb') as rhs:
      # Skip part of Win32 COFF header if timestamps are different.
      #
      # COFF header:
      #   0 - 1: magic.
      #   2 - 3: # sections.
      #   4 - 7: timestamp.
      #   ....
      #
      # COFF BigObj header:
      #   0 - 3: signature (0000 FFFF)
      #   4 - 5: version
      #   6 - 7: machine
      #   8 - 11: timestamp.
      COFF_HEADER_TO_COMPARE_SIZE = 12
      if (sys.platform == 'win32'
          and os.path.splitext(first_filepath)[1] in ('.o', '.obj')
          and file_len > COFF_HEADER_TO_COMPARE_SIZE):
        rhs_data = rhs.read(COFF_HEADER_TO_COMPARE_SIZE)
        lhs_data = lhs.read(COFF_HEADER_TO_COMPARE_SIZE)
        # Same layout but differing timestamp field: leave the header bytes
        # consumed (skipped); otherwise rewind and compare from byte 0.
        if (lhs_data[0:4] == rhs_data[0:4] and lhs_data[4:8] != rhs_data[4:8]
            and lhs_data[8:12] == rhs_data[8:12]):
          offset += COFF_HEADER_TO_COMPARE_SIZE
        elif (lhs_data[0:4] == '\x00\x00\xff\xff' and
              lhs_data[0:8] == rhs_data[0:8] and
              lhs_data[8:12] != rhs_data[8:12]):
          offset += COFF_HEADER_TO_COMPARE_SIZE
        else:
          lhs.seek(0)
          rhs.seek(0)
      # Stream both files block-by-block; count differing bytes and record a
      # few differing chunks for the human-readable report below.
      while True:
        lhs_data = lhs.read(BLOCK_SIZE)
        rhs_data = rhs.read(BLOCK_SIZE)
        if not lhs_data:
          break
        if lhs_data != rhs_data:
          diffs += sum(l != r for l, r in zip(lhs_data, rhs_data))
          for idx in xrange(NUM_CHUNKS_IN_BLOCK):
            lhs_chunk = lhs_data[idx * CHUNK_SIZE:(idx + 1) * CHUNK_SIZE]
            rhs_chunk = rhs_data[idx * CHUNK_SIZE:(idx + 1) * CHUNK_SIZE]
            if streams is not None and lhs_chunk != rhs_chunk:
              if len(streams) < MAX_STREAMS:
                streams.append((offset + CHUNK_SIZE * idx,
                                lhs_chunk, rhs_chunk))
              else:
                streams = None
        offset += len(lhs_data)
        del lhs_data
        del rhs_data
  if not diffs:
    return None
  result = '%d out of %d bytes are different (%.2f%%)' % (
      diffs, file_len, 100.0 * diffs / file_len)
  if streams:
    # Render each recorded chunk as hex plus a printable-ASCII gutter.
    encode = lambda text: ''.join(i if 31 < ord(i) < 127 else '.' for i in text)
    for offset, lhs_data, rhs_data in streams:
      lhs_line = '%s \'%s\'' % (lhs_data.encode('hex'), encode(lhs_data))
      rhs_line = '%s \'%s\'' % (rhs_data.encode('hex'), encode(rhs_data))
      diff = list(difflib.Differ().compare([lhs_line], [rhs_line]))[-1][2:-1]
      result += '\n  0x%-8x: %s\n              %s\n              %s' % (
          offset, lhs_line, rhs_line, diff)
  return result
def compare_files(first_filepath, second_filepath):
  """Compares two binaries and return the number of differences between them.
  Returns None if the files are equal, a string otherwise.
  """
  # .isolated files are JSON: try a structural diff first.
  if first_filepath.endswith('.isolated'):
    def load_json(path):
      with open(path, 'rb') as fh:
        return json.load(fh)
    structural = diff_dict(load_json(first_filepath),
                           load_json(second_filepath))
    if structural:
      indented = ('  ' + line for line in structural.splitlines())
      return '\n' + '\n'.join(indented)
    # else, falls through binary comparison, it must be binary equal too.
  first_size = os.stat(first_filepath).st_size
  second_size = os.stat(second_filepath).st_size
  if first_size != second_size:
    return 'different size: %d != %d' % (first_size, second_size)
  return diff_binary(first_filepath, second_filepath, first_size)
def get_deps(build_dir, target):
  """Returns list of object files needed to build target."""
  # NOTE(review): Python 2 code -- `print >> sys.stderr` is a syntax error
  # on Python 3.
  NODE_PATTERN = re.compile(r'label="([a-zA-Z0-9_\\/.-]+)"')
  CHECK_EXTS = ('.o', '.obj')
  # Rename to possibly original directory name if possible.
  fixed_build_dir = build_dir
  if build_dir.endswith('.1') or build_dir.endswith('.2'):
    fixed_build_dir = build_dir[:-2]
    if os.path.exists(fixed_build_dir):
      print >> sys.stderr, ('fixed_build_dir %s exists.'
                            ' will try to use orig dir.' % fixed_build_dir)
      fixed_build_dir = build_dir
    else:
      shutil.move(build_dir, fixed_build_dir)
  try:
    # Ask ninja for the dependency graph of `target` in graphviz format.
    out = subprocess.check_output(['ninja', '-C', fixed_build_dir,
                                   '-t', 'graph', target])
  except subprocess.CalledProcessError as e:
    print >> sys.stderr, 'error to get graph for %s: %s' % (target, e)
    return []
  finally:
    # Rename again if we renamed before.
    if fixed_build_dir != build_dir:
      shutil.move(fixed_build_dir, build_dir)
  # Parse node labels out of the graphviz output, keeping relative object
  # file paths only.
  files = []
  for line in out.splitlines():
    matched = NODE_PATTERN.search(line)
    if matched:
      path = matched.group(1)
      if not os.path.splitext(path)[1] in CHECK_EXTS:
        continue
      if os.path.isabs(path):
        print >> sys.stderr, ('not support abs path %s used for target %s'
                              % (path, target))
        continue
      files.append(path)
  return files
def compare_deps(first_dir, second_dir, targets):
  """Print difference of dependent files."""
  # Returns the list of object files that differ between the two builds.
  diffs = set()
  for target in targets:
    first_deps = get_deps(first_dir, target)
    second_deps = get_deps(second_dir, target)
    print 'Checking %s difference: (%s deps)' % (target, len(first_deps))
    if set(first_deps) != set(second_deps):
      # Since we do not think this case occurs, we do not do anything special
      # for this case.
      print 'deps on %s are different: %s' % (
          target, set(first_deps).symmetric_difference(set(second_deps)))
      continue
    max_filepath_len = max(len(n) for n in first_deps)
    for d in first_deps:
      first_file = os.path.join(first_dir, d)
      second_file = os.path.join(second_dir, d)
      result = compare_files(first_file, second_file)
      if result:
        print('  %-*s: %s' % (max_filepath_len, d, result))
        diffs.add(d)
  return list(diffs)
def compare_build_artifacts(first_dir, second_dir, target_platform,
                            json_output, recursive=False):
  """Compares the artifacts from two distinct builds."""
  # Returns a process exit code: 1 for bad arguments, otherwise 1 iff there
  # were unexpected (non-whitelisted) differences.
  if not os.path.isdir(first_dir):
    print >> sys.stderr, '%s isn\'t a valid directory.' % first_dir
    return 1
  if not os.path.isdir(second_dir):
    print >> sys.stderr, '%s isn\'t a valid directory.' % second_dir
    return 1
  # Print the current time as little-endian hex so it can be grepped for in
  # binaries that embed a build timestamp.
  epoch_hex = struct.pack('<I', int(time.time())).encode('hex')
  print('Epoch: %s' %
        ' '.join(epoch_hex[i:i+2] for i in xrange(0, len(epoch_hex), 2)))
  with open(os.path.join(BASE_DIR, 'deterministic_build_blacklist.json')) as f:
    blacklist = frozenset(json.load(f))
  with open(os.path.join(BASE_DIR, 'deterministic_build_whitelist.pyl')) as f:
    whitelist = frozenset(ast.literal_eval(f.read())[target_platform])
  # The two directories.
  first_list = get_files_to_compare(first_dir, recursive) - blacklist
  second_list = get_files_to_compare(second_dir, recursive) - blacklist
  equals = []
  expected_diffs = []
  unexpected_diffs = []
  unexpected_equals = []
  all_files = sorted(first_list & second_list)
  missing_files = sorted(first_list.symmetric_difference(second_list))
  if missing_files:
    print >> sys.stderr, 'Different list of files in both directories:'
    print >> sys.stderr, '\n'.join('  ' + i for i in missing_files)
    unexpected_diffs.extend(missing_files)
  max_filepath_len = max(len(n) for n in all_files)
  for f in all_files:
    first_file = os.path.join(first_dir, f)
    second_file = os.path.join(second_dir, f)
    result = compare_files(first_file, second_file)
    if not result:
      tag = 'equal'
      equals.append(f)
      if f in whitelist:
        unexpected_equals.append(f)
    else:
      if f in whitelist:
        expected_diffs.append(f)
        tag = 'expected'
      else:
        unexpected_diffs.append(f)
        tag = 'unexpected'
      result = 'DIFFERENT (%s): %s' % (tag, result)
    print('%-*s: %s' % (max_filepath_len, f, result))
  unexpected_diffs.sort()
  print('Equals: %d' % len(equals))
  print('Expected diffs: %d' % len(expected_diffs))
  print('Unexpected diffs: %d' % len(unexpected_diffs))
  if unexpected_diffs:
    print('Unexpected files with diffs:\n')
    for u in unexpected_diffs:
      print('  %s' % u)
  if unexpected_equals:
    print('Unexpected files with no diffs:\n')
    for u in unexpected_equals:
      print('  %s' % u)
  # Dig into the object-file dependencies of everything that differed (except
  # files missing on one side, which cannot be compared).
  all_diffs = expected_diffs + unexpected_diffs
  diffs_to_investigate = sorted(set(all_diffs).difference(missing_files))
  deps_diff = compare_deps(first_dir, second_dir, diffs_to_investigate)
  if json_output:
    try:
      out = {
          'expected_diffs': expected_diffs,
          'unexpected_diffs': unexpected_diffs,
          'deps_diff': deps_diff,
      }
      with open(json_output, 'w') as f:
        json.dump(out, f)
    except Exception as e:
      print('failed to write json output: %s' % e)
  return int(bool(unexpected_diffs))
if __name__ == '__main__':
  # NOTE(review): main() is not defined in this chunk of the file; it is
  # presumably defined elsewhere (option parsing, then a call to
  # compare_build_artifacts) -- confirm before relying on this entry point.
  sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
1946,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,... | 2.233653 | 4,802 |
import http.client, urllib.request, urllib.parse, urllib.error, base64
import requests
from src.util.env import get_env
from src.util import log
# HTTP headers for the Azure Computer Vision REST call; the subscription key
# is read from the CV_API environment variable (see src.util.env.get_env).
headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': get_env('CV_API'),
}
# Query-string parameters: only the 'Adult' visual feature (adult/racy
# content scoring) is requested.
params = urllib.parse.urlencode({
    # Request parameters
    'visualFeatures': 'Adult'
})
# Endpoint of the Computer Vision v2.0 'analyze' operation (North Europe region).
cognitiveUrl = 'https://northeurope.api.cognitive.microsoft.com/vision/v2.0/analyze'
| [
11748,
2638,
13,
16366,
11,
2956,
297,
571,
13,
25927,
11,
2956,
297,
571,
13,
29572,
11,
2956,
297,
571,
13,
18224,
11,
2779,
2414,
198,
11748,
7007,
198,
6738,
12351,
13,
22602,
13,
24330,
1330,
651,
62,
24330,
198,
6738,
12351,
1... | 2.718563 | 167 |
from functools import wraps
from threading import Lock
# Module-wide lock shared by every function decorated with @synchronized().
mutex: Lock = Lock()


def synchronized():
    """Decorator factory that serialises calls to the wrapped function.

    Every decorated function shares the single module-level ``mutex``, so at
    most one of them runs at a time across all threads.

    Notice: currently does not support nested locking (the lock is not
    reentrant) -- a decorated function calling another decorated function
    will deadlock.

    Fix: the original returned an undefined name ``wrapper`` (NameError on
    first use); the decorator is now actually implemented.
    """

    def wrapper(func):
        @wraps(func)
        def locked(*args, **kwargs):
            # Context manager guarantees release even if func raises.
            with mutex:
                return func(*args, **kwargs)

        return locked

    return wrapper
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
4704,
278,
1330,
13656,
198,
21973,
1069,
25,
13656,
796,
13656,
3419,
628,
198,
4299,
47192,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
220,
220,
17641,
25,
3058,
8... | 3.446154 | 65 |
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=========
fMRI: FSL
=========
A workflow that uses fsl to perform a first level analysis on the nipype
tutorial data set::
python fmri_fsl.py
First tell python where to find the appropriate functions.
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model generation
import nipype.algorithms.rapidart as ra # artifact detection
"""
Preliminaries
-------------
Setup any package specific configuration. The output file format for FSL
routines is being set to compressed NIFTI.
"""
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
"""
Setting up workflows
--------------------
In this tutorial we will be setting up a hierarchical workflow for fsl
analysis. This will demonstrate how pre-defined workflows can be setup and
shared across users, projects and labs.
Setup preprocessing workflow
----------------------------
This is a generic fsl feat preprocessing workflow encompassing skull stripping,
motion correction and smoothing operations.
"""
preproc = pe.Workflow(name='preproc')
"""
Set up a node to define all inputs required for the preprocessing workflow
"""
inputnode = pe.Node(
interface=util.IdentityInterface(fields=[
'func',
'struct',
]),
name='inputspec')
"""
Convert functional images to float representation. Since there can be more than
one functional run we use a MapNode to convert each run.
"""
img2float = pe.MapNode(
interface=fsl.ImageMaths(
out_data_type='float', op_string='', suffix='_dtype'),
iterfield=['in_file'],
name='img2float')
preproc.connect(inputnode, 'func', img2float, 'in_file')
"""
Extract the middle volume of the first run as the reference
"""
extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), name='extractref')
"""
Define a function to pick the first file from a list of files
"""
preproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file')
"""
Define a function to return the 1 based index of the middle volume
"""
preproc.connect(inputnode, ('func', getmiddlevolume), extract_ref, 't_min')
"""
Realign the functional runs to the middle volume of the first run
"""
motion_correct = pe.MapNode(
interface=fsl.MCFLIRT(save_mats=True, save_plots=True),
name='realign',
iterfield=['in_file'])
preproc.connect(img2float, 'out_file', motion_correct, 'in_file')
preproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
"""
Plot the estimated motion parameters
"""
plot_motion = pe.MapNode(
interface=fsl.PlotMotionParams(in_source='fsl'),
name='plot_motion',
iterfield=['in_file'])
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
preproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
"""
Extract the mean volume of the first functional run
"""
meanfunc = pe.Node(
interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
name='meanfunc')
preproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file')
"""
Strip the skull from the mean functional to generate a mask
"""
meanfuncmask = pe.Node(
interface=fsl.BET(mask=True, no_output=True, frac=0.3),
name='meanfuncmask')
preproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')
"""
Mask the functional runs with the extracted mask
"""
maskfunc = pe.MapNode(
interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'),
iterfield=['in_file'],
name='maskfunc')
preproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
preproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')
"""
Determine the 2nd and 98th percentile intensities of each functional run
"""
getthresh = pe.MapNode(
interface=fsl.ImageStats(op_string='-p 2 -p 98'),
iterfield=['in_file'],
name='getthreshold')
preproc.connect(maskfunc, 'out_file', getthresh, 'in_file')
"""
Threshold the first run of the functional data at 10% of the 98th percentile
"""
threshold = pe.Node(
interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'),
name='threshold')
preproc.connect(maskfunc, ('out_file', pickfirst), threshold, 'in_file')
"""
Define a function to get 10% of the intensity
"""
preproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(
interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield=['in_file'],
name='medianval')
preproc.connect(motion_correct, 'out_file', medianval, 'in_file')
preproc.connect(threshold, 'out_file', medianval, 'mask_file')
"""
Dilate the mask
"""
dilatemask = pe.Node(
interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'),
name='dilatemask')
preproc.connect(threshold, 'out_file', dilatemask, 'in_file')
"""
Mask the motion corrected functional runs with the dilated mask
"""
maskfunc2 = pe.MapNode(
interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
iterfield=['in_file'],
name='maskfunc2')
preproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
preproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')
"""
Determine the mean image from each functional run
"""
meanfunc2 = pe.MapNode(
interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
iterfield=['in_file'],
name='meanfunc2')
preproc.connect(maskfunc2, 'out_file', meanfunc2, 'in_file')
"""
Merge the median values with the mean functional images into a coupled list
"""
mergenode = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge')
preproc.connect(meanfunc2, 'out_file', mergenode, 'in1')
preproc.connect(medianval, 'out_stat', mergenode, 'in2')
"""
Smooth each run using SUSAN with the brightness threshold set to 75% of the
median value for each run and a mask constituting the mean functional
"""
smooth = pe.MapNode(
interface=fsl.SUSAN(),
iterfield=['in_file', 'brightness_threshold', 'usans'],
name='smooth')
"""
Define a function to get the brightness threshold for SUSAN
"""
preproc.connect(maskfunc2, 'out_file', smooth, 'in_file')
preproc.connect(medianval, ('out_stat', getbtthresh), smooth,
'brightness_threshold')
preproc.connect(mergenode, ('out', getusans), smooth, 'usans')
"""
Mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(
interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
iterfield=['in_file'],
name='maskfunc3')
preproc.connect(smooth, 'smoothed_file', maskfunc3, 'in_file')
preproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
"""
Scale each volume of the run so that the median value of the run is set to 10000
"""
intnorm = pe.MapNode(
interface=fsl.ImageMaths(suffix='_intnorm'),
iterfield=['in_file', 'op_string'],
name='intnorm')
preproc.connect(maskfunc3, 'out_file', intnorm, 'in_file')
"""
Define a function to get the scaling factor for intensity normalization
"""
preproc.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string')
"""
Perform temporal highpass filtering on the data
"""
highpass = pe.MapNode(
interface=fsl.ImageMaths(suffix='_tempfilt'),
iterfield=['in_file'],
name='highpass')
preproc.connect(intnorm, 'out_file', highpass, 'in_file')
"""
Generate a mean functional image from the first run
"""
meanfunc3 = pe.MapNode(
interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
iterfield=['in_file'],
name='meanfunc3')
preproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file')
"""
Strip the structural image and coregister the mean functional image to the
structural image
"""
nosestrip = pe.Node(interface=fsl.BET(frac=0.3), name='nosestrip')
skullstrip = pe.Node(interface=fsl.BET(mask=True), name='stripstruct')
coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister')
"""
Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity and/or movement.
"""
art = pe.MapNode(
interface=ra.ArtifactDetect(
use_differences=[True, False],
use_norm=True,
norm_threshold=1,
zintensity_threshold=3,
parameter_source='FSL',
mask_type='file'),
iterfield=['realigned_files', 'realignment_parameters'],
name="art")
preproc.connect([
(inputnode, nosestrip, [('struct', 'in_file')]),
(nosestrip, skullstrip, [('out_file', 'in_file')]),
(skullstrip, coregister, [('out_file', 'in_file')]),
(meanfunc2, coregister, [(('out_file', pickfirst), 'reference')]),
(motion_correct, art, [('par_file', 'realignment_parameters')]),
(maskfunc2, art, [('out_file', 'realigned_files')]),
(dilatemask, art, [('out_file', 'mask_file')]),
])
"""
Set up model fitting workflow
-----------------------------
"""
modelfit = pe.Workflow(name='modelfit')
"""
Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information.
"""
modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
"""
Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf
file for analysis
"""
level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")
"""
Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat
file for use by FILMGLS
"""
modelgen = pe.MapNode(
interface=fsl.FEATModel(),
name='modelgen',
iterfield=['fsf_file', 'ev_files'])
"""
Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a
mat file and a functional run
"""
modelestimate = pe.MapNode(
interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5, threshold=1000),
name='modelestimate',
iterfield=['design_file', 'in_file'])
"""
Use :class:`nipype.interfaces.fsl.ContrastMgr` to generate contrast estimates
"""
conestimate = pe.MapNode(
interface=fsl.ContrastMgr(),
name='conestimate',
iterfield=[
'tcon_file', 'param_estimates', 'sigmasquareds', 'corrections',
'dof_file'
])
modelfit.connect([
(modelspec, level1design, [('session_info', 'session_info')]),
(level1design, modelgen, [('fsf_files', 'fsf_file'), ('ev_files',
'ev_files')]),
(modelgen, modelestimate, [('design_file', 'design_file')]),
(modelgen, conestimate, [('con_file', 'tcon_file')]),
(modelestimate, conestimate,
[('param_estimates', 'param_estimates'), ('sigmasquareds',
'sigmasquareds'),
('corrections', 'corrections'), ('dof_file', 'dof_file')]),
])
"""
Set up fixed-effects workflow
-----------------------------
"""
fixed_fx = pe.Workflow(name='fixedfx')
"""
Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and
varcopes for each condition
"""
copemerge = pe.MapNode(
interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="copemerge")
varcopemerge = pe.MapNode(
interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="varcopemerge")
"""
Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition
specific level 2 model design files
"""
level2model = pe.Node(interface=fsl.L2Model(), name='l2model')
"""
Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
"""
flameo = pe.MapNode(
interface=fsl.FLAMEO(run_mode='fe'),
name="flameo",
iterfield=['cope_file', 'var_cope_file'])
fixed_fx.connect([
(copemerge, flameo, [('merged_file', 'cope_file')]),
(varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
(level2model, flameo, [('design_mat', 'design_file'),
('design_con', 't_con_file'), ('design_grp',
'cov_split_file')]),
])
"""
Set up first-level workflow
---------------------------
"""
firstlevel = pe.Workflow(name='firstlevel')
firstlevel.connect(
[(preproc, modelfit, [('highpass.out_file', 'modelspec.functional_runs'),
('art.outlier_files', 'modelspec.outlier_files'),
('highpass.out_file', 'modelestimate.in_file')]),
(preproc, fixed_fx,
[('coregister.out_file', 'flameo.mask_file')]), (modelfit, fixed_fx, [
(('conestimate.copes', sort_copes), 'copemerge.in_files'),
(('conestimate.varcopes', sort_copes), 'varcopemerge.in_files'),
(('conestimate.copes', num_copes), 'l2model.num_copes'),
])])
"""
Experiment specific components
------------------------------
The nipype tutorial contains data for two subjects. Subject data
is in two subdirectories, ``s1`` and ``s2``. Each subject directory
contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And
one anatomical volume named struct.nii.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
In the example below, run 'f3' is of type 'func' and gets mapped to a
nifti filename through a template '%s.nii'. So 'f3' would become
'f3.nii'.
"""
# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1']  # , 's3']
# Map field names to individual subject runs.
# Each template argument list is [subject_id, run-identifier(s)]; the values
# are substituted into datasource.inputs.template ('%s/%s.nii') below.
info = dict(
    func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
    struct=[['subject_id', 'struct']])
infosource = pe.Node(
interface=util.IdentityInterface(fields=['subject_id']), name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataSource` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.NodeWrapper` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(
interface=nio.DataGrabber(
infields=['subject_id'], outfields=['func', 'struct']),
name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Use the get_node function to retrieve an internal node by name. Then set the
iterables on this node to perform two different extents of smoothing.
"""
smoothnode = firstlevel.get_node('preproc.smooth')
assert (str(smoothnode) == 'preproc.smooth')
smoothnode.iterables = ('fwhm', [5., 10.])
# High-pass cutoff (seconds) and repetition time (seconds).
hpcutoff = 120
TR = 3.  # ensure float
firstlevel.inputs.preproc.highpass.suffix = '_hpf'
# fslmaths -bptf takes the high-pass sigma in *volumes*; -1 disables the
# low-pass side.
# NOTE(review): the usual FSL convention is sigma = cutoff_sec / (2 * TR);
# this uses hpcutoff / TR -- confirm which convention is intended here.
firstlevel.inputs.preproc.highpass.op_string = '-bptf %d -1' % (hpcutoff / TR)
"""
Setup a function that returns subject-specific information about the
experimental paradigm. This is used by the
:class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary
to generate an SPM design matrix. In this tutorial, the same paradigm was used
for every participant. Other examples of this function are available in the
`doc/examples` folder. Note: Python knowledge required here.
"""
"""
Setup the contrast structure that needs to be evaluated. This is a list of
lists. The inner list specifies the contrasts and has the following format -
[Name,Stat,[list of condition names],[weights on those conditions]. The
condition names must match the `names` listed in the `subjectinfo` function
described above.
"""
# T-contrasts: [name, stat, [condition names], [weights]].
cont1 = ['Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]]
cont2 = ['Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]]
# F-contrast over both T-contrasts.
# NOTE(review): cont3 is defined but excluded from `contrasts` below, so no
# F-test is estimated -- confirm this is intentional.
cont3 = ['Task', 'F', [cont1, cont2]]
contrasts = [cont1, cont2]
firstlevel.inputs.modelfit.modelspec.input_units = 'secs'
firstlevel.inputs.modelfit.modelspec.time_repetition = TR
firstlevel.inputs.modelfit.modelspec.high_pass_filter_cutoff = hpcutoff
firstlevel.inputs.modelfit.level1design.interscan_interval = TR
# Double-gamma HRF without temporal derivatives.
firstlevel.inputs.modelfit.level1design.bases = {'dgamma': {'derivs': False}}
firstlevel.inputs.modelfit.level1design.contrasts = contrasts
# Enable pre-whitening (FILMGLS autocorrelation modelling).
firstlevel.inputs.modelfit.level1design.model_serial_correlations = True
"""
Set up complete workflow
========================
"""
l1pipeline = pe.Workflow(name="level1")
l1pipeline.base_dir = os.path.abspath('./fsl/workingdir')
l1pipeline.config = {
"execution": {
"crashdump_dir": os.path.abspath('./fsl/crashdumps')
}
}
l1pipeline.connect([
(infosource, datasource, [('subject_id', 'subject_id')]),
(infosource, firstlevel, [(('subject_id', subjectinfo),
'modelfit.modelspec.subject_info')]),
(datasource, firstlevel, [
('struct', 'preproc.inputspec.struct'),
('func', 'preproc.inputspec.func'),
]),
])
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures with
appropriate parameters and the connectivity between the processes, but does not
generate any output. To actually run the analysis on the data the
``nipype.pipeline.engine.Pipeline.Run`` function needs to be called.
"""
if __name__ == '__main__':
l1pipeline.write_graph()
outgraph = l1pipeline.run()
# l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs':2})
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
795,
16436,
25,
532,
9,
12,
4235,
25,
21015,
26,
12972,
12,
521,
298,
12,
28968,
25,
604,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
532,
9,
12,
198,
2,
25357,
25,
900,
... | 2.765837 | 6,551 |
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20161021090740.1: * @file ../commands/checkerCommands.py
#@@first
'''Commands that invoke external checkers'''
#@+<< imports >>
#@+node:ekr.20161021092038.1: ** << imports >> checkerCommands.py
import leo.core.leoGlobals as g
try:
# pylint: disable=import-error
# We can't assume the user has this.
import flake8
except Exception: # May not be ImportError.
flake8 = None
try:
import pyflakes
except ImportError:
pyflakes = None
# import os
import shlex
# import subprocess
import sys
import time
#@-<< imports >>
#@+others
#@+node:ekr.20161021091557.1: ** Commands
#@+node:ekr.20171211055756.1: *3* checkConventions (checkerCommands.py)
@g.command('check-conventions')
@g.command('cc')
def checkConventions(event):
    '''Experimental script to test Leo's conventions.'''
    c = event.get('c')
    if c:
        # Save any unsaved changes so the checker reads current contents.
        if c.changed: c.save()
        # Reload leoCheck on every run so edits to it take effect immediately.
        # NOTE(review): `imp` is deprecated; importlib.reload is the modern
        # equivalent.
        import imp
        import leo.core.leoCheck as leoCheck
        imp.reload(leoCheck)
        leoCheck.ConventionChecker(c).check()
#@+node:ekr.20190608084751.1: *3* find-long-lines
@g.command('find-long-lines')
def find_long_lines(event):
    '''Report long lines in the log, with clickable links.'''
    c = event.get('c')
    if not c:
        return
    #@+others # helper functions
    #@+node:ekr.20190609135639.1: *4* function: get_root
    def get_root(p):
        '''Return the nearest @<file> ancestor of p (including p), or None.'''
        for parent in p.self_and_parents():
            if parent.anyAtFileNodeName():
                return parent
        return None
    #@+node:ekr.20190608084751.2: *4* function: in_no_pylint
    def in_nopylint(p):
        '''Return True if p is controlled by @nopylint.'''
        for parent in p.self_and_parents():
            if '@nopylint' in parent.h:
                return True
        return False
    #@-others
    max_line = c.config.getInt('max-find-long-lines-length') or 110
    count, files, ignore = 0, [], []
    for p in c.all_unique_positions():
        if in_nopylint(p):
            continue
        root = get_root(p)
        if not root:
            continue
        if root.v not in files:
            files.append(root.v)
        for i, s in enumerate(g.splitLines(p.b)):
            if len(s) > max_line:
                # NOTE(review): this branch is unreachable -- `root` is always
                # truthy here because of the `continue` above; dead code kept
                # as-is.
                if not root:
                    if p.v not in ignore:
                        ignore.append(p.v)
                        g.es_print('no root', p.h)
                else:
                    count += 1
                    short_s = g.truncate(s, 30)
                    g.es('')
                    g.es_print(root.h)
                    g.es_print(p.h)
                    print(short_s)
                    g.es_clickable_link(c, p, line_number=i, message=short_s)
                # Only report the first long line of each node.
                break
    g.es_print('found %s long line%s longer than %s characters in %s file%s' % (
        count, g.plural(count), max_line, len(files), g.plural(len(files))))
#@+node:ekr.20190615180048.1: *3* find-missing-docstrings
@g.command('find-missing-docstrings')
def find_missing_docstrings(event):
    '''Report missing docstrings in the log, with clickable links.'''
    c = event.get('c')
    if not c:
        return
    #@+others # Define functions
    #@+node:ekr.20190615181104.1: *4* function: has_docstring
    def has_docstring(lines, n):
        '''
        Returns True if function/method/class whose definition
        starts on n-th line in lines has a docstring
        '''
        # By Vitalije Milosevic (Виталије Милошевић).
        for line in lines[n:]:
            s = line.strip()
            if not s or s.startswith('#'):
                continue
            if s.startswith(('"""', "'''")):
                return True
            return False
    #@+node:ekr.20190615181104.2: *4* function: is_a_definition
    def is_a_definition(line):
        '''Return True if line is a definition line.'''
        # By Vitalije Milosevic (Виталије Милошевић).
        # It may be useful to skip __init__ methods because their docstring
        # is usually docstring of the class
        return (
            line.startswith(('def ', 'class ')) and
            not line.partition(' ')[2].startswith('__init__')
        )
    #@+node:ekr.20190615182754.1: *4* function: is_root
    def is_root(p):
        '''
        A predicate returning True if p is an @<file> node that is not under @nopylint.
        '''
        for parent in p.self_and_parents():
            if g.match_word(parent.h, 0, '@nopylint'):
                return False
        return p.isAnyAtFileNode() and p.h.strip().endswith('.py')
    #@+node:ekr.20190615180900.1: *4* function: clickable_link
    def clickable_link (p, i):
        '''Return a clickable link to line i of p.b.'''
        link = p.get_UNL(with_proto=True, with_count=True, with_index=True)
        return "%s,%d" % (link, i)
    #@-others
    # Fix: time.clock() was deprecated since 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement for interval timing.
    count, found, t1 = 0, [], time.perf_counter()
    for root in g.findRootsWithPredicate(c, c.p, predicate=is_root):
        for p in root.self_and_subtree():
            lines = p.b.split('\n')
            for i, line in enumerate(lines):
                if is_a_definition(line) and not has_docstring(lines, i):
                    count += 1
                    if root.v not in found:
                        found.append(root.v)
                        g.es_print('')
                        g.es_print(root.h)
                    print(line)
                    g.es(line, nodeLink=clickable_link(p, i+1))
                    # Only report the first missing docstring per node.
                    break
    g.es_print('')
    g.es_print('found %s missing docstring%s in %s file%s in %5.2f sec.' % (
        count, g.plural(count),
        len(found), g.plural(len(found)),
        (time.perf_counter() - t1)))
#@+node:ekr.20161026092059.1: *3* kill-pylint
@g.command('kill-pylint')
@g.command('pylint-kill')
def kill_pylint(event):
    '''Kill any running pylint processes and clear the queue.'''
    # Delegates to Leo's global BackgroundProcessManager; 'pylint' is the
    # process-kind tag used when the jobs were queued.
    g.app.backgroundProcessManager.kill('pylint')
#@+node:ekr.20160517133001.1: *3* flake8 command
@g.command('flake8')
def flake8_command(event):
    '''
    Run flake8 on all nodes of the selected tree,
    or the first @<file> node in an ancestor.
    '''
    c = event.get('c')
    if c:
        # Save first so flake8 sees the on-disk state of the outline.
        if c.isChanged():
            c.save()
        # `flake8` is None when the top-of-file import failed.
        if flake8:
            Flake8Command(c).run()
        else:
            g.es_print('can not import flake8')
#@+node:ekr.20150514125218.7: *3* pylint command
@g.command('pylint')
def pylint_command(event):
    '''
    Run pylint on all nodes of the selected tree,
    or the first @<file> node in an ancestor.
    '''
    c = event.get('c')
    if not c:
        return
    # Pylint reads the files on disk, so save pending changes first.
    if c.isChanged():
        c.save()
    PylintCommand(c).run()
#@+node:ekr.20160516072613.1: *3* pyflakes command
@g.command('pyflakes')
def pyflakes_command(event):
    '''
    Run pyflakes on all nodes of the selected tree,
    or the first @<file> node in an ancestor.
    '''
    c = event.get('c')
    if not c:
        return
    # Pyflakes reads the files on disk, so save pending changes first.
    if c.isChanged():
        c.save()
    if not pyflakes:
        g.es_print('can not import pyflakes')
        return
    PyflakesCommand(c).run(force=True)
#@+node:ekr.20160517133049.1: ** class Flake8Command
class Flake8Command:
    '''A class to run flake8 on all Python @<file> nodes in c.p's tree.'''
    def __init__(self, c, quiet=False):
        '''ctor for Flake8Command class.

        c: the Leo commander whose tree will be checked.
        quiet: stored but not consulted in this class's visible code.
        '''
        self.c = c
        self.quiet = quiet
        self.seen = [] # List of checked paths.
    #@+others
    #@+node:ekr.20160517133049.2: *3* flake8.check_all
    def check_all(self, paths):
        '''Run flake8 on all paths.'''
        try:
            # pylint: disable=import-error
                # We can't assume the user has this.
            from flake8 import engine, main
        except Exception:
            # flake8 not installed: silently do nothing.
            return
        config_file = self.get_flake8_config()
        if config_file:
            style = engine.get_style_guide(
                parse_argv=False,
                config_file=config_file,
            )
            report = style.check_files(paths=paths)
            # Set statistics here, instead of from the command line.
            options = style.options
            options.statistics = True
            options.total_errors = True
            # options.benchmark = True
            main.print_report(report, style)
    #@+node:ekr.20160517133049.3: *3* flake8.find
    def find(self, p):
        '''Return True and add p's path to self.seen if p is a Python @<file> node.'''
        c = self.c
        found = False
        if p.isAnyAtFileNode():
            # Resolve @path directives to get the absolute file name.
            aList = g.get_directives_dict_list(p)
            path = c.scanAtPathDirectives(aList)
            fn = p.anyAtFileNodeName()
            if fn.endswith('.py'):
                fn = g.os_path_finalize_join(path, fn)
                if fn not in self.seen:
                    self.seen.append(fn)
                    found = True
        return found
    #@+node:ekr.20160517133049.4: *3* flake8.get_flake8_config
    def get_flake8_config(self):
        '''Return the path to the flake8 configuration file, or None.'''
        join = g.os_path_finalize_join
        # Search order: ~, ~/.leo, leo/test.
        dir_table = (
            g.app.homeDir,
            join(g.app.homeDir, '.leo'),
            join(g.app.loadDir, '..', '..', 'leo', 'test'),
        )
        for base in ('flake8', 'flake8.txt'):
            for path in dir_table:
                fn = g.os_path_abspath(join(path, base))
                if g.os_path_exists(fn):
                    return fn
        if not g.unitTesting:
            g.es_print('no flake8 configuration file found in\n%s' % (
                '\n'.join(dir_table)))
        return None
    #@+node:ekr.20160517133049.5: *3* flake8.run
    def run(self, p=None):
        '''Run flake8 on all Python @<file> nodes in c.p's tree.'''
        c = self.c
        root = p or c.p
        # Make sure Leo is on sys.path.
        leo_path = g.os_path_finalize_join(g.app.loadDir, '..')
        if leo_path not in sys.path:
            sys.path.append(leo_path)
        # Run flake8 on all Python @<file> nodes in root's tree.
        t1 = time.time()
        found = False
        for p in root.self_and_subtree():
            found |= self.find(p)
        # Look up the tree if no @<file> nodes were found.
        if not found:
            for p in root.parents():
                if self.find(p):
                    found = True
                    break
        # If still not found, expand the search if root is a clone.
        if not found:
            isCloned = any([p.isCloned() for p in root.self_and_parents()])
            if isCloned:
                for p in c.all_positions():
                    if p.isAnyAtFileNode():
                        # Check whether any node of p's tree is root's clone.
                        isAncestor = any([z.v == root.v for z in p.self_and_subtree()])
                        if isAncestor and self.find(p):
                            break
        # De-duplicate the collected paths before checking.
        paths = list(set(self.seen))
        if paths:
            self.check_all(paths)
        g.es_print('flake8: %s file%s in %s' % (
            len(paths), g.plural(paths), g.timeSince(t1)))
    #@-others
#@+node:ekr.20160516072613.2: ** class PyflakesCommand
class PyflakesCommand:
    '''A class to run pyflakes on all Python @<file> nodes in c.p's tree.'''
    def __init__(self, c):
        '''ctor for PyflakesCommand class.

        c: the Leo commander whose tree will be checked.
        '''
        self.c = c
        self.seen = [] # List of checked paths.
    #@+others
    #@+node:ekr.20171228013818.1: *3* class LogStream
    class LogStream:
        '''A file-like log stream for pyflakes.

        pyflakes' reporter.Reporter writes messages via .write() on the
        streams it is given, so this class must implement write().
        '''
        def __init__(self, fn_n=0, roots=None):
            # Bug fix: check_all() instantiates LogStream(fn_n, roots), but
            # this class previously defined neither __init__ nor write(),
            # so every pyflakes message raised an exception.
            self.fn_n = fn_n    # Index into roots of the file being checked.
            self.roots = roots  # @<file> root positions, or None (check_script).
        def write(self, s):
            '''Write one pyflakes message to Leo's log pane.'''
            if not s.strip():
                return
            g.pr(s)
            if self.roots:
                try:
                    # Messages look like "fn:line:col: text"; use the line
                    # number to attach a clickable link to the root node.
                    root = self.roots[self.fn_n]
                    line = int(s.split(':')[1])
                    unl = root.get_UNL(with_proto=True, with_count=True)
                    g.es(s, nodeLink="%s,%d" % (unl, line))
                except (IndexError, TypeError, ValueError):
                    g.es(s)
            else:
                g.es(s)
    #@+node:ekr.20160516072613.6: *3* pyflakes.check_all
    def check_all(self, log_flag, paths, pyflakes_errors_only, roots=None):
        '''Run pyflakes on all files in paths. Return the total error count.'''
        try:
            from pyflakes import api, reporter
        except Exception: # ModuleNotFoundError
            # Bug fix: returning True here was counted as one error by
            # run(); return 0 to really "pretend all is fine".
            return 0
        total_errors = 0
        # pylint: disable=cell-var-from-loop
        for fn_n, fn in enumerate(sorted(paths)):
            # Report the file name.
            sfn = g.shortFileName(fn)
            s = g.readFileIntoEncodedString(fn)
            if s and s.strip():
                if not pyflakes_errors_only:
                    g.es('Pyflakes: %s' % sfn)
                # Send all output to the log pane.
                r = reporter.Reporter(
                    errorStream=self.LogStream(fn_n, roots),
                    warningStream=self.LogStream(fn_n, roots),
                )
                errors = api.check(s, sfn, r)
                total_errors += errors
        return total_errors
    #@+node:ekr.20171228013625.1: *3* pyflakes.check_script
    def check_script(self, p, script):
        '''Call pyflakes to check the given script. Return True if clean.'''
        try:
            from pyflakes import api, reporter
        except Exception: # ModuleNotFoundError
            return True # Pretend all is fine.
        r = reporter.Reporter(
            errorStream=self.LogStream(),
            warningStream=self.LogStream(),
        )
        errors = api.check(script, '', r)
        return errors == 0
    #@+node:ekr.20170220114553.1: *3* pyflakes.finalize
    def finalize(self, p):
        '''Finalize p's path: apply @path directives and make it absolute.'''
        aList = g.get_directives_dict_list(p)
        path = self.c.scanAtPathDirectives(aList)
        fn = p.anyAtFileNodeName()
        return g.os_path_finalize_join(path, fn)
    #@+node:ekr.20160516072613.3: *3* pyflakes.find (no longer used)
    def find(self, p):
        '''Return True and add p's path to self.seen if p is a Python @<file> node.'''
        c = self.c
        found = False
        if p.isAnyAtFileNode():
            aList = g.get_directives_dict_list(p)
            path = c.scanAtPathDirectives(aList)
            fn = p.anyAtFileNodeName()
            if fn.endswith('.py'):
                fn = g.os_path_finalize_join(path, fn)
                if fn not in self.seen:
                    self.seen.append(fn)
                    found = True
        return found
    #@+node:ekr.20160516072613.5: *3* pyflakes.run
    def run(self, p=None, force=False, pyflakes_errors_only=False):
        '''Run Pyflakes on all Python @<file> nodes in c.p's tree.

        Returns True if no errors were found.
        '''
        c = self.c
        root = p or c.p
        # Make sure Leo is on sys.path.
        leo_path = g.os_path_finalize_join(g.app.loadDir, '..')
        if leo_path not in sys.path:
            sys.path.append(leo_path)
        t1 = time.time()
        roots = g.findRootsWithPredicate(c, root, predicate=None)
        if root:
            paths = [self.finalize(z) for z in roots]
            # These messages are important for clarity.
            log_flag = not force
            total_errors = self.check_all(log_flag, paths, pyflakes_errors_only, roots=roots)
            if total_errors > 0:
                g.es('ERROR: pyflakes: %s error%s' % (
                    total_errors, g.plural(total_errors)))
            elif force:
                g.es('OK: pyflakes: %s file%s in %s' % (
                    len(paths), g.plural(paths), g.timeSince(t1)))
            elif not pyflakes_errors_only:
                g.es('OK: pyflakes')
            ok = total_errors == 0
        else:
            ok = True
        return ok
    #@-others
#@+node:ekr.20150514125218.8: ** class PylintCommand
class PylintCommand:
    '''A class to run pylint on all Python @<file> nodes in c.p's tree.'''
    # Regex matching one pylint message line, e.g.
    #   file-name:3966:12: R1705:xxxx (no-else-return)
    regex = r'^.*:([0-9]+):[0-9]+:.*?(\(.*\))\s*$'
        # m.group(1) is the line number.
        # m.group(2) is the (unused) test name.
        # Example message: file-name:3966:12: R1705:xxxx (no-else-return)
    #@+others
    #@+node:ekr.20150514125218.11: *3* 1. pylint.run
    def run(self):
        '''Run Pylint on all Python @<file> nodes in c.p's tree.'''
        c, root = self.c, self.c.p
        if not self.import_lint():
            return
        self.rc_fn = self.get_rc_file()
        if not self.rc_fn:
            return
        # Make sure Leo is on sys.path.
        leo_path = g.os_path_finalize_join(g.app.loadDir, '..')
        if leo_path not in sys.path:
            sys.path.append(leo_path)
        # Ignore @nopylint trees.
        # NOTE(review): `predicate` is not defined in this class or in the
        # visible module code — presumably a module-level helper that honors
        # @nopylint; confirm it exists.
        roots = g.findRootsWithPredicate(c, root, predicate=predicate)
        # Pair each root's absolute file name with its position; drop roots
        # that are not python @<file> nodes (get_fn returned None).
        data = [(self.get_fn(p), p.copy()) for p in roots]
        data = [z for z in data if z[0] is not None]
        if not data:
            g.es('pylint: no files found', color='red')
            return
        for fn, p in data:
            self.run_pylint(fn, p)
    #@+node:ekr.20190605183824.1: *3* 2. pylint.import_lint
    def import_lint(self):
        '''Make sure lint can be imported. Return True on success.'''
        try:
            from pylint import lint
            # Silence "imported but unused" from pyflakes.
            g.placate_pyflakes(lint)
            return True
        except ImportError:
            g.es_print('pylint is not installed')
            return False
    #@+node:ekr.20150514125218.10: *3* 3. pylint.get_rc_file
    def get_rc_file(self):
        '''Return the path to the pylint configuration file.'''
        base = 'pylint-leo-rc.txt'
        table = (
            g.os_path_finalize_join(g.app.homeDir, '.leo', base),
                # In ~/.leo
            g.os_path_finalize_join(g.app.loadDir, '..', '..', 'leo', 'test', base),
                # In leo/test
        )
        for fn in table:
            fn = g.os_path_abspath(fn)
            if g.os_path_exists(fn):
                return fn
        g.es_print('no pylint configuration file found in\n%s' % (
            '\n'.join(table)))
        return None
    #@+node:ekr.20150514125218.9: *3* 4. pylint.get_fn
    def get_fn(self, p):
        '''
        Finalize p's file name.
        Return None if p is not an @file node for a python file.
        '''
        c = self.c
        if not p.isAnyAtFileNode():
            g.trace('not an @<file> node: %r' % p.h)
            return None
        # #67.
        aList = g.get_directives_dict_list(p)
        path = c.scanAtPathDirectives(aList)
        fn = p.anyAtFileNodeName()
        if not fn.endswith('.py'):
            g.trace('not a python file: %r' % p.h)
            return None
        return g.os_path_finalize_join(path, fn)
    #@+node:ekr.20150514125218.12: *3* 5. pylint.run_pylint
    def run_pylint(self, fn, p):
        '''Run pylint on fn with the given pylint configuration file.'''
        c, rc_fn = self.c, self.rc_fn
        #
        # Invoke pylint directly, in a subprocess of the python interpreter.
        is_win = sys.platform.startswith('win')
        args = ','.join(["'--rcfile=%s'" % (rc_fn), "'%s'" % (fn),])
        if is_win:
            # Escape backslashes so Windows paths survive the -c string.
            args = args.replace('\\','\\\\')
        command = '%s -c "from pylint import lint; args=[%s]; lint.Run(args)"' % (
            sys.executable, args)
        if not is_win:
            # On POSIX the command must be a list when shell=False.
            command = shlex.split(command)
        #
        # Run the command using the BPM (background process manager).
        bpm = g.app.backgroundProcessManager
        bpm.start_process(c, command,
            fn=fn,
            kind='pylint',
            link_pattern = self.regex,
            link_root = p,
        )
        # Old code: Invoke g.run_pylint.
            # args = ["fn=r'%s'" % (fn), "rc=r'%s'" % (rc_fn),]
            # # When shell is True, it's recommended to pass a string, not a sequence.
            # command = '%s -c "import leo.core.leoGlobals as g; g.run_pylint(%s)"' % (
                # sys.executable, ','.join(args))
    #@-others
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 70
#@-leo
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
31,
10,
293,
78,
12,
332,
28,
20,
12,
40871,
198,
2,
31,
10,
17440,
25,
988,
81,
13,
5304,
15377,
14454,
2998,
1821,
13,
16,
25,
1635,
2488,
7753,
11485,
14,
... | 1.931186 | 10,027 |
from datetime import datetime
from os import makedirs, chdir, walk
from os.path import join, isdir, basename, exists, relpath
from shutil import make_archive, rmtree, copyfile, move
from tempfile import mkdtemp
import re
import tempfile
import sys
from pkg_resources import get_distribution
from bagit import Bag, make_manifests # pylint: disable=no-name-in-module
from ocrd_utils import (
pushd_popd,
getLogger,
is_local_filename,
unzip_file_to_dir,
MIMETYPE_PAGE,
VERSION,
)
from ocrd_validators.constants import BAGIT_TXT, TMP_BAGIT_PREFIX, OCRD_BAGIT_PROFILE_URL
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import to_xml
from .workspace import Workspace
# Force tempfile allocations into /tmp.
# NOTE(review): POSIX-only path (see the existing TODO) — breaks on
# platforms without /tmp; consider leaving tempfile.tempdir unset.
tempfile.tempdir = '/tmp' # TODO hard-coded
log = getLogger('ocrd.workspace_bagger')
# Staging area for backups; the TMP_BAGIT_PREFIX marks it as bagger-owned.
BACKUPDIR = join('/tmp', TMP_BAGIT_PREFIX + 'backup')
class WorkspaceBagger():
"""
Serialize/De-serialize from OCRD-ZIP to workspace and back.
"""
    def bag(self,
            workspace,
            ocrd_identifier,
            dest=None,
            ocrd_mets='mets.xml',
            ocrd_manifestation_depth='full',
            ocrd_base_version_checksum=None,
            processes=1,
            skip_zip=False,
            in_place=False,
            tag_files=None
        ):
        """
        Bag a workspace
        See https://ocr-d.github.com/ocrd_zip#packing-a-workspace-as-ocrd-zip
        Arguments:
            workspace (ocrd.Workspace): workspace to bag
            ocrd_identifier (string): Ocrd-Identifier in bag-info.txt
            dest (string): Path of the generated OCRD-ZIP.
            ocrd_mets (string): Ocrd-Mets in bag-info.txt
            ocrd_manifestation_depth (string): Ocrd-Manifestation-Depth in bag-info.txt
            ocrd_base_version_checksum (string): Ocrd-Base-Version-Checksum in bag-info.txt
            processes (integer): Number of parallel processes checksumming
            skip_zip (boolean): Whether to leave directory unzipped
            in_place (boolean): Whether to **replace** the workspace with its BagIt variant
            tag_files (list<string>): Path names of additional tag files to be bagged at the root of the bag
        """
        # Validate mutually-exclusive options before touching the filesystem.
        if ocrd_manifestation_depth not in ('full', 'partial'):
            raise Exception("manifestation_depth must be 'full' or 'partial'")
        if in_place and (dest is not None):
            raise Exception("Setting 'dest' and 'in_place' is a contradiction")
        if in_place and not skip_zip:
            raise Exception("Setting 'skip_zip' and not 'in_place' is a contradiction")
        if tag_files is None:
            tag_files = []
        # create bagdir
        bagdir = mkdtemp(prefix=TMP_BAGIT_PREFIX)
        # Derive the destination from the workspace when not given explicitly.
        if dest is None:
            if in_place:
                dest = workspace.directory
            elif not skip_zip:
                dest = '%s.ocrd.zip' % workspace.directory
            else:
                dest = '%s.ocrd' % workspace.directory
        log.info("Bagging %s to %s (temp dir %s)", workspace.directory, '(in-place)' if in_place else dest, bagdir)
        # create data dir
        makedirs(join(bagdir, 'data'))
        # create bagit.txt
        with open(join(bagdir, 'bagit.txt'), 'wb') as f:
            f.write(BAGIT_TXT.encode('utf-8'))
        # create manifests
        # NOTE(review): _bag_mets_files is defined elsewhere in this class.
        total_bytes, total_files = self._bag_mets_files(workspace, bagdir, ocrd_manifestation_depth, ocrd_mets, processes)
        # create bag-info.txt
        bag = Bag(bagdir)
        self._set_bag_info(bag, total_bytes, total_files, ocrd_identifier, ocrd_manifestation_depth, ocrd_base_version_checksum)
        # Copy additional tag files to the bag root.
        for tag_file in tag_files:
            copyfile(tag_file, join(bagdir, basename(tag_file)))
        # save bag
        bag.save()
        # ZIP it
        self._serialize_bag(workspace, bagdir, dest, in_place, skip_zip)
        log.info('Created bag at %s', dest)
        return dest
    def spill(self, src, dest):
        """
        Spill a workspace, i.e. unpack it and turn it into a workspace.
        See https://ocr-d.github.com/ocrd_zip#unpacking-ocrd-zip-to-a-workspace
        Arguments:
            src (string): Path to OCRD-ZIP
            dest (string): Path to directory to unpack data folder to
        """
        if exists(dest) and not isdir(dest):
            raise Exception("Not a directory: %s" % dest)
        # If dest is an existing directory, try to derive its name from src
        if isdir(dest):
            workspace_name = re.sub(r'(\.ocrd)?\.zip$', '', basename(src))
            new_dest = join(dest, workspace_name)
            if exists(new_dest):
                raise Exception("Directory exists: %s" % new_dest)
            dest = new_dest
        log.info("Spilling %s to %s", src, dest)
        # Unpack the OCRD-ZIP into a temporary bag directory first.
        bagdir = mkdtemp(prefix=TMP_BAGIT_PREFIX)
        unzip_file_to_dir(src, bagdir)
        # Copy the payload ('data') tree file-by-file into dest, recreating
        # sub-directories as needed.
        datadir = join(bagdir, 'data')
        for root, _, files in walk(datadir):
            for f in files:
                srcfile = join(root, f)
                destdir = join(dest, relpath(root, datadir))
                destfile = join(destdir, f)
                if not exists(destdir):
                    makedirs(destdir)
                log.debug("Copy %s -> %s", srcfile, destfile)
                copyfile(srcfile, destfile)
        # TODO copy allowed tag files if present
        # TODO validate bagit
        # Drop tempdir
        rmtree(bagdir)
        # Create workspace
        # NOTE(review): self.resolver is set outside this method — confirm.
        workspace = Workspace(self.resolver, directory=dest)
        # TODO validate workspace
        return workspace
def validate(self, bag):
"""
Validate conformance with BagIt and OCR-D bagit profile.
See:
- https://ocr-d.github.io/ocrd_zip
- https://ocr-d.github.io/bagit-profile.json
- https://ocr-d.github.io/bagit-profile.yml
"""
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
28686,
1330,
285,
4335,
17062,
11,
442,
15908,
11,
2513,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
318,
15908,
11,
1615,
12453,
11,
7160,
11,
823,
6978,
198,
6738,
4423,
346,
1330,
787,... | 2.163318 | 2,737 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import datetime
from collections import Mapping
from click import get_current_context
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
13130,
93,
1959,
2079,
532,
327,
928,
1754,
1279,
15688,
1659,
75,
86,
31,
14816,
13,
785,
29,
198,
2,
24200,
438,
198,
2,
198,
2,
... | 2.76 | 75 |
# Number of seconds in a non-leap year: 60 s * 60 min * 24 h * 365 d.
SECONDS_PER_YEAR = 60 * 60 * 24 * 365

# Repeatedly convert a number of seconds to years and report whether a
# "baby" could live that long (more than 100 years -> no).
# Fixes: original mixed Python-2 `print` statements with `input()` (which
# in Python 2 *evaluates* the typed text — a code-execution hazard); the
# input is now parsed explicitly with float().
while True:
    seconds = float(input('Please enter any amount of seconds'))
    years = seconds / SECONDS_PER_YEAR
    if years > 100:
        print("No the baby won't live")
    else:
        print("Yes the baby will live")
    print("There are %d years in %d Seconds" % (years, seconds))
    # Pause until the user presses Enter (value intentionally ignored).
    input('')
| [
87,
28,
16,
198,
4514,
2124,
29,
15,
25,
198,
220,
220,
220,
40876,
796,
5128,
19203,
5492,
3802,
597,
2033,
286,
4201,
11537,
198,
220,
220,
220,
575,
28,
12211,
82,
14,
1899,
13,
2388,
14,
1899,
14,
1731,
14,
24760,
198,
220,
... | 2.357724 | 123 |
from __future__ import absolute_import, division, print_function
from six.moves import range
'''
Standard command-line parser for CCTBX programs
The CCTBXParser class will read files and process PHIL parameters from the
command-line as well as have standard command-line flags for showing the
PHIL scope and citations for a program.
'''
import argparse, getpass, logging, os, sys, time
import iotbx.phil
import libtbx.phil
from iotbx.data_manager import DataManager, data_manager_type
from iotbx.file_reader import any_file
from libtbx import citations
from libtbx.program_template import ProgramTemplate
from libtbx.str_utils import wordwrap
from libtbx.utils import multi_out, show_times, Sorry
# =============================================================================
def run_program(program_class=None, custom_process_arguments=None,
                args=None, logger=None):
  '''
  Run a CCTBX program built on the program template.

  :param program_class: ProgramTemplate type (required)
  :param custom_process_arguments:
    Custom function to parse unknown arguments (optional)
  :param args: list of command-line arguments (optional)
  :param logger: logger (e.g. multi_out) for output (optional)
  :rtype: whatever is returned from program_class.get_results()
  '''
  assert program_class is not None
  if args is None:
    args = sys.argv[1:]
  # Default to a multiplexed logger that echoes to stdout.
  if logger is None:
    logger = multi_out()
    logger.register('stdout', sys.stdout)
  # Start the wall-clock timer; calling `timer()` later prints the times.
  timer = show_times(out=logger)
  # Parse files and PHIL parameters from the command line.
  parser = CCTBXParser(program_class=program_class,
                       custom_process_arguments=custom_process_arguments,
                       logger=logger)
  parser.parse_args(args)
  # Run the program.
  print('Starting job', file=logger)
  print('='*79, file=logger)
  program = program_class(parser.data_manager, parser.working_phil.extract(),
                          master_phil=parser.master_phil,
                          logger=logger)
  program.custom_init()   # custom constructor (optional)
  program.validate()      # validate inputs
  program.run()
  program.clean_up()      # clean up (optional)
  print('', file=logger)
  print('='*79, file=logger)
  print('Job complete', file=logger)
  timer()
  return program.get_results()
# =============================================================================
# =============================================================================
class ParsePositionalArgumentsAction(argparse.Action):
  '''
  This action is a first pass for command-line arguments. It does basic checks
  to see if an argument is a file, a directory, or a phil parameter (contains
  an equals sign). Command-line switches (options beginning with "-") are
  handled by default actions in the parser
  '''
  # NOTE(review): no __call__ is defined in this chunk; argparse.Action's
  # default __call__ raises NotImplementedError — the implementation
  # presumably follows elsewhere in the file; confirm.
# =============================================================================
# =============================================================================
# end
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
6738,
2237,
13,
76,
5241,
1330,
2837,
198,
198,
7061,
6,
198,
23615,
3141,
12,
1370,
30751,
329,
327,
4177,
33,
55,
4056,
198,
198,
464,
327,
4177,
... | 3.219769 | 951 |
from collections import Iterable
import numpy as np
import litenn as nn
import litenn.core as nc
def resize2D_bilinear(input_t, size_or_output_hw):
    """
    resize2D_bilinear operator

    Bilinearly resizes the spatial dimensions of a NCHW tensor.

    arguments

        size_or_output_hw   int or float : scale factor for both H and W
                            Iterable of (height, width) : explicit output size
    """
    _, _, in_h, in_w = input_t.shape

    # Work out the output height/width from either an explicit (H, W)
    # pair or a single scale factor.
    if isinstance(size_or_output_hw, Iterable):
        out_h, out_w = int(size_or_output_hw[0]), int(size_or_output_hw[1])
    elif isinstance(size_or_output_hw, (int, float)):
        out_h = int(in_h * size_or_output_hw)
        out_w = int(in_w * size_or_output_hw)
    else:
        raise ValueError(f'Unknown type of size_or_output_hw : {size_or_output_hw.__class__.__name__}')

    # Never collapse a dimension below one pixel.
    out_h = max(1, out_h)
    out_w = max(1, out_w)

    # Sampling grid spanning the full input extent; no gradient flows to it.
    coords_t = nn.Tensor(nc.TensorShape((out_h, out_w, 2)),
                         nn.initializer.CoordsArange(0, in_h - 1, 0, in_w - 1))
    return nn.spatial_transform2D(input_t, coords_t, grad_to_coords=False)
| [
6738,
17268,
1330,
40806,
540,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
6578,
1697,
355,
299,
77,
198,
11748,
6578,
1697,
13,
7295,
355,
299,
66,
198,
198,
4299,
47558,
17,
35,
62,
33473,
259,
451,
7,
15414,
62,
83,
11,
2546,... | 2.005871 | 511 |
from .basic import *
from .Feature import *
from .builtin_features import *
feature = Feature | [
6738,
764,
35487,
1330,
1635,
198,
6738,
764,
38816,
1330,
1635,
198,
6738,
764,
18780,
259,
62,
40890,
1330,
1635,
198,
30053,
796,
27018
] | 3.875 | 24 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'languages',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:promise_resolver',
'<(EXTERNS_GYP):chrome_send',
'<(EXTERNS_GYP):input_method_private',
'<(EXTERNS_GYP):language_settings_private',
'<(INTERFACES_GYP):input_method_private_interface',
'<(INTERFACES_GYP):language_settings_private_interface',
'../prefs/compiled_resources2.gyp:prefs_types',
'../prefs/compiled_resources2.gyp:prefs',
'languages_types',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'languages_page',
'dependencies': [
'../compiled_resources2.gyp:lifetime_browser_proxy',
'../compiled_resources2.gyp:route',
'../settings_page/compiled_resources2.gyp:settings_animated_pages',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-checkbox/compiled_resources2.gyp:paper-checkbox-extracted',
'<(DEPTH)/ui/webui/resources/cr_elements/cr_action_menu/compiled_resources2.gyp:cr_action_menu',
'<(DEPTH)/ui/webui/resources/cr_elements/cr_expand_button/compiled_resources2.gyp:cr_expand_button',
'<(DEPTH)/ui/webui/resources/cr_elements/cr_lazy_render/compiled_resources2.gyp:cr_lazy_render',
'<(DEPTH)/ui/webui/resources/js/chromeos/compiled_resources2.gyp:ui_account_tweaks',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
'languages',
'languages_types',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'languages_types',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(EXTERNS_GYP):language_settings_private',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'manage_input_methods_page',
'dependencies': [
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-checkbox/compiled_resources2.gyp:paper-checkbox-extracted',
'<(EXTERNS_GYP):language_settings_private',
'../prefs/compiled_resources2.gyp:prefs',
'languages',
'languages_types',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'add_languages_dialog',
'dependencies': [
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-checkbox/compiled_resources2.gyp:paper-checkbox-extracted',
'languages',
'languages_types',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
| [
2,
15069,
1584,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
90,
198,
... | 2.201224 | 1,471 |
# This is not a valid Python module - Don't run it.
>>> _ = list
>>> test = [2, 5, 8, 0, 0, 1, 0]
>>> _(filter(None, test))
[2, 5, 8, 1]
>>> _(filter(lambda x: x, test)) # equivalent to previous one
[2, 5, 8, 1]
>>> _(filter(lambda x: x > 4, test)) # keep only items > 4
[5, 8]
| [
2,
770,
318,
407,
257,
4938,
11361,
8265,
532,
2094,
470,
1057,
340,
13,
198,
198,
33409,
4808,
796,
1351,
198,
33409,
1332,
796,
685,
17,
11,
642,
11,
807,
11,
657,
11,
657,
11,
352,
11,
657,
60,
198,
33409,
4808,
7,
24455,
7,
... | 2.401709 | 117 |
import feedparser
import listparser
import argparse
import time
import datetime
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this module's visible
    # code — running this script as-is raises NameError; confirm main()
    # exists or is meant to be added.
    main()
| [
11748,
3745,
48610,
198,
11748,
1351,
48610,
198,
11748,
1822,
29572,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 3.216216 | 37 |
# -*- coding: utf-8
import unittest
import time
import datetime
import math
from mock import patch, Mock
from blinds_lib import Blind
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
198,
11748,
555,
715,
395,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
10688,
198,
6738,
15290,
1330,
8529,
11,
44123,
198,
6738,
7770,
82,
62,
8019,
1330,
24507,
198,
198,
361,
... | 2.95082 | 61 |
import binascii
import os
from textwrap import dedent
from pyramid.compat import native_
from pyramid.scaffolds.template import Template # API
class PyramidTemplate(Template):
    """
    A base class for Pyramid scaffolding templates.
    """

    def pre(self, command, output_dir, vars):
        """ Overrides :meth:`pyramid.scaffolds.template.Template.pre`.

        Adds ``random_string`` and ``package_logger`` to the variable map
        before delegating to the base class.  When the project is named
        ``'root'``, the logger name falls back to ``'app'`` so the app
        logger does not collide with the root logger.
        """
        # 20 random bytes, hex-encoded, as a native string.
        vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))
        pkg = vars['package']
        vars['package_logger'] = 'app' if pkg == 'root' else pkg
        return Template.pre(self, command, output_dir, vars)

    def post(self, command, output_dir, vars): # pragma: no cover
        """ Overrides :meth:`pyramid.scaffolds.template.Template.post`.

        Prints a closing banner ("Welcome to Pyramid. Sorry for the
        convenience.") after a successful scaffolding rendering, then
        delegates to the base class.
        """
        bar = "=" * 79
        msg = dedent(
            """
    %(separator)s
    Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/
    Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/
    Twitter: https://twitter.com/PylonsProject
    Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss
    Welcome to Pyramid. Sorry for the convenience.
    %(separator)s
""" % {'separator': bar})
        self.out(msg)
        return Template.post(self, command, output_dir, vars)
| [
11748,
9874,
292,
979,
72,
198,
11748,
28686,
198,
6738,
2420,
37150,
1330,
4648,
298,
198,
198,
6738,
27944,
13,
5589,
265,
1330,
6868,
62,
198,
198,
6738,
27944,
13,
1416,
2001,
10119,
13,
28243,
1330,
37350,
220,
1303,
7824,
198,
1... | 2.477419 | 775 |
#!/usr/bin/env python2.7
import numpy as np
import matplotlib.pyplot as plt
# Measured frequencies (presumably Hz — confirm against the experiment notes).
Freq=np.array([30,40,45,50,55,60,65,70,80,90,95,100,105,110,125,130,140,145,150,160,170,180,190,195,200,210,220,230,240,245,250,260,270,280,290,295,300,305,310,320,325,330,335,340,345,350,355,360])
# Sound level readings (dB), one per frequency above (48 samples each).
Db=np.array([70,77,86.7,97.2,90.4,87.7,87.2,88,91.9,99.9,106.8,107.4,102.8,100.4,100.2,100.3,111.9,116.2,113.4,108.2,108.4,111.8,118.7,119.5,117.2,113.8,114.8,118.1,121.7,121.6,121,119.9,120.4,121.1,122.5,123.5,124.4,124.7,124.1,121,120,119.7,120.2,122.1,126.2,126.9,126.5,123.3])
plt.xlabel('Frecuencia')
plt.ylabel('Decibel')
plt.title('DecibelvsFreq a 0.3volts PEGADO')
# Optional per-point labels (disabled):
#for i in range(len(Freq)):
#	plt.text(Freq[i],Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))
# Fixed axes: x in [0, 370], y in [50, 130].
plt.axis([0, 370, 50, 130])
# Blue dots for the samples plus a black connecting line.
plt.plot(Freq,Db,'bo',Freq,Db,'k')
plt.grid(True)
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
197,
29412,
17,
13,
22,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
20366,
80,
28,
37659,
13,
18747,
26933,
1270,
11,
1821,
11,
22... | 1.755789 | 475 |
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import mem_snapshots
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova.policies import mem_snapshots as ms_policies
#from ics_sdk import manager
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
ALIAS = "memsnapshots"
class MemSnapshots(extensions.V21APIExtensionBase):
    """Memory snapshots support."""
    name = "MemSnapshots"  # Extension name reported by the API.
    alias = ALIAS          # URL alias, "memsnapshots" (defined above).
    version = 1            # Extension version.
| [
2,
15069,
2813,
4946,
25896,
5693,
198,
2,
15069,
2211,
19764,
11421,
13,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
407,
779,
428,
23... | 3.294118 | 408 |
# Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
2,
15069,
1946,
657,
25306,
486,
2154,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,... | 3.813333 | 150 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Invoice) on 2019-07-29.
# 2019, SMART Health IT.
import sys
from dataclasses import dataclass
from typing import ClassVar, Optional, List
from .fhirabstractbase import empty_list
from .annotation import Annotation
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .identifier import Identifier
from .money import Money
@dataclass
class InvoiceLineItemPriceComponent(BackboneElement):
    """ Components of total line item price.

    The price for a ChargeItem may be calculated as a base price with
    surcharges/deductions that apply in certain conditions. A
    ChargeItemDefinition resource that defines the prices, factors and
    conditions that apply to a billing code is currently under development. The
    priceComponent element can be used to offer transparency to the recipient
    of the Invoice as to how the prices have been calculated.
    """
    resource_type: ClassVar[str] = "InvoiceLineItemPriceComponent"
    # Kind of this price component (non-Optional: required by the generator).
    type: str = None
    # Code identifying the specific surcharge or deduction.
    code: Optional[CodeableConcept] = None
    # Factor used when this component was calculated.
    factor: Optional[float] = None
    # Monetary amount of this component.
    amount: Optional[Money] = None
@dataclass
class InvoiceParticipant(BackboneElement):
    """ Participant in creation of this Invoice.

    Indicates who or what performed or participated in the charged service.
    """
    resource_type: ClassVar[str] = "InvoiceParticipant"
    # Role of the participant in the service (e.g. performer).
    role: Optional[CodeableConcept] = None
    # Who or what participated (non-Optional: required by the generator).
    actor: FHIRReference = None
@dataclass
class InvoiceLineItem(BackboneElement):
    """ Line items of this Invoice.

    Each line item represents one charge for goods and services rendered.
    Details such as date, code and amount are found in the referenced
    ChargeItem resource.
    """
    resource_type: ClassVar[str] = "InvoiceLineItem"
    # Sequence number of this line item within the invoice.
    sequence: Optional[int] = None
    # The charge, either as a reference or as an inline code (choice type:
    # exactly one of chargeItem[x] is expected to be populated).
    chargeItemReference: FHIRReference = None
    chargeItemCodeableConcept: CodeableConcept = None
    # Breakdown of this line's price (base price, surcharges, deductions).
    priceComponent: Optional[List[InvoiceLineItemPriceComponent]] = empty_list()
@dataclass
class Invoice(DomainResource):
    """ Invoice containing ChargeItems from an Account.
    Invoice containing collected ChargeItems from an Account with calculated
    individual and total price for Billing purpose.
    """
    # FHIR resource name used by the (de)serialization machinery.
    resource_type: ClassVar[str] = "Invoice"
    # Business identifiers for this invoice.
    identifier: Optional[List[Identifier]] = empty_list()
    # Lifecycle status; required by FHIR, defaulted to None by the generator.
    status: str = None
    # Reason for cancellation, when status is 'cancelled'.
    cancelledReason: Optional[str] = None
    # Type of invoice (e.g. hospital, legal).
    type: Optional[CodeableConcept] = None
    # Subject the invoiced services were provided to.
    subject: Optional[FHIRReference] = None
    # Individual or organization the invoice is addressed to.
    recipient: Optional[FHIRReference] = None
    # Date/time the invoice was posted.
    date: Optional[FHIRDate] = None
    # Parties involved in the charged services.
    participant: Optional[List[InvoiceParticipant]] = empty_list()
    # Issuing organization of the invoice.
    issuer: Optional[FHIRReference] = None
    # Account the invoice is drawn against.
    account: Optional[FHIRReference] = None
    # Individual charges making up the invoice.
    lineItem: Optional[List[InvoiceLineItem]] = empty_list()
    # Price components aggregated over all line items.
    totalPriceComponent: Optional[List[InvoiceLineItemPriceComponent]] = empty_list()
    # Total before and after tax.
    totalNet: Optional[Money] = None
    totalGross: Optional[Money] = None
    # Payment details such as banking information and terms of payment.
    paymentTerms: Optional[str] = None
    # Comments made about the invoice.
    note: Optional[List[Annotation]] = empty_list()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
2980,
515,
422,
376,
39,
4663,
604,
13,
15,
13,
15,
12,
64,
4310,
721,
21,
1453,
16,
65,
357,
4... | 3.300203 | 986 |
import unittest
from checkov.terraform.context_parsers.registry import parser_registry
from tests.terraform.context_parsers.mock_context_parser import MockContextParser
import os
# Tuple mimicking what the checkov Terraform loader produces for a parsed
# .tf file: (absolute file path, nested block/attribute dictionary).
mock_definition = (os.path.dirname(os.path.realpath(__file__)) + '/mock_tf_files/mock.tf', {'mock': [
    {
        'mock_type': {
            'mock_name': {
                'value': [
                    'mock_value']}}}
]})

# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
2198,
709,
13,
353,
430,
687,
13,
22866,
62,
79,
945,
364,
13,
2301,
4592,
1330,
30751,
62,
2301,
4592,
198,
6738,
5254,
13,
353,
430,
687,
13,
22866,
62,
79,
945,
364,
13,
76,
735,
62,
2286... | 2.14554 | 213 |
# -*- coding: utf-8 -*-
"""Provides regridding for irregular grids."""
import ESMF
import iris
import numpy as np
from ._mapping import get_empty_data, map_slices, ref_to_dims_index
# Initialize the ESMF runtime once at import time.
ESMF_MANAGER = ESMF.Manager(debug=False)
# Indices of the longitude/latitude coordinate axes in ESMF grids.
ESMF_LON, ESMF_LAT = 0, 1
# Mapping from user-facing method names to ESMF regrid methods.
ESMF_REGRID_METHODS = {
    'linear': ESMF.RegridMethod.BILINEAR,
    'area_weighted': ESMF.RegridMethod.CONSERVE,
    'nearest': ESMF.RegridMethod.NEAREST_STOD,
}
# Source mask values passed to the auxiliary mask-propagation regridding
# (empty array means no values are treated as masked).
MASK_REGRIDDING_MASK_VALUE = {
    ESMF.RegridMethod.BILINEAR: np.array([1]),
    ESMF.RegridMethod.CONSERVE: np.array([1]),
    ESMF.RegridMethod.NEAREST_STOD: np.array([]),
}
# ESMF_REGRID_METHODS = {
#     'bilinear': ESMF.RegridMethod.BILINEAR,
#     'patch': ESMF.RegridMethod.PATCH,
#     'conserve': ESMF.RegridMethod.CONSERVE,
#     'nearest_stod': ESMF.RegridMethod.NEAREST_STOD,
#     'nearest_dtos': ESMF.RegridMethod.NEAREST_DTOS,
# }
def cf_2d_bounds_to_esmpy_corners(bounds, circular):
    """Convert cf style 2d bounds to normal (esmpy style) corners.

    ``bounds`` has shape (lat, lon, 4), holding the four cell corners in cf
    order. The result is a single (lon+1, lat+1) corner array (lon, lat+1
    for circular longitudes, where the last column wraps onto the first).
    """
    n_lat, n_lon = bounds.shape[:2]
    # Circular grids share the seam column, so no extra lon bound is needed.
    n_lon_bounds = n_lon if circular else n_lon + 1
    corners = np.empty((n_lon_bounds, n_lat + 1))
    # Fill the interior from corner 0 and the outer edges from corners
    # 1, 2 and 3; note the transpose to (lon, lat) order.
    corners[:n_lon, :n_lat] = bounds[:, :, 0].T
    corners[:n_lon, n_lat:] = bounds[-1:, :, 3].T
    corners[n_lon:, :n_lat] = bounds[:, -1:, 1].T
    corners[n_lon:, n_lat:] = bounds[-1:, -1:, 2].T
    return corners
def coords_iris_to_esmpy(lat, lon, circular):
    """Build ESMF compatible coordinate information from iris coords.

    Returns the cell center latitudes/longitudes plus the matching corner
    arrays, all arranged in the (lon, lat) axis order ESMPy expects.
    """
    dim = lat.ndim
    if lon.ndim != dim:
        msg = 'Different dimensions in latitude({}) and longitude({}) coords.'
        raise ValueError(msg.format(lat.ndim, lon.ndim))
    if dim == 1:
        # Make sure both coordinates carry bounds before reading them.
        for coord in (lat, lon):
            if not coord.has_bounds():
                coord.guess_bounds()
        center_lat, center_lon = np.meshgrid(lat.points, lon.points)
        lat_corners = np.concatenate([lat.bounds[:, 0], lat.bounds[-1:, 1]])
        if circular:
            # The final upper bound coincides with the first lower bound.
            lon_corners = lon.bounds[:, 0]
        else:
            lon_corners = np.concatenate(
                [lon.bounds[:, 0], lon.bounds[-1:, 1]])
        corner_lat, corner_lon = np.meshgrid(lat_corners, lon_corners)
    elif dim == 2:
        center_lat = lat.points.T.copy()
        center_lon = lon.points.T.copy()
        corner_lat = cf_2d_bounds_to_esmpy_corners(lat.bounds, circular)
        corner_lon = cf_2d_bounds_to_esmpy_corners(lon.bounds, circular)
    else:
        raise NotImplementedError(
            'Coord dimension is {}. Expected 1 or 2.'.format(dim))
    return center_lat, center_lon, corner_lat, corner_lon
def get_grid(esmpy_lat, esmpy_lon,
             esmpy_lat_corners, esmpy_lon_corners, circular):
    """Build EMSF grid from given coordinate information.

    All arrays are expected in (lon, lat) axis order, as produced by
    ``coords_iris_to_esmpy``. The returned grid carries center and corner
    coordinates plus an (initially unset) cell mask at the center stagger.
    """
    # One periodic dimension tells ESMF the longitudes wrap around.
    if circular:
        num_peri_dims = 1
    else:
        num_peri_dims = 0
    grid = ESMF.Grid(np.array(esmpy_lat.shape),
                     num_peri_dims=num_peri_dims,
                     staggerloc=[ESMF.StaggerLoc.CENTER])
    # Fill the cell center coordinates allocated at construction time.
    grid.get_coords(ESMF_LON)[...] = esmpy_lon
    grid.get_coords(ESMF_LAT)[...] = esmpy_lat
    # Corner coordinates must be allocated explicitly before assignment.
    grid.add_coords([ESMF.StaggerLoc.CORNER])
    grid_lon_corners = grid.get_coords(ESMF_LON,
                                       staggerloc=ESMF.StaggerLoc.CORNER)
    grid_lat_corners = grid.get_coords(ESMF_LAT,
                                       staggerloc=ESMF.StaggerLoc.CORNER)
    grid_lon_corners[...] = esmpy_lon_corners
    grid_lat_corners[...] = esmpy_lat_corners
    # Reserve a cell mask; it is populated later by the regridder builders.
    grid.add_item(ESMF.GridItem.MASK, ESMF.StaggerLoc.CENTER)
    return grid
def is_lon_circular(lon):
    """Determine if longitudes are circular.

    Parameters
    ----------
    lon: :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord`
        Longitude coordinate, 1d or 2d.

    Returns
    -------
    bool
        True if the longitudes wrap around the full circle.

    Raises
    ------
    NotImplementedError
        If an AuxCoord longitude has more than 2 dimensions.
    ValueError
        If ``lon`` is neither a DimCoord nor an AuxCoord.
    """
    if isinstance(lon, iris.coords.DimCoord):
        circular = lon.circular
    elif isinstance(lon, iris.coords.AuxCoord):
        if lon.ndim == 1:
            # Distance between first lower and last upper bound.
            seam = lon.bounds[-1, 1] - lon.bounds[0, 0]
        elif lon.ndim == 2:
            # Row-wise difference between the eastern edge of the last
            # column and the western edge of the first column.
            seam = (lon.bounds[1:-1, -1, (1, 2)]
                    - lon.bounds[1:-1, 0, (0, 3)])
        else:
            raise NotImplementedError('AuxCoord longitude is higher '
                                      'dimensional than 2d. Giving up.')
        # np.alltrue was deprecated and removed in NumPy 2.0; np.all is the
        # supported equivalent and behaves identically here.
        circular = np.all(abs(seam) % 360. < 1.e-3)
    else:
        raise ValueError('longitude is neither DimCoord nor AuxCoord. '
                         'Giving up.')
    return circular
def cube_to_empty_field(cube):
    """Build an empty ESMF field matching the horizontal grid of *cube*."""
    lon_coord = cube.coord('longitude')
    lat_coord = cube.coord('latitude')
    circular = is_lon_circular(lon_coord)
    grid = get_grid(
        *coords_iris_to_esmpy(lat_coord, lon_coord, circular),
        circular=circular)
    return ESMF.Field(
        grid, name=cube.long_name, staggerloc=ESMF.StaggerLoc.CENTER)
def get_representant(cube, ref_to_slice):
    """Return a representative slice of *cube*.

    The dimensions referenced by *ref_to_slice* are kept in full, every
    other dimension is collapsed to its first index.
    """
    slice_dims = set(ref_to_dims_index(cube, ref_to_slice))
    index = tuple(
        slice(None) if dim in slice_dims else 0
        for dim in range(cube.ndim))
    return cube[index]
def build_regridder_2d(src_rep, dst_rep, regrid_method, mask_threshold):
    """Build regridder for 2d regridding.

    Builds an ESMF regridder between the grids of the two representant
    cubes and wraps it in a closure that regrids one 2d slice at a time.
    If the source representant is masked, a preliminary mask-propagation
    regridding determines which destination cells receive less than
    ``mask_threshold`` unmasked contribution and masks them.
    """
    dst_field = cube_to_empty_field(dst_rep)
    src_field = cube_to_empty_field(src_rep)
    regridding_arguments = {
        'srcfield': src_field,
        'dstfield': dst_field,
        'regrid_method': regrid_method,
        'unmapped_action': ESMF.UnmappedAction.IGNORE,
        'ignore_degenerate': True,
    }
    if np.ma.is_masked(src_rep.data):
        # Regrid the inverted mask (1 = valid) to measure, per destination
        # cell, how much unmasked source data contributes to it.
        src_field.data[...] = ~src_rep.data.mask.T
        src_mask = src_field.grid.get_item(ESMF.GridItem.MASK,
                                           ESMF.StaggerLoc.CENTER)
        src_mask[...] = src_rep.data.mask.T
        center_mask = dst_field.grid.get_item(ESMF.GridItem.MASK,
                                              ESMF.StaggerLoc.CENTER)
        center_mask[...] = 0
        mask_regridder = ESMF.Regrid(
            src_mask_values=MASK_REGRIDDING_MASK_VALUE[regrid_method],
            dst_mask_values=np.array([]),
            **regridding_arguments)
        regr_field = mask_regridder(src_field, dst_field)
        # Destination cells with too little valid contribution get masked.
        dst_mask = regr_field.data[...].T < mask_threshold
        center_mask[...] = dst_mask.T
    else:
        dst_mask = False
    # The actual data regridder honours the masks set up above (value 1
    # marks masked cells on both grids).
    field_regridder = ESMF.Regrid(src_mask_values=np.array([1]),
                                  dst_mask_values=np.array([1]),
                                  **regridding_arguments)
    def regridder(src):
        """Regrid 2d for irregular grids."""
        res = get_empty_data(dst_rep.shape, src.dtype)
        data = src.data
        if np.ma.is_masked(data):
            data = data.data
        # ESMF fields are (lon, lat), iris cubes (lat, lon) - transpose.
        src_field.data[...] = data.T
        regr_field = field_regridder(src_field, dst_field)
        res.data[...] = regr_field.data[...].T
        res.mask[...] = dst_mask
        return res
    return regridder
def build_regridder_3d(src_rep, dst_rep, regrid_method, mask_threshold):
    # pylint: disable=too-many-locals
    # The necessary refactoring will be done for the full 3d regridding.
    """Build regridder for 2.5d regridding.

    One horizontal (2d) regridder is built per vertical level; the returned
    closure applies them level by level.
    """
    level_regridders = [
        build_regridder_2d(src_rep[level], dst_rep[level],
                           regrid_method, mask_threshold)
        for level in range(src_rep.shape[0])
    ]

    def regridder(src):
        """Regrid 2.5d for irregular grids."""
        res = get_empty_data(dst_rep.shape, src.dtype)
        for level, level_regridder in enumerate(level_regridders):
            res[level, ...] = level_regridder(src[level])
        return res

    return regridder
def build_regridder(src_rep, dst_rep, method, mask_threshold=.99):
    """Build regridders from representants.

    Parameters
    ----------
    src_rep: :class:`iris.cube.Cube`
        Representant of the source grid (2d or 3d).
    dst_rep: :class:`iris.cube.Cube`
        Representant of the destination grid.
    method: str
        Regridding method; one of the keys of ``ESMF_REGRID_METHODS``
        ('linear', 'area_weighted', 'nearest').
    mask_threshold: float
        Fraction of unmasked source contribution below which a destination
        cell is masked.

    Returns
    -------
    callable
        Function regridding one slice onto the destination grid.

    Raises
    ------
    KeyError
        If ``method`` is not a known regridding method.
    ValueError
        If the representants are neither 2d nor 3d.
    """
    regrid_method = ESMF_REGRID_METHODS[method]
    if src_rep.ndim == 2:
        return build_regridder_2d(src_rep, dst_rep,
                                  regrid_method, mask_threshold)
    if src_rep.ndim == 3:
        return build_regridder_3d(src_rep, dst_rep,
                                  regrid_method, mask_threshold)
    # Previously any other dimensionality fell through and surfaced as an
    # obscure UnboundLocalError; fail explicitly instead.
    raise ValueError(
        'Representant must be 2d or 3d, got {}d.'.format(src_rep.ndim))
def get_grid_representant(cube, horizontal_only=False):
    """Extract the spatial grid from a cube.

    By default a 1d vertical coordinate (if present) is part of the
    extracted slice; with ``horizontal_only=True`` only latitude and
    longitude are kept.
    """
    ref_to_slice = ['latitude', 'longitude']
    if not horizontal_only:
        try:
            z_coord = cube.coord(axis='Z')
        except iris.exceptions.CoordinateNotFoundError:
            # No z coordinate, go on with 2d regridding.
            z_coord = None
        if z_coord is not None:
            n_zdims = len(cube.coord_dims(z_coord))
            if n_zdims == 1:
                ref_to_slice = [z_coord] + ref_to_slice
            elif n_zdims > 1:
                raise ValueError("Cube has multidimensional Z coordinate.")
            # n_zdims == 0: scalar z coordinate, go on with 2d regridding.
    return get_representant(cube, ref_to_slice)
def get_grid_representants(src, dst):
    """
    Construct cubes representing the source and destination grid.
    This method constructs two new cubes that represent the grids,
    i.e. the spatial dimensions of the given cubes.
    Parameters
    ----------
    src: :class:`iris.cube.Cube`
        Cube to be regridded. Typically a time series of 2d or 3d slices.
    dst: :class:`iris.cube.Cube`
        Cube defining the destination grid. Usually just a 2d or 3d cube.
    Returns
    -------
    tuple of :class:`iris.cube.Cube`:
        A tuple containing two cubes, representing the source grid and the
        destination grid, respectively.
    """
    src_rep = get_grid_representant(src)
    # The destination only contributes its horizontal grid; a vertical
    # dimension, if any, is taken over from the source representant.
    dst_horiz_rep = get_grid_representant(dst, horizontal_only=True)
    if src_rep.ndim == 3:
        dst_shape = (src_rep.shape[0],)
        dim_coords = [src_rep.coord(dimensions=[0], dim_coords=True)]
    else:
        dst_shape = tuple()
        dim_coords = []
    dst_shape += dst_horiz_rep.shape
    dim_coords += dst_horiz_rep.coords(dim_coords=True)
    dim_coords_and_dims = [(c, i) for i, c in enumerate(dim_coords)]
    # Build the destination representant with empty (masked) data but with
    # all of the source's metadata so it can stand in for regridded slices.
    dst_rep = iris.cube.Cube(
        data=get_empty_data(dst_shape, src.dtype),
        standard_name=src.standard_name,
        long_name=src.long_name,
        var_name=src.var_name,
        units=src.units,
        attributes=src.attributes,
        cell_methods=src.cell_methods,
        dim_coords_and_dims=dim_coords_and_dims,
    )
    return src_rep, dst_rep
def regrid(src, dst, method='linear'):
    """
    Regrid src to the grid defined by dst.

    Regrid the data in *src* onto the horizontal grid defined by *dst*,
    slice by slice.

    Parameters
    ----------
    src: :class:`iris.cube.Cube`
        Source data. Must have latitude and longitude coords.
        These can be 1d or 2d and should have bounds.
    dst: :class:`iris.cube.Cube`
        Defines the target grid.
    method:
        Selects the regridding method.
        Can be 'linear', 'area_weighted',
        or 'nearest'. See ESMPy_.

    Returns
    -------
    :class:`iris.cube.Cube`:
        The regridded cube.

    .. _ESMPy: http://www.earthsystemmodeling.org/
       esmf_releases/non_public/ESMF_7_0_0/esmpy_doc/html/
       RegridMethod.html#ESMF.api.constants.RegridMethod
    """
    src_rep, dst_rep = get_grid_representants(src, dst)
    regridder = build_regridder(src_rep, dst_rep, method)
    return map_slices(src, regridder, src_rep, dst_rep)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
15946,
1460,
842,
81,
13494,
329,
21388,
50000,
526,
15931,
198,
198,
11748,
412,
12310,
37,
198,
11748,
4173,
271,
198,
11748,
299,
32152,
355,
45941,
198,
198,
... | 2.051411 | 5,777 |
import datetime
import logging
from django import test
from services.common import misc, helpers as db_tools
from services.configuration.jrpc.serializers import channels as \
channel_serializers
from services.configuration.jrpc.views import rules as jrpc_rules
from services.configuration.jrpc.views.channels import \
groundstations as jrpc_gs_chs
from services.configuration.jrpc.views.channels import \
spacecraft as jrpc_sc_chs
from services.scheduling.jrpc.views.operational import \
groundstations as jrpc_gs_scheduling
from services.scheduling.jrpc.views.operational import \
spacecraft as jrpc_sc_scheduling
from services.scheduling.models import operational
"""
Copyright 2013, 2014 Ricardo Tubio-Pardavila
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'rtubiopa@calpoly.edu'
class JRPCBookingProcessTest(test.TestCase):
    """Testing class for the booking process.
    This class tests completely the booking process in which a GroundStation
    operator and a Spacecraft operator collaborate through the <Scheduling>
    service for arranging the remote operation of the Spacecraft.
    """
    def setUp(self):
        """
        This method populates the database with some information to be used
        only for this test: one user profile, one spacecraft with a channel,
        one groundstation with a channel and a daily availability rule.
        """
        self.__verbose_testing = False
        if not self.__verbose_testing:
            # Silence noisy service loggers unless verbose debugging is on.
            logging.getLogger('common').setLevel(level=logging.CRITICAL)
            logging.getLogger('configuration').setLevel(level=logging.CRITICAL)
            logging.getLogger('scheduling').setLevel(level=logging.CRITICAL)
        operational.OperationalSlot.objects.set_debug()
        # Spacecraft fixture identifiers and channel configuration.
        self.__sc_1_id = 'xatcobeo-sc'
        self.__sc_1_tle_id = 'HUMSAT-D'
        self.__sc_1_ch_1_id = 'xatcobeo-fm'
        self.__sc_1_ch_1_cfg = {
            channel_serializers.FREQUENCY_K: '437000000',
            channel_serializers.MODULATION_K: 'FM',
            channel_serializers.POLARIZATION_K: 'LHCP',
            channel_serializers.BITRATE_K: '300',
            channel_serializers.BANDWIDTH_K: '12.500000000'
        }
        # GroundStation fixture identifiers and channel configurations.
        self.__gs_1_id = 'gs-la'
        self.__gs_1_ch_1_id = 'gs-la-fm'
        self.__gs_1_ch_1_cfg = {
            channel_serializers.BAND_K:
                'UHF / U / 435000000.000000 / 438000000.000000',
            channel_serializers.AUTOMATED_K: False,
            channel_serializers.MODULATIONS_K: ['FM'],
            channel_serializers.POLARIZATIONS_K: ['LHCP'],
            channel_serializers.BITRATES_K: [300, 600, 900],
            channel_serializers.BANDWIDTHS_K: [12.500000000, 25.000000000]
        }
        self.__gs_1_ch_2_id = 'gs-la-fm-2'
        self.__gs_1_ch_2_cfg = {
            channel_serializers.BAND_K:
                'UHF / U / 435000000.000000 / 438000000.000000',
            channel_serializers.AUTOMATED_K: False,
            channel_serializers.MODULATIONS_K: ['FM'],
            channel_serializers.POLARIZATIONS_K: ['LHCP'],
            channel_serializers.BITRATES_K: [300, 600, 900],
            channel_serializers.BANDWIDTHS_K: [12.500000000, 25.000000000]
        }
        # Create the database objects backing the fixtures above.
        self.__band = db_tools.create_band()
        self.__user_profile = db_tools.create_user_profile()
        self.__sc_1 = db_tools.create_sc(
            user_profile=self.__user_profile,
            identifier=self.__sc_1_id,
            tle_id=self.__sc_1_tle_id,
        )
        self.__gs_1 = db_tools.create_gs(
            user_profile=self.__user_profile, identifier=self.__gs_1_id,
        )
        self.assertEqual(
            jrpc_gs_chs.gs_channel_create(
                groundstation_id=self.__gs_1_id,
                channel_id=self.__gs_1_ch_1_id,
                configuration=self.__gs_1_ch_1_cfg
            ), True, 'Channel should have been created!'
        )
        # Requesting slots for a channel id (not a groundstation id) fails.
        self.assertRaises(
            Exception,
            jrpc_gs_scheduling.get_operational_slots,
            self.__gs_1_ch_1_id
        )
        # 3) basic test, should generate 2 FREE slots
        self.assertEqual(
            jrpc_sc_chs.sc_channel_create(
                spacecraft_id=self.__sc_1_id,
                channel_id=self.__sc_1_ch_1_id,
                configuration=self.__sc_1_ch_1_cfg
            ), True, 'Channel should have been created!'
        )
        # 4) we add a daily rule 12 hours, 00:00:01am to 11:59:59pm UTC
        #   all pass slots should become operational slots.
        self.__rule_1 = jrpc_rules.add_rule(
            self.__gs_1_id,
            db_tools.create_jrpc_daily_rule(
                date_i=misc.get_today_utc(),
                date_f=misc.get_today_utc() + datetime.timedelta(days=50),
                starting_time=misc.get_next_midnight() + datetime.timedelta(
                    seconds=1
                ),
                ending_time=misc.get_next_midnight() + datetime.timedelta(
                    hours=23, minutes=59, seconds=59
                )
            )
        )
    def test_1_booking(self):
        """Basic booking test.
        This test should validate the basic booking process of remote
        operations, which involve:
        * Spacecraft operators SELECT slots (remote operation request).
        * GroundStation operators CONFIRM the selection of the slots (remote
        operation is RESERVED).
        * Spacecraft operators and GroundStation operators can retrieve this
        final status of the slots through the 'getChanges' method.
        """
        if self.__verbose_testing:
            print('##### test_1_booking')
        # Pick the first three FREE operational slots as the working set.
        selection_1 = [
            int(x.identifier) for x in
            operational.OperationalSlot.objects.filter(
                state=operational.STATE_FREE
            ).order_by('id')[:3]
        ]
        # 0) Spacecraft operators selected a set of slots...
        sc_s_slots = jrpc_sc_scheduling.select_slots(
            self.__sc_1_id, selection_1
        )
        self.assertEqual(
            [int(x['identifier']) for x in sc_s_slots], selection_1
        )
        # 1) GroundStation operators confirm the selected slots...
        gs_c_slots = jrpc_gs_scheduling.confirm_selections(
            self.__gs_1_id, selection_1
        )
        self.assertEqual(
            [int(x['identifier']) for x in gs_c_slots], selection_1
        )
        # 5) GroundStation operators cancel the selected slots...
        jrpc_gs_scheduling.cancel_reservations(self.__gs_1_id, selection_1)
        # 5.a) No canceled Operational Slots
        self.assertEqual(
            [
                x.identifier
                for x in operational.OperationalSlot.objects.filter(
                    state=operational.STATE_CANCELED
                )
            ],
            []
        )
        # 5.b) No selected Operational Slots
        self.assertEqual(
            [
                x.identifier
                for x in operational.OperationalSlot.objects.filter(
                    state=operational.STATE_SELECTED
                )
            ],
            []
        )
        # 7) SpacecraftOperator retries the selection...
        sc_s_slots = jrpc_sc_scheduling.select_slots(
            self.__sc_1_id, selection_1
        )
        self.assertEqual(
            [int(x['identifier']) for x in sc_s_slots], selection_1
        )
        # 8) GroundStation operator denies the selection...
        gs_d_slots = jrpc_gs_scheduling.deny_selections(
            self.__gs_1_id, selection_1
        )
        self.assertEqual(
            [int(x['identifier']) for x in gs_d_slots], selection_1
        )
        # 5.a) No canceled Operational Slots
        self.assertEqual(
            [
                x.identifier
                for x in operational.OperationalSlot.objects.filter(
                    state=operational.STATE_CANCELED
                )
            ],
            []
        )
        # 5.b) No selected Operational Slots
        self.assertEqual(
            [
                x.identifier
                for x in operational.OperationalSlot.objects.filter(
                    state=operational.STATE_SELECTED
                )
            ],
            []
        )
        # ### clean up sc/gs
        self.assertTrue(
            jrpc_gs_chs.gs_channel_delete(
                groundstation_id=self.__gs_1_id, channel_id=self.__gs_1_ch_1_id
            ),
            'Could not delete GroundStationChannel = ' + str(
                self.__gs_1_ch_1_id
            )
        )
        self.assertTrue(
            jrpc_sc_chs.sc_channel_delete(
                spacecraft_id=self.__sc_1_id, channel_id=self.__sc_1_ch_1_id
            ),
            'Could not delete SpacecraftChannel = ' + str(self.__sc_1_ch_1_id)
        )
| [
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
6738,
42625,
14208,
1330,
1332,
198,
198,
6738,
2594,
13,
11321,
1330,
12747,
11,
49385,
355,
20613,
62,
31391,
198,
6738,
2594,
13,
11250,
3924,
13,
73,
81,
14751,
13,
46911,
11341,
1330,... | 2.049035 | 4,507 |
from graphene import relay, ObjectType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Colaborador
| [
6738,
42463,
1330,
24248,
11,
9515,
6030,
198,
6738,
42463,
62,
28241,
14208,
1330,
37770,
10267,
6030,
198,
6738,
42463,
62,
28241,
14208,
13,
24455,
1330,
37770,
22417,
32048,
15878,
198,
198,
6738,
764,
27530,
1330,
1623,
4820,
7079,
6... | 4.380952 | 42 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-29 19:15
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
20,
319,
1584,
12,
3023,
12,
1959,
678,
25,
1314,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
19... | 2.672727 | 55 |
'''
Transforming extracted data for application of Zeilis et al.'s algorithm
Usage: python3 02_zeilis_01_extraction.py
'''
import os
import numpy as np
import pandas as pd
import constants
# Constants re-exported from the shared constants module.
NUMBER_OF_DIMENSIONS = constants.NUMBER_OF_DIMENSIONS
RESAMPLE_TIME_WINDOW = "M"  # pandas offset alias: monthly resampling
DIMENSION_NAMES = constants.DIMENSION_NAMES
EVENTS_TO_BURN_IN = constants.EVENTS_TO_BURN_IN
# prepare time series csv
all_datasets = os.listdir(constants.PATH_TO_DESTINATION_DATASET)
# Datasets with too little data for the Zeilis et al. algorithm.
too_small_datasets = ["sitecore", "esperanto", "ai", "monero"]
selected_datasets = list(set(all_datasets) - set(too_small_datasets))
age_in_days = 36 * 30  # a little less than 3 years
__prepare_time_series_csv(selected_datasets, age_in_days)
| [
7061,
6,
198,
8291,
15464,
21242,
1366,
329,
3586,
286,
9033,
346,
271,
2123,
435,
2637,
82,
11862,
198,
28350,
25,
21015,
18,
7816,
62,
2736,
346,
271,
62,
486,
62,
2302,
7861,
13,
9078,
198,
7061,
6,
198,
11748,
28686,
198,
198,
... | 2.621324 | 272 |
# -*- coding: utf-8 -*-
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
@login_required(login_url='/', redirect_field_name='')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
3642,
822,... | 3.093333 | 75 |
#!/usr/bin/env python3
import sys
from aiida import load_profile
from aiida.plugins import DataFactory
from aiida.orm import Code
from aiida.engine import submit
from mpds_aiida.workflows.crystal import MPDSCrystalWorkchain
from mpds_aiida.common import get_template
# Connect to the default AiiDA profile and load the calculation template.
load_profile()
calc_setup = get_template('production.yml')
# Command line argument: "formula/space_group[/pearson]".
phase = sys.argv[1].split("/")
if len(phase) == 3:
    formula, sgs, pearson = phase
else:
    formula, sgs, pearson = phase[0], phase[1], None
sgs = int(sgs)
calc_setup['parameters']['crystal']['title'] = "/".join(phase)
# Assemble the workchain inputs from the template and the requested phase.
inputs = MPDSCrystalWorkchain.get_builder()
inputs.crystal_code = Code.get_from_string('{}@{}'.format(calc_setup['codes'][0], calc_setup['cluster']))
inputs.crystal_parameters = DataFactory('dict')(dict=calc_setup['parameters']['crystal'])
inputs.basis_family, _ = DataFactory('crystal_dft.basis_family').get_or_create(calc_setup['basis_family'])
inputs.options = DataFactory('dict')(dict=calc_setup['options'])
inputs.metadata = dict(label="/".join(phase))
inputs.mpds_query = DataFactory('dict')(dict={'formulae': formula, 'sgs': sgs})
# Submit the workchain to the daemon and report its primary key.
wc = submit(MPDSCrystalWorkchain, **inputs)
print("Submitted WorkChain %s" % wc.pk)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
6738,
257,
72,
3755,
1330,
3440,
62,
13317,
198,
6738,
257,
72,
3755,
13,
37390,
1330,
6060,
22810,
198,
6738,
257,
72,
3755,
13,
579,
1330,
6127,
198,
67... | 2.713626 | 433 |
"""Collect metrics about coverage of whole genome sequencing."""
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
def replace_metrics_class(fname):
    """Replace metrics class name in a Picard report, in place.

    This temporary fix is needed due to compatibility issue with GATK
    4.1.2.0 and MultiQC 1.8. MultiQC searches for CollectWgsMetrics
    instead of WgsMetrics in the report file. Note that this should be
    resolved in the 1.9 release of MultiQC.

    :param fname: path of the metrics report file to patch
    """
    old_line = "## METRICS CLASS\tpicard.analysis.WgsMetrics\n"
    new_line = "## METRICS CLASS\tCollectWgsMetrics$WgsMetrics\n"
    with open(fname, "r") as report:
        lines = report.readlines()
    # Rewrite the file once, substituting only the exact header line; the
    # original duplicated the append in both branches and called writelines
    # once per line.
    with open(fname, "w") as report:
        report.writelines(
            new_line if line == old_line else line for line in lines
        )
# NOTE(review): the class name InsertSizeMetrics looks copied from another
# process - slug, docstring and fields all describe CollectWgsMetrics.
# Renaming would break existing imports, so it is left unchanged; confirm
# with the process registry before renaming.
class InsertSizeMetrics(Process):
    """Collect metrics about coverage of whole genome sequencing.
    Tool from Picard, wrapped by GATK4. See GATK
    CollectWgsMetrics for more information.
    """
    slug = "wgs-metrics"
    name = "Picard WGS Metrics"
    category = "Picard"
    process_type = "data:picard:wgsmetrics"
    version = "2.0.0"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {"docker": {"image": "resolwebio/dnaseq:4.2.0"}},
    }
    # Data object name templated from the input BAM's sample name.
    data_name = '{{ bam|sample_name|default("?") }}'
    class Input:
        """Input fields for CollectWgsMetrics."""
        bam = DataField("alignment:bam", label="Alignment BAM file")
        genome = DataField("seq:nucleotide", label="Genome")
        read_length = IntegerField(label="Average read length", default=150)
        create_histogram = BooleanField(
            label="Include data for base quality histogram in the metrics file",
            default=False,
        )
        advanced = BooleanField(
            label="Show advanced options",
            description="Inspect and modify parameters.",
            default=False,
        )
        class Options:
            """Options."""
            min_map_quality = IntegerField(
                label="Minimum mapping quality for a read to contribute coverage",
                default=20,
            )
            min_quality = IntegerField(
                label="Minimum base quality for a base to contribute coverage",
                description="N bases will be treated as having a base quality of "
                "negative infinity and will therefore be excluded from coverage "
                "regardless of the value of this parameter.",
                default=20,
            )
            coverage_cap = IntegerField(
                label="Maximum coverage cap",
                description="Treat positions with coverage exceeding this value as "
                "if they had coverage at this set value.",
                default=250,
            )
            accumulation_cap = IntegerField(
                label="Ignore positions with coverage above this value",
                description="At positions with coverage exceeding this value, "
                "completely ignore reads that accumulate beyond this value",
                default=100000,
            )
            count_unpaired = BooleanField(
                label="Count unpaired reads and paired reads with one end unmapped",
                default=False,
            )
            sample_size = IntegerField(
                label="Sample Size used for Theoretical Het Sensitivity sampling",
                default=10000,
            )
            validation_stringency = StringField(
                label="Validation stringency",
                description="Validation stringency for all SAM files read by this "
                "program. Setting stringency to SILENT can improve "
                "performance when processing a BAM file in which "
                "variable-length data (read, qualities, tags) do not "
                "otherwise need to be decoded. Default is STRICT.",
                choices=[
                    ("STRICT", "STRICT"),
                    ("LENIENT", "LENIENT"),
                    ("SILENT", "SILENT"),
                ],
                default="STRICT",
            )
        options = GroupField(Options, label="Options", hidden="!advanced")
    class Output:
        """Output fields for CollectWgsMetrics."""
        report = FileField(label="WGS metrics report")
        species = StringField(label="Species")
        build = StringField(label="Build")
    def run(self, inputs, outputs):
        """Run analysis.

        Invokes GATK's CollectWgsMetrics on the input BAM, patches the
        report for MultiQC compatibility and registers the outputs.
        """
        basename = os.path.basename(inputs.bam.bam.path)
        assert basename.endswith(".bam")
        name = basename[:-4]
        metrics_file = f"{name}_wgs_metrics.txt"
        args = [
            "--INPUT",
            inputs.bam.bam.path,
            "--OUTPUT",
            metrics_file,
            "--REFERENCE_SEQUENCE",
            inputs.genome.fasta.path,
            "--READ_LENGTH",
            inputs.read_length,
            "--INCLUDE_BQ_HISTOGRAM",
            inputs.create_histogram,
            "--MINIMUM_MAPPING_QUALITY",
            inputs.options.min_map_quality,
            "--MINIMUM_BASE_QUALITY",
            inputs.options.min_quality,
            "--COVERAGE_CAP",
            inputs.options.coverage_cap,
            "--LOCUS_ACCUMULATION_CAP",
            inputs.options.accumulation_cap,
            "--COUNT_UNPAIRED",
            inputs.options.count_unpaired,
            "--SAMPLE_SIZE",
            inputs.options.sample_size,
            "--VALIDATION_STRINGENCY",
            inputs.options.validation_stringency,
        ]
        # TEE streams the tool output while capturing the return code.
        return_code, _, _ = Cmd["gatk"]["CollectWgsMetrics"][args] & TEE(retcode=None)
        if return_code:
            self.error("CollectWgsMetrics tool failed.")
        # Work around the MultiQC 1.8 metrics-class lookup (see helper).
        replace_metrics_class(metrics_file)
        outputs.report = metrics_file
        outputs.species = inputs.bam.species
        outputs.build = inputs.bam.build
| [
37811,
31337,
20731,
546,
5197,
286,
2187,
19270,
32841,
526,
15931,
198,
11748,
28686,
198,
198,
6738,
458,
2178,
388,
1330,
309,
6500,
198,
198,
6738,
581,
349,
732,
13,
14681,
1330,
357,
198,
220,
220,
220,
41146,
15878,
11,
198,
2... | 2.183432 | 2,873 |
"""
This script suppresses the 502 Bad Gateway messages, mitmproxy sends if the server is not responsing correctly.
For example, this functionality can be helpful if mitmproxy is used in between a web scanner and a web application.
Without this script, if the web application under test crashes, mitmproxy will send 502 Bad Gateway responses.
These responses are irritating the web application scanner since they obfuscate the actual problem.
"""
from mitmproxy import http
from mitmproxy.exceptions import HttpSyntaxException
def error(flow: http.HTTPFlow):
    """Kill the flow if it has an error different to HttpSyntaxException.

    mitmproxy invokes module-level addon hooks with the flow as their only
    argument; the previous ``self`` parameter made this hook's signature
    invalid for a script-style addon.

    Sometimes, web scanners generate malformed HTTP syntax on purpose and we
    do not want to kill these requests.
    """
    if flow.error is not None and not isinstance(flow.error, HttpSyntaxException):
        flow.kill()
| [
37811,
198,
1212,
4226,
802,
16746,
262,
47233,
7772,
29916,
6218,
11,
10255,
76,
36436,
12800,
611,
262,
4382,
318,
407,
2424,
278,
9380,
13,
198,
1890,
1672,
11,
428,
11244,
460,
307,
7613,
611,
10255,
76,
36436,
318,
973,
287,
1022... | 3.884444 | 225 |
from chokozainerrl.tools.make_video import check
from chokozainerrl.tools.make_video import growth
| [
6738,
442,
482,
8590,
391,
8056,
75,
13,
31391,
13,
15883,
62,
15588,
1330,
2198,
198,
6738,
442,
482,
8590,
391,
8056,
75,
13,
31391,
13,
15883,
62,
15588,
1330,
3349,
628
] | 3.125 | 32 |
# Copyright (c) 2019, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from itertools import product
import time
import numpy as np
import pytest
import cugraph
from cugraph.tests import utils
import rmm
# Temporarily suppress warnings till networkX fixes deprecation warnings
# (Using or importing the ABCs from 'collections' instead of from
# 'collections.abc' is deprecated, and in 3.8 it will stop working) for
# python 3.7. Also, this import networkx needs to be relocated in the
# third-party group once this gets fixed.
import warnings
# Import networkx with its DeprecationWarnings silenced (see note above
# about the 'collections' ABCs warning on python 3.7).
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    import networkx as nx
    print('Networkx version : {} '.format(nx.__version__))
# Graph edge-list files exercised by the parametrized tests below.
DATASETS = ['../datasets/dolphins.csv',
            '../datasets/karate.csv',
            '../datasets/netscience.csv',
            '../datasets/email-Eu-core.csv']
# Source vertices used for the traversal tests.
SOURCES = [1]
# Test all combinations of default/managed and pooled/non-pooled allocation
@pytest.mark.parametrize('managed, pool',
list(product([False, True], [False, True])))
@pytest.mark.parametrize('graph_file', DATASETS)
@pytest.mark.parametrize('source', SOURCES)
# Test all combinations of default/managed and pooled/non-pooled allocation
@pytest.mark.parametrize('managed, pool',
list(product([False, True], [False, True])))
@pytest.mark.parametrize('graph_file', ['../datasets/netscience.csv'])
@pytest.mark.parametrize('source', SOURCES)
@pytest.mark.parametrize('managed, pool',
list(product([False, True], [False, True])))
@pytest.mark.parametrize('graph_file', ['../datasets/netscience.csv'])
@pytest.mark.parametrize('source', SOURCES)
| [
2,
15069,
357,
66,
8,
13130,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.894737 | 779 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
* Copyright (C) 2017 Hendrik van Essen
*
* This file is subject to the terms and conditions of the MIT License
* See the file LICENSE in the top level directory for more details.
"""
from __future__ import absolute_import, print_function, unicode_literals
# REQUEST KEYS
# Keys expected in an incoming JSON request payload.
REQUEST_KEY_ACTION_KEY = 'action_key'
REQUEST_KEY_ACTION_ARGUMENT = 'action_argument'
# Supported values for REQUEST_KEY_ACTION_KEY (sensor read actions).
REQUEST_KEY_GET_TEMPERATURE = 'get_temperature'
REQUEST_KEY_GET_HEAT_INDEX = 'get_heat_index'
REQUEST_KEY_GET_HUMIDITY = 'get_humidity'
# RESULT KEYS
# Keys present in the JSON response payload.
RESULT_KEY_API_VERSION = 'api_version' # string
RESULT_KEY_RESULT = 'result' # string
RESULT_KEY_ERROR = 'error' # string
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
1635,
15069,
357,
34,
8,
2177,
14666,
12602,
5719,
11985,
268,
198,
1635,
198,
1635,
770,
2393,
318,
2426... | 2.820833 | 240 |
from core.entity.entity_exceptions import EntityFieldInvalid
class EntityField:
    """
    Defines the default entity field.

    The entity field is stored inside the Entity itself and contains rules how the user
    can read the field value of a certain entity and write some value to the same field.

    The EntityField can't be treated as validator. The goal of the EntityField is just
    guarantee that all information stored to all entity sources can be retrieved without
    being corrupted or refuse the user to do it if this is not possible
    """

    _default = None        # value assigned to a newly created entity
    _value_class = None    # callable used to cast an incoming value
    _min_length = None     # inclusive lower bound for len(value), if any
    _max_length = None     # inclusive upper bound for len(value), if any
    _min_value = None      # inclusive lower bound for the value itself, if any
    _max_value = None      # inclusive upper bound for the value itself, if any
    _description = None    # human-readable field description (used in messages)

    @staticmethod
    def identity(value):
        """
        Identity cast: returns its argument unchanged.

        Pass this as ``value_class`` when no type conversion is desired.

        :param value: some value
        :return: the same value
        """
        return value

    def __init__(self, value_class, min_length=None, max_length=None, min_value=None, max_value=None, default=None,
                 description: str = None):
        """
        Initializes the EntityField. After being initialized the EntityField shall be added inside the
        '_public_field_description' dictionary inside the Entity object

        :param value_class: Before writing to the field this entity value will be cast to a type given in this argument.
            Use EntityField.identity if you don't want such a cast.
        :param min_length: next, if len(value) is less than the value of this parameter, EntityFieldInvalid will be
            thrown. Not applied if this parameter equals to None.
        :param max_length: if len(value) is greater than this value, EntityFieldInvalid will be thrown.
            Not applied if this parameter equals to None.
        :param min_value: if value itself is less than this value, ValueError will be thrown.
            Not applied if this parameter equals to None.
        :param max_value: if value itself is greater than this value, ValueError will be thrown.
            Not applied if this parameter equals to None.
        :param default: Entity default value. Such value will be assigned to 'creating' entity by default
        :param description: The entity string description used for logging and debugging
        """
        self._value_class = value_class
        self._min_length = min_length
        self._max_length = max_length
        self._min_value = min_value
        self._max_value = max_value
        self._default = default
        # NOTE(review): str(None) yields the literal string 'None'; kept as-is
        # for backward compatibility -- confirm whether None should stay None.
        self._description = str(description)

    @property
    def default(self):
        """
        Returns the default value of this entity field.
        The default value will be assigned to the entity if the entity is newly created rather than loaded from the
        external source and such value is not passed through the default constructor

        :return: the default value of this entity.
        """
        return self._default

    @property
    def description(self):
        """
        Returns the entity description to use in logging and debugging

        :return: The entity description to use in logging and debugging
        """
        return self._description

    def proofread(self, value):
        """
        Proofreads the entity value. The method is called when the entity gets the field value to the user.
        Such value passes to the user itself who in turn proofreads such value

        :param value: the value stored in the entity as defined by one of the entity providers
        :return: the value given to the user
        """
        return value

    def correct(self, value):
        """
        Corrects the value before the user sets it to the entity field.

        :param value: the value that user wants to set
        :return: Actual value set to the entity
        :raises EntityFieldInvalid: when a length constraint is violated
        :raises ValueError: when a value-range constraint is violated
        """
        if value is None:
            raw_value = None
        else:
            raw_value = self._value_class(value)
        if raw_value is None:
            # A positive minimum length makes the field mandatory.
            # Fix: the exceptions below previously carried empty messages,
            # which made validation failures impossible to diagnose.
            if self._min_length is not None and self._min_length > 0:
                raise EntityFieldInvalid("The field '%s' is required and can't be empty" % self._description)
        else:
            if self._min_length is not None and len(raw_value) < self._min_length:
                raise EntityFieldInvalid("The value of the field '%s' is shorter than %d"
                                         % (self._description, self._min_length))
            if self._max_length is not None and len(raw_value) > self._max_length:
                raise EntityFieldInvalid("The value of the field '%s' is longer than %d"
                                         % (self._description, self._max_length))
        # NOTE(review): as in the original, the range checks below are also
        # reached when raw_value is None (TypeError on comparison) -- confirm.
        if self._min_value is not None and raw_value < self._min_value:
            raise ValueError("The value is too low")
        if self._max_value is not None and raw_value > self._max_value:
            raise ValueError("The value is too high")
        return raw_value
6738,
4755,
13,
26858,
13,
26858,
62,
1069,
11755,
1330,
20885,
15878,
44651,
628,
198,
4871,
20885,
15878,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2896,
1127,
262,
4277,
9312,
2214,
13,
628,
220,
220,
220,
383,
9312,
2214,... | 2.830723 | 1,660 |
from absl import app, flags, logging
import tfne
# Command-line flags: each one optionally overrides the hard-coded defaults
# set inside codeepneat_xor_example() below (None means "not supplied").
flags.DEFINE_integer('logging_level',
                     default=None, help='Integer parameter specifying the verbosity of the absl logging library')
flags.DEFINE_string('config_file',
                    default=None, help='String parameter specifying the file path to the configuration file used for '
                                       'the TFNE evolutionary process')
flags.DEFINE_string('backup_dir',
                    default=None, help='String parameter specifying the directory path to where the TFNE state backups '
                                       'should be saved to')
flags.DEFINE_integer('max_generations',
                     default=None, help='Integer parameter specifying the intended maximum number of generations the '
                                        'population should be evolved')
flags.DEFINE_float('max_fitness',
                   default=None, help='Float parameter specifying the fitness of the best genome at which point the '
                                      'evolutionary process should preemptively end')
def codeepneat_xor_example(_):
    """
    This Example evolves a CoDeepNEAT population on the XOR problem for 100 generations, using dynamic speciation for
    the modules and blueprints. Subsequently the best genome is trained for a final 100 epochs and its genotype and
    Tensorflow model are backed up.

    :param _: unused positional argument list supplied by absl's app.run()
    """
    # Set standard configuration specific to TFNE but not the neuroevolution process
    logging_level = logging.INFO
    config_file_path = './codeepneat_xor_dynamic_example_config.cfg'
    backup_dir_path = './tfne_state_backups/'
    max_generations = 100
    max_fitness = None
    # Read in optionally supplied flags, changing the just set standard configuration
    if flags.FLAGS.logging_level is not None:
        logging_level = flags.FLAGS.logging_level
    if flags.FLAGS.config_file is not None:
        config_file_path = flags.FLAGS.config_file
    if flags.FLAGS.backup_dir is not None:
        backup_dir_path = flags.FLAGS.backup_dir
    if flags.FLAGS.max_generations is not None:
        max_generations = flags.FLAGS.max_generations
    if flags.FLAGS.max_fitness is not None:
        max_fitness = flags.FLAGS.max_fitness
    # Set logging, parse config
    logging.set_verbosity(logging_level)
    config = tfne.parse_configuration(config_file_path)
    # Initialize the environment and the specific NE algorithm
    environment = tfne.environments.XOREnvironment(weight_training=True, config=config, verbosity=logging_level)
    ne_algorithm = tfne.algorithms.CoDeepNEAT(config)
    # Initialize evolution engine and supply config as well as initialized NE algorithm and evaluation environment.
    engine = tfne.EvolutionEngine(ne_algorithm=ne_algorithm,
                                  environment=environment,
                                  backup_dir_path=backup_dir_path,
                                  max_generations=max_generations,
                                  max_fitness=max_fitness)
    # Start training process, returning the best genome when training ends
    best_genome = engine.train()
    print("Best genome returned by evolution:\n")
    print(best_genome)
    # Increase epoch count in environment for a final training of the best genome. Train the genome and then replay it.
    print("Training best genome for 100 epochs...\n")
    environment.epochs = 100
    environment.eval_genome_fitness(best_genome)
    environment.replay_genome(best_genome)
    # Serialize and save genotype and Tensorflow model to demonstrate serialization
    best_genome.save_genotype(save_dir_path='./best_genome_genotype/')
    best_genome.save_model(file_path='./best_genome_model/')
# Script entry point: hand control to absl, which parses the flags above.
if __name__ == '__main__':
    app.run(codeepneat_xor_example)
| [
6738,
2352,
75,
1330,
598,
11,
9701,
11,
18931,
198,
198,
11748,
48700,
710,
198,
198,
33152,
13,
7206,
29940,
62,
41433,
10786,
6404,
2667,
62,
5715,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.629121 | 1,456 |
from ...utilities import db, moocdb_utils
from common import * | [
6738,
2644,
315,
2410,
1330,
20613,
11,
6941,
420,
9945,
62,
26791,
198,
6738,
2219,
1330,
1635
] | 3.647059 | 17 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Hyperparameters for a NuPIC HTM (hierarchical temporal memory) prediction
# model; this dict is typically passed to ModelFactory.create().
MODEL_PARAMS = {
    "model": "HTMPrediction",
    "version": 1,
    "predictAheadTime": None,
    "modelParams": {
        # Sensor layer: encoders turn raw input fields into SDRs.
        "sensorParams": {
            "verbosity": 0,
            "encoders": {
                "timestamp_timeOfDay": {
                    "fieldname": "timestamp",
                    "timeOfDay": [
                        21,
                        1
                    ],
                    "type": "DateEncoder",
                    "name": "timestamp_timeOfDay"
                },
                "value": {
                    "fieldname": "value",
                    "seed": 1,
                    "resolution": 0.88,
                    "name": "value",
                    "type": "RandomDistributedScalarEncoder"
                },
                "timestamp_weekend": {
                    "fieldname": "timestamp",
                    "type": "DateEncoder",
                    "name": "timestamp_weekend",
                    "weekend": 21
                }
            },
            "sensorAutoReset": None
        },
        # Spatial pooler parameters (C++ implementation).
        "spParams": {
            "columnCount": 2048,
            "spVerbosity": 0,
            "localAreaDensity": -1.0,
            "spatialImp": "cpp",
            "inputWidth": 946,
            "synPermInactiveDec": 0.005,
            "synPermConnected": 0.1,
            "synPermActiveInc": 0.04,
            "seed": 1956,
            "numActiveColumnsPerInhArea": 40,
            "boostStrength": 3.0,
            "globalInhibition": 1,
            "potentialPct": 0.85
        },
        "trainSPNetOnlyIfRequested": False,
        # Classifier region: predicts 1 and 5 steps ahead.
        "clParams": {
            "steps": "1,5",
            "maxCategoryCount": 1000,
            "implementation": "cpp",
            "alpha": 0.1,
            "verbosity": 0,
            "regionName": "SDRClassifierRegion"
        },
        # Temporal memory parameters (C++ implementation).
        "tmParams": {
            "columnCount": 2048,
            "pamLength": 1,
            "permanenceInc": 0.1,
            "outputType": "normal",
            "initialPerm": 0.21,
            "seed": 1960,
            "maxSegmentsPerCell": 128,
            "temporalImp": "cpp",
            "activationThreshold": 16,
            "cellsPerColumn": 32,
            "permanenceDec": 0.1,
            "minThreshold": 12,
            "verbosity": 0,
            "maxSynapsesPerSegment": 32,
            "globalDecay": 0.0,
            "newSynapseCount": 20,
            "maxAge": 0,
            "inputWidth": 2048
        },
        "tmEnable": True,
        "spEnable": True,
        "inferenceType": "TemporalMultiStep"
    }
}
| [
2,
16529,
23031,
198,
2,
399,
1713,
64,
19193,
329,
49452,
38589,
357,
45,
84,
47,
2149,
8,
198,
2,
15069,
357,
34,
8,
2211,
11,
399,
1713,
64,
11,
3457,
13,
220,
17486,
345,
423,
281,
4381,
198,
2,
351,
399,
1713,
64,
11,
345... | 2.323326 | 1,299 |
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel
from PyQt5.QtGui import QFont
from PyQt5.QtCore import QTimer, QTime, Qt
# Create the Qt application, show the demo widget, and enter the event loop.
app = QApplication(sys.argv)
# NOTE(review): AppDemo is not defined in this fragment -- presumably declared
# elsewhere in the original file; confirm before running standalone.
demo = AppDemo()
demo.show()
# NOTE(review): the conventional idiom is sys.exit(app.exec_()); here
# app.exit(...) runs only after the event loop has already returned.
app.exit(app.exec_())
11748,
25064,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
38300,
11,
1195,
53,
14253,
32517,
11,
1195,
33986,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1195,
23252,
198,
673... | 2.428571 | 98 |
from files import *
from invoice_math import *
from invoice_doc import *
# Script flow: collect audio-file metadata, derive durations and costs,
# print a per-file summary, then emit the invoice document.
Files = InvoiceFiles()
Calculations = InvoiceMath()
Document = InvoiceDocument()
data = Files.get_audio_information()
# Both calls mutate `data` in place; afterwards each value appears to hold
# (length, cost) -- presumably, confirm against InvoiceMath.
Calculations.convert_times(data)
Calculations.add_costs(data)
for k, v in data.items():
    print("File: {}\t Length: {}\t Cost: {}".format(k, v[0], v[1]))
print("Total:", Calculations.get_total())
Document.create_document(data)
| [
6738,
3696,
1330,
1635,
198,
6738,
45458,
62,
11018,
1330,
1635,
198,
6738,
45458,
62,
15390,
1330,
1635,
198,
198,
25876,
796,
10001,
2942,
25876,
3419,
198,
9771,
3129,
602,
796,
10001,
2942,
37372,
3419,
198,
24941,
796,
10001,
2942,
... | 2.891156 | 147 |
from .random_player import *
| [
6738,
764,
25120,
62,
7829,
1330,
1635,
198
] | 3.625 | 8 |
# -*- coding: utf-8 -*-
"""
Testing of both scripts: :mod:`cardassembler` and :mod:`blueprint`.
.. note::
Run this script directly to run a :mod:`unittest`.
"""
import os
import re
import sys
import unittest
import xml.etree.ElementTree as ET
import pycodestyle
import blueprint
# Bypass internal Gimp's python gimpfu package imported
# by :mod:`cardassembler`.
# Registering the mock in sys.modules BEFORE importing cardassembler makes
# its `import gimpfu` resolve to the stub instead of the real Gimp package.
from my_mock import Gimpfu as Mock_Gimpfu
sys.modules['gimpfu'] = Mock_Gimpfu()
import cardassembler # nopep8
import toolbox # nopep8
# exit=False keeps the interpreter alive after the test run (useful when
# executed from an interactive session).
if __name__ == '__main__':
    unittest.main(exit=False)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
44154,
286,
1111,
14750,
25,
1058,
4666,
25,
63,
9517,
34455,
1754,
63,
290,
1058,
4666,
25,
63,
17585,
4798,
44646,
198,
198,
492,
3465,
3712,
198,
220,
2... | 2.770732 | 205 |
"""
This package provides the Python interface to functionality relating to
the Panda3D Runtime environment.
.. deprecated:: 1.10.0
The p3d packaging system has been replaced with the new setuptools-based
system. See the :ref:`distribution` manual section.
"""
| [
37811,
198,
1212,
5301,
3769,
262,
11361,
7071,
284,
11244,
11270,
284,
198,
1169,
41112,
18,
35,
43160,
2858,
13,
198,
198,
492,
39224,
3712,
352,
13,
940,
13,
15,
198,
220,
220,
383,
279,
18,
67,
16846,
1080,
468,
587,
6928,
351,
... | 3.788732 | 71 |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import string
import tempfile
import zipfile
from indico.core.config import config
from indico.legacy.common.utils import utf8rep
| [
2,
770,
2393,
318,
636,
286,
1423,
3713,
13,
198,
2,
15069,
357,
34,
8,
6244,
532,
13130,
327,
28778,
198,
2,
198,
2,
1423,
3713,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
13096,
340,
739,
262,
2846,... | 3.423077 | 104 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 3.263158 | 38 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test fixtures and UI locators (CSS selectors unless noted) for the
# prozorro.kapital-ist.kiev.ua tender workflow tests.
# NOTE(review): credentials are stored in plaintext in source control --
# consider moving them to environment variables / a secrets store.
owner_users = {
    'email': 'qa_test@binka.me',
    'password': 'Password1-'
}
provider_users = {
    'provider_user@binka.me': 'Password1-',
    'provider1_user@binka.me': 'Password1-',
    'supplier11@gmail.com': 'Kapital-Ist',
    'supplier12@gmail.com': 'Kapital-Ist',
    'supplier13@gmail.com': 'Kapital-Ist'
}
broker = {'url': 'https://prozorro.kapital-ist.kiev.ua'}
# login
login_button = '#loginLink'
username_field = '#Email'
pass_field = '#Password'
submit_login_button = 'body > div.body-wrapper > div > div > form > div:nth-child(4) > div > input'
# create tender
create_tender_url = 'https://prozorro.kapital-ist.kiev.ua/draft/belowThreshold/createTender'
input_title = '#Title'
input_description = '#Description'
input_start_enquiry = '#EnquiryPeriod_StartDate_Local'
input_end_enquiry = '#EnquiryPeriod_EndDate_Local'
input_start_tender = '#TenderPeriod_StartDate_Local'
input_end_tender = '#TenderPeriod_EndDate_Local'
# 6/1/2017 13:00 AM format
save_draft = 'body > div.body-wrapper > div > div > form > div:nth-child(6) > div > input'
add_lot = '#draftTender > fieldset:nth-child(5) > a:nth-child(5)'
input_lot_title = '#Title'
input_lot_description = '#Description'
input_value_amount = 'body > div.body-wrapper > div > div > form > div:nth-child(5) > div.form-group > div > span.k-widget.k-numerictextbox.currency.text-box.single-line > span > input.k-formatted-value.currency.text-box.single-line.k-input'
input_min_step = '#MinimalStep_Amount'
save_draft2 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
add_item = '#draftTender > fieldset:nth-child(6) > a:nth-child(5)'
input_item_description = '#Description'
select_cpv = '#ListCPVTitle'
# Raw string: the CPV id starts with a digit, hence the CSS escape '\30 '.
select_cpv_1item = r'#\30 3000000-1_anchor'
cpv_selected = '#SelectedCPV'
select_unit = '#UnitId_chosen > a'
select_unit1 = '#UnitId_chosen > div > ul > li:nth-child(1)'
input_quantity = '#Quantity'
input_delivery_start_date = '#DeliveryDate_StartDate_Local'
input_delivery_end_date = '#DeliveryDate_EndDate_Local'
input_dropdown_region = 'body > div.body-wrapper > div > div > form > div:nth-child(11) > div:nth-child(5) > div > span.k-widget.k-combobox.k-header.form-control.text-box.single-line > span > input'
input_postal_code = '#DeliveryAddress_PostalCode'
input_locality = '#DeliveryAddress_Locality'
input_delivery_address = '#DeliveryAddress_Street'
save_draft3 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
add_doc_button = '#draftTender > fieldset:nth-child(5) > a:nth-child(7)'
doc_title = '#Description'
doc_input = '#Document'
save_draft4 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
submit_create_tender = '#submitPublish'
# search for tender
tender_get_id_locator = 'body > div.body-wrapper > div > div > h3 > a' # xpath UA-2017-05-30-000023
# go to create tender url
select_search_type = 'body > div.body-wrapper > div > div > div:nth-child(2) > a:nth-child(2)'
input_search_field = '#ProcurementNumber'
search_tender_button = '#search'
select_tender = '#tender-table > div > table > tbody > tr > td:nth-child(1) > a'
select_bids = '#tabstrip > li:nth-child(2) > a'
make_bid_button = '#bids > div > div > a'
select_lot = '#form0 > div.modal-body > div > div.lots > div.form-group > div > label > span.cr'
# XPath locator (not CSS), used for the Kendo-formatted amount input.
input_bid_amount = '//input[@class="k-formatted-value currency text-box single-line k-input"]'
input_bid_doc = '#files'
# doc - add_doc
submit_bid_button = '#form0 > div.modal-footer > input'
delete_bid_button = '#bids > div > fieldset:nth-child(1) > div > div.col-md-2 > a'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
18403,
62,
18417,
796,
1391,
198,
220,
220,
220,
705,
12888,
10354,
705,
20402,
62,
9288,
31,
65,
48955,
13,
1326,
32... | 2.491156 | 1,470 |
"""
Copyright 2018 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import shelve
#from collections import Counter
import parseundp
from PyPDF2 import PdfFileWriter, PdfFileReader
from difflib import SequenceMatcher as sm
from collections import OrderedDict
import os
#Loading the ground truth
#First we need a function to load our previous RIA data. This represents the ground truth as to what sentences match certain targets.
#If we are testing with a country which we have a completed RIA for, we exclude that RIA. We also have functionality to only
#load the target descriptions.
def loadTruth(template_data_path, exclude_ria = [], targets = None):
'''
If the ground truth matches aren't already saved, extract the ground truth from all prior RIA template2 and
create the target matches dictionary. Save this for future use. If a certain RIA is to be excluded, create
a new dictionary that won't be saved.
Args:
template_data_path (string) : Directory to the RIA templates documents.
exclude_ria (list[string]) : List of files to be excluded from RIA data extraction.
targets (dict) : Target matches dictionary of just target descriptions.
Returns:
target_matches (dict) : Dictionary of ground truth matches for each target.
'''
if targets:
target_matches = targets
elif len(exclude_ria) > 0:
development_matches = parseundp.extract_template_data(template_data_path, exclude_ria)
target_matches = parseundp.create_target_dictionary(development_matches)
else:
try:
shelf = shelve.open('undp')
target_matches = shelf['targets']
shelf.close()
except:
shelf.close()
development_matches = parseundp.extract_template_data(template_data_path)
target_matches = parseundp.create_target_dictionary(development_matches)
shelf = shelve.open('undp')
shelf['targets'] = target_matches
shelf.close()
return target_matches
def getTargetDoc(template_data_path, exclude_ria=[], targets=None):
    '''
    Append the results of all prior RIAs to the corresponding target description.

    Args:
        template_data_path (string) : Directory to the RIA templates documents.
        exclude_ria (list[string])  : List of files to be excluded from RIA data extraction.
        targets (dict)              : Target matches dictionary of just target descriptions.

    Returns:
        target_documents (dict) : Dictionary of ground truth matches appended to its corresponding target description.
                                  The dictionary value is a list of a single item (one space-joined string).
    '''
    # Bug fix: 'targets' was accepted and documented but never forwarded,
    # so a caller-supplied target dictionary was silently ignored.
    prior_ria_matches = loadTruth(template_data_path, exclude_ria, targets)
    target_documents = {}
    for target, matches in prior_ria_matches.items():
        # Concatenate only string entries; every piece keeps a trailing
        # space, matching the original '+= val; += " "' behaviour.
        doc = ''.join(piece + ' ' for piece in matches if isinstance(piece, str))
        target_documents[target] = [doc]
    return target_documents
def getInfo(par_vec, target_matches, targets_only=False):
    '''
    Flatten a target-matches dictionary into the three parallel structures the
    search code needs: a sentence->target lookup, the embedded vectors, and
    the raw sentences themselves.

    Args:
        par_vec (CustomParVec) : Embedding model used to infer a vector per sentence.
        target_matches (dict)  : Target matches dictionary.
        targets_only (bool)    : If True, embed each target's value as a single
                                 string instead of one entry per sentence.

    Returns:
        targs (dict), targ_vecs (list), sents (list) : dictionary of sentences and the target they match,
        list of embedded ground truth vectors, list of sentences.
    '''
    sentence_to_target = {}
    vectors = []
    sentences = []
    for target, matches in target_matches.items():
        # Either one combined string per target, or one string per match.
        texts = [str(matches)] if targets_only else [str(entry) for entry in matches]
        for text in texts:
            sentences.append(text)
            vectors.append(par_vec.inferVector(text))
            sentence_to_target[text] = target
    return sentence_to_target, vectors, sentences
def convertPdf(document_conversion, config, file):
    '''
    Convert a pdf file into a txt file.

    Args:
        document_conversion (DocumentConversionV1) : Instance of the Document Conversion service.
        config (dict)                              : A config object that defines tags and structure
                                                     in the conversion output.
        file (string)                              : path/filename to be converted.

    Returns:
        void: No return value. Txt file will be saved to the same directory
              as code.
    '''
    with open(file, 'rb') as pdf_file:
        try:
            response = document_conversion.convert_document(document=pdf_file, config=config)
            # 'with' guarantees the output handle is closed even when write()
            # fails (the original leaked the handle on a write error).
            with open(file[:-4] + '.txt', 'w') as text:
                text.write(response.text)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; per-file failures are still reported
            # without aborting the batch.
            print(file, 'FAILED')
#def createPage(documents_path, policy_document):
# inputpdf = PdfFileReader(os.path.join(documents_path, policy_document), "rb")
# for i in range(inputpdf.numPages):
# output = PdfFileWriter()
# output.addPage(inputpdf.getPage(i))
# newname = policy_document[:-4] + "-" + str(i+1) + '.pdf'
# outputStream = open(newname, "wb")
# output.write(outputStream)
# outputStream.close()
# convertPdf(document_conversion, config, newname)
# os.remove(newname)
#Watson DocumentConversion service was used. That service has been deprecated and is no longer available
#pdf documents have to be converted to text to use this code
#from watson_developer_cloud import DocumentConversionV1
#import os
#usr = 'REPLACE BY UID'
#pswd = 'REPLACE BY PWD'
#document_conversion = DocumentConversionV1( # Create instance of document conversion service
# username = usr,
# password = pswd,
# version = '2015-12-15'
#)
#config = {
# 'conversion_target': 'NORMALIZED_TEXT' # Specify configuration to convert document to txt file.
#}
#Functions to conduct a RIA
#We will use our custom embedding technique to infer vectors for each of our ground truth sentences.
#Then, given the policy documents for a country that we wish to produce the RIA for, we will compare the
#similarity of each sentence/paragraph with the sentences from the ground truth.
#Those sentences with the highest cosine similarity will be marked as matching the same target as the ground truth.
def ria(documents_path, policy_documents, model, sents, targ_vecs, targs):
    '''
    Find the sentences/paragaraphs of policy documents that most match each target.

    Args:
        documents_path (string)      : Directory holding all documents.
        policy_documents (list[str]) : List of policy documents for country RIA is to be conducted for.
        model (CustomParVec)         : Embedding model to be used.
        sents (list[str])            : list of ground truth sentences to enhance semantic searching.
        targ_vecs (list[np.array])   : list of vector embeddings for those ground truth sentences.
        targs (dict)                 : Dictionary of sentence to target.

    Returns:
        score_dict (dict) : dictionary of target to a set of (score, line) pairs that match the target.
    '''
    score_dict = {}
    for policy_document in policy_documents:
        with open(os.path.join(documents_path, policy_document),encoding="utf8") as file:
            for line in file:
                # Skip short lines (headers, page numbers, noise);
                # the 30-character cutoff is a heuristic.
                if len(line) > 30:
                    # 125 candidates at a 0.01 similarity floor -- presumably
                    # tuned empirically; TODO confirm against CustomParVec.
                    top_matches = model.getMostSimilar(line, 125, 0.01, sents, targ_vecs)
                    for match in top_matches:
                        # match is (similarity, ground-truth sentence); map the
                        # ground-truth sentence back to its target.
                        key = targs[match[1]]
                        if key in score_dict:
                            score_dict[key].add((match[0], line))
                        else:
                            score_dict[key] = set({(match[0], line)})
    return score_dict
def riaPDF(documents_path, policy_documents, model, sents, targ_vecs, targs):
    '''
    Find the sentences/paragaraphs of PDF policy documents that most match each target,
    keeping track of the source document and page number for every match.

    Args:
        documents_path (string)      : Directory holding all documents.
        policy_documents (list[str]) : List of policy documents for country RIA is to be conducted for.
        model (CustomParVec)         : Embedding model to be used.
        sents (list[str])            : list of ground truth sentences to enhance semantic searching.
        targ_vecs (list[np.array])   : list of vector embeddings for those ground truth sentences.
        targs (dict)                 : Dictionary of sentence to target.

    Returns:
        score_dict (dict) : dictionary of target to a set of (score, line, document, page) tuples.
    '''
    score_dict = {}
    for policy_document in policy_documents:
        try:
            inputpdf = PdfFileReader(os.path.join(documents_path, policy_document), "rb")
        except:
            print(policy_document, 'FAILED')
            continue
        # Split the PDF one page at a time so every match can be attributed
        # to its page number (i+1 below).
        for i in range(inputpdf.numPages):
            output = PdfFileWriter()
            output.addPage(inputpdf.getPage(i))
            newname = policy_document[:-4] + "-" + str(i+1) + '.pdf'
            outputStream = open(newname, "wb")
            output.write(outputStream)
            outputStream.close()
            #NEEDS TO BE COMMENTED OUT WHEN COVERTPDF IS RE_WRITTEN convertPdf(document_conversion, config, newname)
            # NOTE(review): with the conversion call above commented out, the
            # .txt open below will fail and only print a failure -- confirm.
            try:
                with open(newname[:-4]+'.txt') as file:
                    for line in file:
                        # Same 30-character noise filter as ria().
                        if len(line) > 30:
                            #print(line)
                            top_matches = model.getMostSimilar(line, 125, 0.01, sents, targ_vecs)
                            for match in top_matches:
                                key = targs[match[1]]
                                if key in score_dict:
                                    #print('here1')
                                    score_dict[key].add((match[0], line, policy_document, i+1))
                                else:
                                    score_dict[key] = set({(match[0], line, policy_document, i+1)})
                                    #print('here1')
            except:
                print(newname[:-4]+'.txt failed')
                continue
            # Clean up the temporary single-page PDF and its text conversion.
            os.remove(newname)
            os.remove(newname[:-4]+'.txt')
    return score_dict
#Functions to view RIA Results
def get_matches2(target, target_dict, num_matches=1000):
    '''
    Return up to num_matches matches of a target, ordered by descending cosine
    similarity, each as a 3-item list [page number, document, text].

    The page-number and document slots are currently left blank; they are only
    populated once the PDF reader path is enabled (see note below).

    Args:
        target (string)    : Target to return matches for.
        target_dict (dict) : Dictionary of target matches.
        num_matches (int)  : Number of matches to be returned.

    Returns:
        list[[str, str, str]] : up to num_matches rows for the specified target.
    '''
    ranked = sorted(target_dict[target], reverse=True)
    rows = []
    for entry in ranked[:num_matches]:
        rows.append(['', '', entry[1]])
    # PDF CHANGE: once the PDF reader methods are implemented, emit
    # [entry[3], entry[2], entry[1]] per row instead of the blanks above.
    return rows
def get_matches(target, target_dict, num_matches=1000):
    '''
    Return up to num_matches unique sentences for a target, ordered by
    descending cosine similarity.

    Duplicate sentences are collapsed so that each sentence keeps the position
    of its LAST (lowest-scoring) occurrence, exactly mirroring the original
    reversed OrderedDict.fromkeys de-duplication trick.

    Args:
        target (string)    : Target to return matches for.
        target_dict (dict) : Dictionary of target matches.
        num_matches (int)  : Number of matches to be returned.

    Returns:
        (list) : List of num_matches sentences/paragraphs that correspond to the specified target.
    '''
    ranked = [entry[1] for entry in sorted(target_dict[target], reverse=True)]
    unique = []
    for sentence in ranked:
        if sentence in unique:
            # Seen before: move it to its later (current) position.
            unique.remove(sentence)
        unique.append(sentence)
    return unique[:num_matches]
#Functions to evaluate RIA Results
def evaluateByTarget(score_dict, test_target_matches, num):
    '''
    Finds matches with prior RIA as the number of sentences outputted increases.
    For each target, after each emitted result the cumulative fraction of the
    prior RIA's ground-truth sentences recovered so far is recorded.

    Args:
        score_dict (dict)          : Our target matches.
        test_target_matches (dict) : Target matches from prior RIA.
        num (int)                  : Number of output sentences to match.

    Returns:
        (dict) : Dictionary of how many matches (as a fraction) were found after each sentence per target.
    '''
    truths = []
    match_by_sent = {}
    truth_dict = {}
    # Collects borderline matches (ratio in (0.50, 0.55)) for manual review;
    # currently not returned.
    check = []
    for target in score_dict.keys():
        for result in get_matches(target, score_dict, num):
            # Only evaluate targets the prior RIA actually has matches for
            # (the first entry appears to be the target description itself,
            # hence the "> 1" and the "-1" denominators below -- confirm).
            if target in test_target_matches and len(test_target_matches[target]) > 1:
                sentences = result.split('.')
                for sent in sentences:
                    for ground_truth in test_target_matches[target]:
                        # difflib.SequenceMatcher ratio in [0, 1].
                        score = sm(None, ground_truth, sent).ratio()
                        if score > 0.50:
                            if score < .55:
                                check.append((ground_truth, sent))
                            # Record each ground-truth sentence at most once
                            # globally (the 'truths' list guards duplicates).
                            if target in truth_dict and ground_truth not in truths:
                                truths.append(ground_truth)
                                truth_dict[target].append(ground_truth)
                            elif target not in truth_dict and ground_truth not in truths:
                                truth_dict[target] = [ground_truth]
                                truths.append(ground_truth)
            # Append the running recall after this result (0 until the first hit).
            if target in truth_dict:
                if target in match_by_sent:
                    match_by_sent[target].append(len(truth_dict[target])/(len(test_target_matches[target])-1))
                else:
                    match_by_sent[target] = [len(truth_dict[target])/(len(test_target_matches[target])-1)]
            else:
                if target in match_by_sent:
                    match_by_sent[target].append(0)
                else:
                    match_by_sent[target] = [0]
    return match_by_sent
def avgMatches(match_by_sent, test_target_matches, num):
    '''
    Finds the average percent matches with prior RIA for all targets as the number
    of sentences outputted increases. The average is weighted by each target's
    number of ground-truth sentences (len - 1, excluding the description entry).

    Args:
        match_by_sent (dict)       : Dictionary of percent matches by target per sentence.
        test_target_matches (dict) : Target matches from prior RIA.
        num (int)                  : Number of output sentences to match.

    Returns:
        (list) : Weighted average recall after each of the num output sentences.
    '''
    avg_new = []
    for i in range(num):
        adder, counter = 0, 0
        for key in match_by_sent:
            try:
                adder += (match_by_sent[key][i] * (len(test_target_matches[key])-1))
                counter += (len(test_target_matches[key])-1)
            except:
                # A target with fewer than i+1 recorded results contributes its
                # final (plateau) value instead -- IndexError is expected here,
                # though the bare except also hides KeyError on missing targets.
                adder += (match_by_sent[key][-1] * (len(test_target_matches[key])-1))
                counter += (len(test_target_matches[key])-1)
        avg_new.append(adder/counter)
    return avg_new
#Functions to generate the final output
def getResults(score_dict, num_matches):
    '''
    Retrieve the top matches for every target in a score_dict.

    Args:
        score_dict (dict)  : target/result dictionary.
        num_matches (int)  : Number of matches to be returned per target.

    Returns:
        (dict) : target mapped to its list of top matches.
    '''
    # Per-target selection is delegated to get_matches2, which emits
    # [page, document, text] rows (page/document aware output).
    return {target: get_matches2(target, score_dict, num_matches)
            for target in score_dict}
import pandas as pd
import xlsxwriter
from openpyxl import load_workbook
def generateSpreadsheet(results, name):
    '''
    Generate an excel spreadsheet of the results in which each sheet
    corresponds to a target.  The first column is left blank for evaluation,
    the second column is the page number, the third the origin document, and
    the fourth the target match.

    Args:
        results (dict) : target/result dictionary.
        name (string) : Spreadsheet name.
    '''
    # Build a column-per-target frame so each target can be written to its
    # own worksheet.
    frame = pd.DataFrame.from_dict(results, orient='index').transpose()
    workbook = xlsxwriter.Workbook(name)
    # One worksheet per target, ordered numerically by target id.
    for target in sorted(frame, key=float):
        worksheet = workbook.add_worksheet(str(target))
        # Column 0 is intentionally left blank for manual evaluation.
        for row_idx in range(len(frame[target])):
            worksheet.write_row(row_idx, 1, frame[target].loc[row_idx])
    workbook.close()
#Functions to update the model for the next RIA
def getUpdates(excel_results):
    '''
    Gets the sentences that were properly matched to a target, as evaluated by
    policy experts, from a conducted RIA.

    A row counts as approved when its first column holds a 1; the matched
    sentence itself is read from the fourth column.  Only the first five rows
    of each sheet are inspected.

    Args:
        excel_results (string): Excel workbook with evaluated results.
    Returns:
        (dict) : sheet (target) name mapped to the list of approved sentences.
    '''
    workbook = load_workbook(excel_results)
    updates = {}
    for sheet_name in workbook.get_sheet_names():
        worksheet = workbook.get_sheet_by_name(sheet_name)
        approved = []
        for row_idx in range(1, 6):
            # Column 1 carries the expert's verdict, column 4 the sentence.
            if worksheet.cell(row = row_idx, column = 1).value == 1:
                approved.append(worksheet.cell(row = row_idx, column = 4).value)
        updates[sheet_name] = approved
    return updates
def updateGroundTruth(new_truths):
    '''
    Updates the model with the new truth from the most recent RIA.

    The accepted sentences for each target are appended to the stored
    ground-truth text for that target in the 'RIA_Data' shelf.

    Args:
        new_truths (dict): Dictionary of new sentence matches (value: list)
            for the target (key)
    '''
    # Load the persisted ground truth; fall back to an empty mapping when the
    # shelf is new or has no 'ground_truth' entry yet.  (The original bare
    # `except` referenced `shelf` before assignment when open() itself failed,
    # and left `ground_truth` undefined on any error.)
    try:
        with shelve.open('RIA_Data') as shelf:
            ground_truth = shelf.get('ground_truth', {})
    except OSError:
        ground_truth = {}
    for target, sentences in new_truths.items():
        # Concatenate the accepted sentences, space separated; skip anything
        # that is not a string (e.g. evaluation markers).
        doc = ''
        for val in sentences:
            if type(val) == str:
                doc += val
                doc += ' '
        # Previously-unseen targets start with an empty text entry.
        ground_truth.setdefault(target, [''])[0] += doc
    # Persist the merged ground truth back to the shelf.
    try:
        with shelve.open('RIA_Data') as shelf:
            shelf['ground_truth'] = ground_truth
    except OSError:
        pass
| [
37811,
201,
198,
220,
220,
15069,
2864,
19764,
10501,
201,
198,
201,
198,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
1... | 2.248807 | 8,798 |
import pygame
class KEY:
    """
    All the useful keyboard key constants, re-exported from pygame under
    friendlier snake_case names so callers need not use pygame.K_* directly.
    """
    # --- editing / control keys ---
    backspace = pygame.K_BACKSPACE
    tab = pygame.K_TAB
    clear = pygame.K_CLEAR
    return_ = pygame.K_RETURN
    pause = pygame.K_PAUSE
    escape = pygame.K_ESCAPE
    space = pygame.K_SPACE
    # --- printable symbols ---
    exclaim = pygame.K_EXCLAIM
    quotedbl = pygame.K_QUOTEDBL
    hash_ = pygame.K_HASH
    dollar = pygame.K_DOLLAR
    ampersand = pygame.K_AMPERSAND
    quote = pygame.K_QUOTE
    left_parenthesis = pygame.K_LEFTPAREN
    right_parenthesis = pygame.K_RIGHTPAREN
    asterisk = pygame.K_ASTERISK
    plus_sign = pygame.K_PLUS
    comma = pygame.K_COMMA
    minus_sign = pygame.K_MINUS
    period = pygame.K_PERIOD
    forward_slash = pygame.K_SLASH
    # --- top-row digits ---
    num_0 = pygame.K_0
    num_1 = pygame.K_1
    num_2 = pygame.K_2
    num_3 = pygame.K_3
    num_4 = pygame.K_4
    num_5 = pygame.K_5
    num_6 = pygame.K_6
    num_7 = pygame.K_7
    num_8 = pygame.K_8
    num_9 = pygame.K_9
    # --- more symbols ---
    colon = pygame.K_COLON
    semicolon = pygame.K_SEMICOLON
    less_than_sign = pygame.K_LESS
    equals_sign = pygame.K_EQUALS
    greater_than_sign = pygame.K_GREATER
    question_mark = pygame.K_QUESTION
    at = pygame.K_AT
    left_bracket = pygame.K_LEFTBRACKET
    backslash = pygame.K_BACKSLASH
    right_bracket = pygame.K_RIGHTBRACKET
    caret = pygame.K_CARET
    underscore = pygame.K_UNDERSCORE
    grave = pygame.K_BACKQUOTE
    # --- letters ---
    a = pygame.K_a
    b = pygame.K_b
    c = pygame.K_c
    d = pygame.K_d
    e = pygame.K_e
    f = pygame.K_f
    g = pygame.K_g
    h = pygame.K_h
    i = pygame.K_i
    j = pygame.K_j
    k = pygame.K_k
    l = pygame.K_l
    m = pygame.K_m
    n = pygame.K_n
    o = pygame.K_o
    p = pygame.K_p
    q = pygame.K_q
    r = pygame.K_r
    s = pygame.K_s
    t = pygame.K_t
    u = pygame.K_u
    v = pygame.K_v
    w = pygame.K_w
    x = pygame.K_x
    y = pygame.K_y
    z = pygame.K_z
    delete = pygame.K_DELETE
    # --- numeric keypad ---
    keypad_0 = pygame.K_KP0
    keypad_1 = pygame.K_KP1
    keypad_2 = pygame.K_KP2
    keypad_3 = pygame.K_KP3
    keypad_4 = pygame.K_KP4
    keypad_5 = pygame.K_KP5
    keypad_6 = pygame.K_KP6
    keypad_7 = pygame.K_KP7
    keypad_8 = pygame.K_KP8
    keypad_9 = pygame.K_KP9
    keypad_period = pygame.K_KP_PERIOD
    keypad_divide = pygame.K_KP_DIVIDE
    keypad_multiply = pygame.K_KP_MULTIPLY
    keypad_minus = pygame.K_KP_MINUS
    keypad_plus = pygame.K_KP_PLUS
    keypad_enter = pygame.K_KP_ENTER
    keypad_equals = pygame.K_KP_EQUALS
    # --- navigation ---
    up_arrow = pygame.K_UP
    down_arrow = pygame.K_DOWN
    right_arrow = pygame.K_RIGHT
    left_arrow = pygame.K_LEFT
    insert = pygame.K_INSERT
    home = pygame.K_HOME
    end = pygame.K_END
    page_up = pygame.K_PAGEUP
    page_down = pygame.K_PAGEDOWN
    # --- function keys ---
    F1 = pygame.K_F1
    F2 = pygame.K_F2
    F3 = pygame.K_F3
    F4 = pygame.K_F4
    F5 = pygame.K_F5
    F6 = pygame.K_F6
    F7 = pygame.K_F7
    F8 = pygame.K_F8
    F9 = pygame.K_F9
    F10 = pygame.K_F10
    F11 = pygame.K_F11
    F12 = pygame.K_F12
    F13 = pygame.K_F13
    F14 = pygame.K_F14
    F15 = pygame.K_F15
    # --- locks and modifiers ---
    numlock = pygame.K_NUMLOCK
    capslock = pygame.K_CAPSLOCK
    scrollock = pygame.K_SCROLLOCK
    right_shift = pygame.K_RSHIFT
    left_shift = pygame.K_LSHIFT
    right_control = pygame.K_RCTRL
    left_control = pygame.K_LCTRL
    right_alt = pygame.K_RALT
    left_alt = pygame.K_LALT
    right_meta = pygame.K_RMETA
    left_meta = pygame.K_LMETA
    left_Windows_key = pygame.K_LSUPER
    right_Windows_key = pygame.K_RSUPER
    mode_shift = pygame.K_MODE
    # --- miscellaneous ---
    help_ = pygame.K_HELP
    print_screen = pygame.K_PRINT
    sysrq = pygame.K_SYSREQ
    break_ = pygame.K_BREAK
    menu = pygame.K_MENU
    power = pygame.K_POWER
    Euro = pygame.K_EURO
class EVENT:
    """
    Pygame event-type constants re-exported under friendlier names.
    """
    quit_ = pygame.QUIT
    key_down = pygame.KEYDOWN
    key_up = pygame.KEYUP
    mouse_motion = pygame.MOUSEMOTION
    mouse_button_up = pygame.MOUSEBUTTONUP
    mouse_button_down = pygame.MOUSEBUTTONDOWN
    video_resize = pygame.VIDEORESIZE
    video_expose = pygame.VIDEOEXPOSE
    # BUGFIX: this line previously ended with a stray ` | [` (corrupted text),
    # which is not valid code.
    user_event = pygame.USEREVENT
11748,
12972,
6057,
198,
4871,
35374,
25,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
1439,
262,
779,
12853,
10586,
493,
3622,
82,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
736,
13200,
796,
12972,
6057,
13,
42,
62,
31098,
43... | 1.456532 | 4,302 |
if __name__ == '__main__':
    # Demo: drive on the A tire until it wears out, then switch to the B tire.
    vehicle = Car()
    tire_a = ATire()
    tire_b = BTire()
    vehicle.set_tire(tire_a)
    for _ in range(6):
        vehicle.run()
    vehicle.set_tire(tire_b)
    vehicle.run()
| [
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1097,
796,
1879,
3419,
198,
220,
220,
220,
257,
62,
83,
557,
796,
5161,
557,
3419,
198,
220,
220,
220,
275,
62,
83,
557,
796,
22205,
557,... | 1.798165 | 109 |
# TensorFlow and tf.keras
import tensorflow as tf #https://adventuresinmachinelearning.com/python-tensorflow-tutorial/
#import keras
from sklearn.model_selection import train_test_split
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
#****************************************
#**********start of user block***********
# Input CSV scans used to train the network.
filename_list=['./NN_data/0MTM_scan_CORI_2.csv',
               './NN_data/0MTM_scan_PC.csv',
               './NN_data/0MTM_scan_CORI_1.csv',
               './NN_data/0MTM_scan_CORI_3_large_nu.csv']
epochs = 100
batch_size = 100
checkpoint_path='./tmp/checkpoint_gamma'
Read_from_checkpoint=False
#**********end of user block*************
#****************************************
#*********start of creating of model***************
# load_data() and create_model() are defined elsewhere in this project.
x_train, x_test, y_train, y_test=load_data(filename_list)
#*********start of trainning***********************
# Debug output of the held-out split before training.
print('x_test')
print(x_test)
print(y_test)
print(len(x_test)+len(x_train))
#input()
model,callback_func=create_model(checkpoint_path)
# Optionally resume training from the stored checkpoint weights.
if Read_from_checkpoint:
    model.load_weights(checkpoint_path)
history=model.fit(x_train, y_train, epochs=epochs,
            callbacks=callback_func,\
            validation_data=(x_test,y_test))
#save the model
model.save("./Trained_model/SLiM_NN_stabel_unstable.h5") # we can save the model and reload it at anytime in the future
#*********end of trainning***********************
from Post_plot_learning_rate import plot_hist
plot_hist(history)
| [
2,
309,
22854,
37535,
290,
48700,
13,
6122,
292,
198,
11748,
11192,
273,
11125,
355,
48700,
220,
1303,
5450,
1378,
324,
10065,
259,
30243,
40684,
13,
785,
14,
29412,
12,
83,
22854,
11125,
12,
83,
44917,
14,
198,
2,
11748,
41927,
292,
... | 2.690265 | 565 |
import ipaddress
import warnings
from unittest import mock
from django.test import TestCase
from django_wireguard.models import WireguardInterface, WireguardPeer
from django_wireguard.wireguard import WireGuard, WireGuardException
| [
11748,
20966,
21975,
198,
11748,
14601,
198,
198,
6738,
555,
715,
395,
1330,
15290,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
42625,
14208,
62,
21809,
14864,
13,
27530,
1330,
14712,
14864,
39317,
11,
14712,
148... | 3.9 | 60 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import tkinter as tk
from tkinter import ttk
if __name__ == '__main__':
    # Build the main window, attach the Example widget, and start the GUI loop.
    window = tk.Tk()
    window.title('Example')
    app = Example(window)
    window.geometry("300x250+300+300")
    window.mainloop()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
25... | 2.227642 | 123 |
import pytest
from django.contrib.auth.models import User
from expenses.models import Expenses
from house.models import House
from scripts.create_mock_data import run
@pytest.mark.django_db()
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
6738,
9307,
13,
27530,
1330,
5518,
4541,
198,
6738,
2156,
13,
27530,
1330,
2097,
198,
6738,
14750,
13,
17953,
62,
76,
735,
62,
78... | 3.362069 | 58 |
'''Implements some basic number theory utilities: gcd, the Euler phi
function, and inverting numbers modulo n.
'''

# Two large primes used by the self-tests.
big_primes = 512927357,674506081

# use the division algorithm to find q,r given integers a,b. The
# quotient, q, and remainder, r, have the property that a = bq + r,
# 0<=r<b

if __name__=="__main__":
    run_tests()
| [
7061,
6,
320,
1154,
902,
617,
4096,
1271,
4583,
3404,
25,
308,
10210,
11,
304,
18173,
12,
34846,
2163,
11,
287,
24040,
3146,
953,
43348,
299,
13,
198,
198,
7061,
6,
198,
198,
14261,
62,
1050,
999,
796,
642,
18741,
1983,
27277,
11,
... | 2.735043 | 117 |
if __name__ == "__main__":
    # Alternative test inputs, kept for reference (the first assignment was a
    # dead store that was immediately overwritten):
    # lst = [[1,10],[12,15],[18,20],[5,25]]
    # lst2 = [[-5,5],[-5,5],[-5,5]]
    lst = [[1,2],[2,3],[3,4],[1,3]]
    print(solve(lst))
    # print(solve(lst2))
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
300,
301,
796,
16410,
16,
11,
940,
38430,
1065,
11,
1314,
38430,
1507,
11,
1238,
38430,
20,
11,
1495,
11907,
198,
220,
220,
220,
1303,
300,
301,
... | 1.720721 | 111 |
from airtrack.src.camera.base import AirtrackCamera
| [
6738,
1633,
11659,
13,
10677,
13,
25695,
13,
8692,
1330,
3701,
11659,
35632,
198
] | 3.714286 | 14 |
from unitmeasure import converters
from unitmeasure import dimension
from unitmeasure.util import classproperty
| [
6738,
4326,
1326,
5015,
1330,
6718,
1010,
198,
6738,
4326,
1326,
5015,
1330,
15793,
198,
198,
6738,
4326,
1326,
5015,
13,
22602,
1330,
1398,
26745,
628
] | 4.384615 | 26 |
import FWCore.ParameterSet.Config as cms
import copy
from ElectroWeakAnalysis.ZMuMu.zSelection_cfi import *
# same charge dimuons....
# Like-sign (same-charge) global dimuon candidates, used to estimate the QCD
# background of the Z -> mu mu selection.
dimuonsGlobalSameCharge = cms.EDFilter(
    "CandViewRefSelector",
    ### added UserData
    src = cms.InputTag("userDataDimuons"),
    ##src = cms.InputTag("dimuons"),
    cut = cms.string('charge!=0 & mass > 0 & daughter(0).isGlobalMuon = 1 & daughter(1).isGlobalMuon = 1')
)

# Loose isolation/ID selection on the same-charge pairs (pt > 10, |eta| < 2.1).
goodZToMuMuSameChargeLoose = cms.EDFilter(
    "ZToMuMuIsolatedIDSelector",
    zSelectionLoose,
    src = cms.InputTag("dimuonsGlobalSameCharge"),
    filter = cms.bool(True)
)
goodZToMuMuSameChargeLoose.cut=cms.string("charge!=0 & daughter(0).pt > 10 & daughter(1).pt > 10 & abs(daughter(0).eta)<2.1 & abs(daughter(1).eta)<2.1 ")

# Tight selection (pt > 20, |eta| < 2.1).
goodZToMuMuSameCharge = cms.EDFilter(
    "ZToMuMuIsolatedIDSelector",
    zSelection,
    src = cms.InputTag("dimuonsGlobalSameCharge"),
    filter = cms.bool(True)
)
goodZToMuMuSameCharge.cut=cms.string("charge!=0 & daughter(0).pt > 20 & daughter(1).pt > 20 & abs(daughter(0).eta)<2.1 & abs(daughter(1).eta)<2.1 ")

# Require at least one muon of the pair to match the HLT_Mu9 trigger.
goodZToMuMuSameChargeAtLeast1HLTLoose = cms.EDFilter(
    "ZHLTMatchFilter",
    src = cms.InputTag("goodZToMuMuSameChargeLoose"),
    condition =cms.string("atLeastOneMatched"),
    hltPath = cms.string("HLT_Mu9"),
    filter = cms.bool(True)
)

goodZToMuMuSameChargeAtLeast1HLT = cms.EDFilter(
    "ZHLTMatchFilter",
    src = cms.InputTag("goodZToMuMuSameCharge"),
    condition =cms.string("atLeastOneMatched"),
    hltPath = cms.string("HLT_Mu9"),
    filter = cms.bool(True)
)

# Split the trigger-matched samples into exactly-one-match (1HLT) and
# both-matched (2HLT) categories.
# BUGFIX: the "bothMatched"/"exactlyOneMatched" conditions of the tight
# 1HLT/2HLT selectors were swapped relative to their names; the Loose pair
# below shows the intended assignment.
goodZToMuMuSameCharge2HLTLoose = copy.deepcopy(goodZToMuMuSameChargeAtLeast1HLTLoose)
goodZToMuMuSameCharge2HLTLoose.condition= cms.string("bothMatched")

goodZToMuMuSameCharge1HLT = copy.deepcopy(goodZToMuMuSameChargeAtLeast1HLT)
goodZToMuMuSameCharge1HLT.condition= cms.string("exactlyOneMatched")

goodZToMuMuSameCharge1HLTLoose = copy.deepcopy(goodZToMuMuSameChargeAtLeast1HLTLoose)
goodZToMuMuSameCharge1HLTLoose.condition= cms.string("exactlyOneMatched")

goodZToMuMuSameCharge2HLT = copy.deepcopy(goodZToMuMuSameChargeAtLeast1HLT)
goodZToMuMuSameCharge2HLT.condition= cms.string("bothMatched")
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
11748,
4866,
198,
198,
6738,
38781,
44898,
32750,
13,
57,
33239,
33239,
13,
89,
4653,
1564,
62,
66,
12463,
1330,
1635,
198,
198,
2,
976,
3877,
5391,
84,
684,
1106,
... | 2.42953 | 894 |
# -*- coding: utf-8 -*-
'''
BSD 3-Clause License
Copyright (c) 2021, Mike Bromberek
All rights reserved.
'''
# First party classes
# Third party classes
from flask import render_template, redirect, url_for, flash, request
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
# Custom classes
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm
from app.models import User, Workout
from app import logger
@bp.route('/login', methods=['GET','POST'])
@bp.route('/logout')
'''
@bp.route('/register', methods=['GET','POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = RegistrationForm()
if form.validate_on_submit():
email = form.email.data.lower()
user = User(displayname=form.displayname.data, email=email)
user.set_password(form.password.data)
# db.session.add(user)
# db.session.commit()
flash('Congratulations, you are now a registered user!')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', title='Register', form=form)
'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
21800,
513,
12,
2601,
682,
13789,
198,
15269,
357,
66,
8,
33448,
11,
4995,
31455,
65,
18238,
198,
3237,
2489,
10395,
13,
198,
7061,
6,
198,
198,
2... | 2.833724 | 427 |
import numpy as np
import scipy as sp
import identify
from math import sqrt as _sqrt
from scipy.interpolate import CubicSpline as _CubicSpline
from os.path import splitext as _splitext
from interpolate import CSpline
from utils import C_hartree
__all__ = ['Hamiltonian']
class Hamiltonian:
    """Constructs the Hamiltonian operator, solves the time-independent coupled
    system of radial Schrödinger equations and builds a list of energy levels
    """

    def __init__(self, grid, diatomic_data, channels, couplings=[],
                 eig_decomp='lapack', lapack_driver='evr',
                 arpack_options=('LM', 6, None), is_weighted=False):
        """Initializes the Hamiltonian object

        Parameters
        ----------
        grid : Grid object
            the radial grid on which the problem is discretized
        diatomic_data : DiatomicData object
            molecule-specific input (masses, quantum numbers, experimental
            data and fit parameters)
        channels : list
            a list with the defined channel objects
        couplings : list, optional
            a list with the defined coupling objects. Defaults to [].
            NOTE(review): mutable default argument -- safe only as long as
            the list is never mutated in place.
        eig_decomp : str, optional
            the package LAPACK or ARPACK which to be used for eigen
            decomposition. Defaults to 'lapack'.
        lapack_driver : str, optional
            Defines which LAPACK driver should be used when eig_decomp is
            set to "lapack". Valid options are "ev", "evd", "evr", "evx".
            Defaults to 'evr'.
        arpack_options : tuple, optional
            (which, k, sigma) options forwarded to the ARPACK solver.
            Defaults to ('LM', 6, None).
        is_weighted : bool, optional
            whether to apply Watson's weighting method. Defaults to False.
        """

        self.ngrid = grid.ngrid
        self.rmin = grid.rmin
        self.rmax = grid.rmax
        self.rgrid = grid.rgrid
        self.rgrid2 = np.square(self.rgrid)
        self.solver = grid.solver
        self.molecule = diatomic_data.molecule
        # Strip isotope digits from the molecule label, e.g. '58Ni1H' -> 'NiH'.
        self.mol_name = ''.join(
            filter(lambda x: not x.isdigit(), self.molecule[0]))
        self.jqnumbers = diatomic_data.jqnumbers
        self.pars = sorted(diatomic_data.symmetry)
        self.masses = diatomic_data.reduced_masses or diatomic_data.masses
        self.niso = diatomic_data.niso
        self.refj = diatomic_data.refj
        self.refE = diatomic_data.refE
        self.exp_data = diatomic_data.exp_data
        self.exp_file = diatomic_data.exp_file
        self.wavens_data = diatomic_data.wavens_data
        self.channels = channels
        self.couplings = couplings
        self.nch = len(self.channels)
        self.ncp = len(self.couplings)

        # mapping arrays
        self.Fy = grid.Fy
        self.Gy = grid.Gy

        # determine matrix size
        self.msize = self.nch * self.ngrid

        # store diagonal indices
        self.dd = np.diag_indices(self.ngrid)

        # which module to be used for eigen decomposition
        self.eig_decomp = eig_decomp

        # the lapack procedure syevr() is used by default
        self.lapack_driver = lapack_driver

        # which eigenvalues to use in the arpack procedure
        self.arpack_which = arpack_options[0]

        # in the arpack procedure k=6 by default
        self.arpack_k = arpack_options[1]

        # if sigma is set shift-invert mode is requested
        self.arpack_sigma = arpack_options[2]

        # map the diagonalization procedures
        self.diagonilize = {
            'lapack': self._lapack_eig_decomposition,
            'arpack': self._arpack_eig_decomposition
        }

        # Watson's weighting method
        self.is_weighted = is_weighted

        # max number of fit parameters
        nmax_params = 200

        # max number of computed levels
        self.nmax_levels = 10000

        self.kin_enr = KineticEnergy(self)
        self.pot_enr = PotentialEnergy(self)
        self.interact = Interaction(self)

        # matrix with the spline S functions
        self.sk_grid = np.zeros((self.msize, nmax_params))

        # used in the fit for initilization of the parameters
        channel_pars = diatomic_data.get_channel_parameters(channels)
        coupling_pars = diatomic_data.get_coupling_parameters(couplings)
        self.ypar_init = np.concatenate((channel_pars[0], coupling_pars[0]))
        self.yfixed_init = np.concatenate((channel_pars[1], coupling_pars[1]))

        # get other parameters and functions used in the fit
        self.unq_channel_inds = diatomic_data.unq_chind
        self.edit_channel_parameters = diatomic_data.edit_channel_parameters
        self.edit_coupling_parameters = diatomic_data.edit_coupling_parameters

        self.interpolate_functions(self.ypar_init)

    def interpolate_functions(self, ypar):
        """Interpolate coupling and channel functions on the grid of points

        Parameters
        ----------
        ypar : numpy.ndarray
            An array containing the corresponding parameters
        """

        self.pot_enr.calculate_channels_on_grid(ypar=ypar)
        self.ugrid = self.pot_enr.ugrid

        # self.fgrid = np.zeros(self.ncp * self.ngrid)
        if self.ncp != 0:
            self.interact.calculate_couplings_on_grid(ypar=ypar)
            self.fgrid = self.interact.fgrid

    def build_hamiltonian(self, iso, par, jrotn, tmatrix):
        """Build the Hamiltonian matrix by summing the block diagonal matrix
        of the kinetic energy, the diagonal matrix of the potential energy
        and the coupling matrix

        Parameters
        ----------
        iso : int
            isotopologue number
        par : int
            symmetry label
        jrotn : float
            rotational quantum number
        tmatrix : numpy.ndarray
            the kinetic energy matrix

        Returns
        ----------
        hmatrix : numpy.ndarray
            the Hamiltonian matrix
        """

        vmatrix = self.pot_enr.calculate_potential_energy(jrotn, par, iso)
        imatrix = self.interact.calculate_coupling_matrix(jrotn, par, iso)
        hmatrix = tmatrix + vmatrix + imatrix
        # evalues, evecs = self.diagonilize[self.eig_decomp](hmatrix)

        return hmatrix

    def _lapack_eig_decomposition(self, hmatrix):
        """Diagonalizes the Hamiltonian matrix with the scipy eigh() procedure
        from the LAPACK package

        Parameters
        ----------
        hmatrix : numpy.ndarray
            The Hamiltonian matrix

        Returns
        ----------
        evalues : numpy.ndarray
            the computed eigenvalues
        evecs : numpy.ndarray
            the computed eigenvectors

        Notes
        ----------
        NOTE(review): energy_subset_index/energy_subset_value are not set in
        __init__ -- they appear to be assigned externally; confirm.
        """

        subset_value, subset_index = None, None

        if self.energy_subset_index:
            # BUGFIX: the original `emini, emaxi = self.energy_subset_index[0]`
            # tried to unpack a single element into two names.
            emini = self.energy_subset_index[0]
            emaxi = self.energy_subset_index[-1]
            subset_index = (emini, emaxi-1)
        elif self.energy_subset_value:
            eminv = self.energy_subset_value[0] / C_hartree
            emaxv = self.energy_subset_value[-1] / C_hartree
            subset_value = (eminv, emaxv)
        else:
            subset_value = (-np.inf, np.inf)

        evalues, evecs = sp.linalg.eigh(
            hmatrix,
            eigvals_only=False,
            overwrite_a=True,
            lower=False,
            subset_by_index=subset_index,
            subset_by_value=subset_value,
            driver=self.lapack_driver.lower(),
            check_finite=False
        )

        return evalues, evecs

    def _arpack_eig_decomposition(self, hmatrix):
        """Diagonalizes the Hamiltonian matrix with the scipy sparse eigsh()
        procedure from the ARPACK package

        Parameters
        ----------
        hmatrix : numpy.ndarray
            the Hamiltonian matrix

        Returns
        ----------
        evalues : numpy.ndarray
            the computed eigenvalues
        evecs : numpy.ndarray
            the computed eigenvectors

        Notes
        ----------
        ARPACK procedure is the most efficient and suitable for finding
        the largest eigenvalues of a sparse matrix. If the smallest
        eigenvalues are desired then it is recommended to use a
        shift-invert mode. It transforms the eigenvalue problem to
        an equivalent problem with shifted eigenvalues in which the
        small eigenvalues u become the large eigenvalues v: v = 1 / u
        """

        evalues, evecs = sp.sparse.linalg.eigsh(
            hmatrix,
            k=self.arpack_k,
            which=self.arpack_which.upper(),
            sigma=self.arpack_sigma,
            return_eigenvectors=True
        )

        return evalues, evecs

    def _arange_levels(self, jrotn, par, iso, evalues, evecs, shiftE=[0.0]):
        """Given the good quantum numbers, labels and the computed eigenvalues this
        function will find and compute additional quantum numbers and labels
        and will arange the full information for each level in one matrix

        Parameters
        ----------
        jrotn : float
            the rotational quantum number
        par : int
            symmetry label, 0 for f- and 1 for e-symmetry
        iso : int
            the isotopolgue number
        evalues : numpy.ndarray
            the array with the computed eigenvalues
        evecs : numpy.ndarray
            the array with the computed eigenvectors
        shiftE : list, optional
            the value of the energy to shift the levels. Defaults to [0.0].
            NOTE(review): the mutable default is relied upon deliberately --
            the shift computed for the first J value persists across
            subsequent calls.

        Returns
        ----------
        levels : numpy.ndarray
            an array with all levels i.e. energy+quantum numbers and labels
        """

        ids = np.arange(1, evalues.shape[0]+1)

        if self.refj and jrotn == self.jqnumbers[0]:
            shiftE[0] = evalues[0]
        elif self.refE:
            shiftE[0] = self.refE

        # Convert to the output unit system relative to the reference energy.
        evalues_shifted = (evalues - shiftE[0]) * C_hartree

        ccoefs = self._get_coupling_coefficients(evecs)
        states = np.argmax(ccoefs, axis=1) + 1
        vibnums = self._assign_vibrational_number(states)
        lambdas = self._get_lambda_values(states)
        omegas = self._get_omega_values(states)

        levels = np.column_stack((
            ids, evalues_shifted,
            np.full((evalues_shifted.shape[0],), jrotn),
            np.full((evalues_shifted.shape[0],), par),
            np.full((evalues_shifted.shape[0],), iso),
            ccoefs, states, vibnums, lambdas, omegas
        ))

        return levels

    def save_predicted_energies(self):
        """Stores the complete list of computed energy levels in external file

        NOTE(review): self.calc_data and self.energy_out_file are not set in
        __init__ -- presumably assigned by the level-computation driver.
        """

        nrows = self.calc_data.shape[1]
        # Reorder columns: id, vib number, energy..., state/lambda/omega tail.
        cols = [0, -3] + list(range(1, nrows-3)) + [-2, -1]
        calc_data_out = self.calc_data[:, cols]

        coef = 'coef'
        coef_labels = ''.join(
            map(lambda x: f'{coef+str(x):^10}', range(1, self.nch+1)))
        labels = ('n', 'v', 'Ecalc', 'J', 'symmetry', ' marker',
                  coef_labels, 'state', 'lambda', 'omega')
        header = (f'{labels[0]:^10}{labels[1]:<9}{labels[2]:<15}{labels[3]:<4}'
                  f'{labels[4]:<7}{labels[5]:<7}{labels[6]}{labels[7]:<7}'
                  f'{labels[8]:<9}{labels[9]}')
        fmt = ['%7.1d', '%5.1d', '%16.6f', '%7.1f', '%5.1d', '%7.1d']
        fmt += self.nch*['%9.3f'] + 2*['%6.1d'] + ['%8.1f']

        # Write next to the regular output file, tagged '_predicted'.
        file_name, file_ext = _splitext(self.energy_out_file)
        output_file = file_name + '_predicted' + file_ext
        np.savetxt(output_file, calc_data_out, header=header, fmt=fmt)

    def get_predicted_data(self):
        """Gets the complete list of computed energy levels

        Returns
        ----------
        calc_data: numpy.ndarray
            the computed energy levels
        """

        return self.calc_data

    def get_hamiltonian(self):
        """Gets the Hamiltonian matrix for the last computed J, symmetry
        and isotopologue

        Returns
        ----------
        hamiltonian : numpy.ndarray
            the computed Hamiltonian matrix
        """

        # will get the matrix for the last comupted J, e/f-level and isotope
        return self.hamiltonian
| [
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
599,
198,
11748,
5911,
198,
6738,
10688,
1330,
19862,
17034,
355,
4808,
31166,
17034,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
7070,
291,
26568,
500,
355,
4808,
... | 2.109559 | 5,869 |
from django.urls import path
from . import views
# URL routes for the project app: list view, detail view and the per-project
# task list.
urlpatterns = [
    path('', views.ProjectView.as_view(), name='projects'),
    # <pk> uses the default `str` path converter.
    path(r'<pk>/', views.ProjectDetailView.as_view(), name='project-detail'),
    path(r'<pk>/tasks/', views.TaskProjectView.as_view(), name='project-tasks'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
16775,
7680,
13,
292,
62,
1177,
22784,
1438,
11639,
42068,
33809,
198... | 2.642202 | 109 |
from __future__ import unicode_literals
from django.db import models
from simple_history import register
from simple_history.models import HistoricalRecords
from simple_history.tests.custom_user.models import CustomUser
# Enable history tracking for ExternalModelRegistered via simple_history's
# register() API; its history manager is exposed as "histories".
register(ExternalModelRegistered, app="simple_history.tests", manager_name="histories")
class Poll(models.Model):
    """Test model for same-named historical models

    This model intentionally conflicts with the 'Polls' model in 'tests.models'.
    """

    # user_related_name="+" suppresses the reverse accessor from the user
    # model back to these history records.
    history = HistoricalRecords(user_related_name="+")
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
2829,
62,
23569,
1330,
7881,
198,
6738,
2829,
62,
23569,
13,
27530,
1330,
23121,
6690,
3669,
198,
6738,
2829... | 3.668919 | 148 |
# project/server/models/user.py
import jwt
import datetime
from project.server import app, db, bcrypt
from .blacklistToken import BlacklistToken
from .favourite import Favourite
from .tmdb import Tmdb
from .show import Show
class User(db.Model):
    """ User Model for storing user related details and auth helpers """
    __tablename__ = "users"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False)
    # Favourite rows are owned by the user and removed with it.
    favourites = db.relationship(lambda: Favourite, cascade="all, delete-orphan", backref='user')

    def encode_auth_token(self, user_id):
        """
        Generates the Auth Token
        :param user_id: id embedded in the token's `sub` claim
        :return: string (or the caught exception object on failure)
        """
        try:
            payload = {
                # Token is valid for one day (plus a 5 second margin).
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id
            }
            return jwt.encode(
                payload,
                app.config.get('SECRET_KEY'),
                algorithm='HS256'
            )
        except Exception as e:
            # NOTE(review): returning the exception instead of raising mirrors
            # the original behaviour; callers must check the returned type.
            return e

    @staticmethod
    def decode_auth_token(auth_token):
        """
        Validates the auth token
        :param auth_token:
        :return: integer user id on success | string error message
        """
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
            if is_blacklisted_token:
                return 'Token blacklisted. Please log in again.'
            else:
                return payload['sub']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please log in again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please log in again.'

    def has_favourite(self, tmdb_id):
        """
        Checks if the user already has the show in his favourites
        :param tmdb_id:
        :return: bool
        """
        return any(favourite.tmdb_id == int(tmdb_id) for favourite in self.favourites)

    def add_favourite(self, tmdb_id):
        """
        Adds a favourite show to the user
        :param tmdb_id:
        :raises Exception: if the show is already a favourite
        """
        if not self.has_favourite(tmdb_id):
            favourite = Favourite(user_id = int(self.id), tmdb_id = int(tmdb_id))
            self.favourites.append(favourite)
        else:
            # BUGFIX: the original raised 'Cannot remove an inexistant
            # favourite' here -- a copy/paste from remove_favourite().
            raise Exception('Cannot add an already existing favourite')

    def remove_favourite(self, tmdb_id):
        """
        Removes a favourite show from the user
        :param tmdb_id:
        :raises Exception: if the show is not a favourite
        """
        if self.has_favourite(tmdb_id):
            self.favourites = [favourite for favourite in self.favourites if favourite.tmdb_id != int(tmdb_id)]
        else:
            raise Exception('Cannot remove an inexistant favourite')
| [
2,
1628,
14,
15388,
14,
27530,
14,
7220,
13,
9078,
198,
198,
11748,
474,
46569,
198,
11748,
4818,
8079,
198,
198,
6738,
1628,
13,
15388,
1330,
598,
11,
20613,
11,
275,
29609,
198,
6738,
764,
13424,
4868,
30642,
1330,
2619,
4868,
30642... | 2.166062 | 1,379 |