hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1714205d11e20956a01b4c33e37e04957551b563 | 2,642 | py | Python | distance_labelling_theorem.py | lucernae/magic-graph-labeling | 562fa2136f90cafc6967df317f19cbda9e6d46fa | [
"MIT"
] | null | null | null | distance_labelling_theorem.py | lucernae/magic-graph-labeling | 562fa2136f90cafc6967df317f19cbda9e6d46fa | [
"MIT"
] | null | null | null | distance_labelling_theorem.py | lucernae/magic-graph-labeling | 562fa2136f90cafc6967df317f19cbda9e6d46fa | [
"MIT"
] | null | null | null | # coding=utf-8
"""
This file will contains criteria for a graph to have distance magic labelling.
"""
from utils import get_logger
logger = get_logger(__name__)
def get_graph_param(graph):
    """
    Return the basic parameters of a regular graph.

    :param graph: graph object exposing ``degree(v)`` and ``vertices()``
    :return: tuple ``(r, n)`` where ``r`` is the degree of vertex 0
        (taken as representative, since the graph is assumed regular)
        and ``n`` is the total number of vertices
    """
    regular_degree = graph.degree(0)
    vertex_count = len(graph.vertices())
    return regular_degree, vertex_count
def criteria_1(graph):
    """
    For n even, an r-regular distance magic graph with n vertices exists
    if and only if 2 <= r <= n - 2, r == 0 (mod 2) and
    either n == 0 (mod 4) or n == r + 2 == 2 (mod 4).

    :param graph: graph to test
    :return: True when the criterion is satisfied, False otherwise
    """
    r, n = get_graph_param(graph)
    even_order = n % 2 == 0
    degree_ok = 2 <= r <= n - 2 and r % 2 == 0
    # n == r + 2 == 2 (mod 4) is equivalent to n % 4 == 2 and r % 4 == 0
    residue_ok = n % 4 == 0 or (n % 4 == 2 and r % 4 == 0)
    if even_order and degree_ok and residue_ok:
        logger.info('Criteria 1')
        return True
    return False
def criteria_2(graph):
    """
    There exists a 4-regular distance magic graph of odd order n
    if and only if n >= 17.

    :param graph: graph to test
    :return: True when the criterion is satisfied, False otherwise
    """
    r, n = get_graph_param(graph)
    matches = r == 4 and n % 2 == 1 and n >= 17
    if matches:
        logger.info('Criteria 2')
    return matches
def criteria_3(graph):
    """
    There exists a 6-regular distance magic graph of odd order n
    if and only if n = 9 or n >= 13.

    :param graph: graph to test
    :return: True when the criterion is satisfied, False otherwise
    """
    r, n = get_graph_param(graph)
    matches = r == 6 and n % 2 == 1 and (n == 9 or n >= 13)
    if matches:
        logger.info('Criteria 3')
    return matches
def criteria_4(graph):
    """
    There exists an 8-regular distance magic graph of odd order n
    if and only if n >= 15.

    :param graph: graph to test
    :return: True when the criterion is satisfied, False otherwise
    """
    r, n = get_graph_param(graph)
    matches = r == 8 and n % 2 == 1 and n >= 15
    if matches:
        logger.info('Criteria 4')
    return matches
def criteria_5(graph):
    """
    There exists a 10-regular distance magic graph of odd order n
    if and only if n >= 15.

    :param graph: graph to test
    :return: True when the criterion is satisfied, False otherwise
    """
    r, n = get_graph_param(graph)
    matches = r == 10 and n % 2 == 1 and n >= 15
    if matches:
        logger.info('Criteria 5')
    return matches
def criteria_6(graph):
    """
    There exists a 12-regular distance magic graph of odd order n
    if and only if n >= 15.

    :param graph: graph to test
    :return: True when the criterion is satisfied, False otherwise
    """
    r, n = get_graph_param(graph)
    matches = r == 12 and n % 2 == 1 and n >= 15
    if matches:
        logger.info('Criteria 6')
    return matches
| 20.48062 | 78 | 0.521196 |
a8b0782dc6d6b0866621e97947d5410cceec3529 | 438 | py | Python | Session 4/Skeleton Files/Exercise 1.py | Descent098/schulich-ignite-winter-2021 | 2003966651f8afc2d75657e1728e8dd8a13b88a2 | [
"MIT"
] | 1 | 2021-02-16T19:44:28.000Z | 2021-02-16T19:44:28.000Z | Session 4/Skeleton Files/Exercise 1.py | Descent098/schulich-ignite-winter-2021 | 2003966651f8afc2d75657e1728e8dd8a13b88a2 | [
"MIT"
] | null | null | null | Session 4/Skeleton Files/Exercise 1.py | Descent098/schulich-ignite-winter-2021 | 2003966651f8afc2d75657e1728e8dd8a13b88a2 | [
"MIT"
] | 1 | 2021-02-22T22:57:19.000Z | 2021-02-22T22:57:19.000Z | """Exercise 1: Meower Power
Remember to fill out all the TODO's, you can quickly scan for them by pressing CTRL/CMD + F
"""
class Animal:
    """A generic animal that knows its name and the sound it makes."""

    def __init__(self, name, sound):
        self.name = name    # the animal's given name
        self.sound = sound  # the noise printed by make_sound()

    def make_sound(self):
        """Print this animal's sound to the console."""
        print(self.sound)
class Cat(Animal):
    # TODO: finish class — this is a student exercise stub. It should override
    # __init__ so a Cat can be built from just a name (e.g. Cat("Fluffy")),
    # presumably passing a fixed cat sound up to Animal.__init__.
    ...
def meow():
    # TODO: Finish meow function — exercise stub; intentionally left empty
    # for the student to implement.
    ...
# NOTE(review): this only runs once the Cat TODO above is completed —
# Animal.__init__ requires both a name and a sound, so Cat("Fluffy") raises
# TypeError in the unfinished skeleton. This is the exercise's intended state.
cat = Cat("Fluffy")
cat.make_sound()
| 18.25 | 91 | 0.605023 |
3fd64f5ba2206fbc3401492a82294d44553a5bec | 1,593 | py | Python | Files/Ch04/02_training_and_saving_weights complete.py | mahdinobar/Image-Recognition-inLearning | 940d5b88f70e65021ea63a4cc71fff9ce231f1a2 | [
"MIT"
] | null | null | null | Files/Ch04/02_training_and_saving_weights complete.py | mahdinobar/Image-Recognition-inLearning | 940d5b88f70e65021ea63a4cc71fff9ce231f1a2 | [
"MIT"
] | 7 | 2019-12-16T22:15:20.000Z | 2022-02-10T00:35:12.000Z | Ex_Files_Deep_Learning_Image_Recog_Upd/Exercise Files/Ch04/02_training_and_saving_weights complete.py | gilson27/learning_experiments | c329b1e0de8dd50afc70fb257540b569927156d1 | [
"MIT"
] | null | null | null | import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from pathlib import Path
# Load data set (CIFAR-10: 50k train / 10k test images, 32x32 RGB, 10 classes)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize data set to 0-to-1 range (pixel values arrive as uint8 0-255)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# Convert class vectors to binary class matrices (one-hot encode the 10 labels)
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Create a model and add layers: two conv blocks (conv-conv-pool-dropout)
# followed by a dense classifier head.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))  # regularization: drop 25% of activations

model.add(Conv2D(64, (3, 3), padding='same', activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))  # class-probability output

# Compile the model (categorical cross-entropy matches the one-hot labels)
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

# Train the model, validating on the held-out test split each epoch
model.fit(
    x_train,
    y_train,
    batch_size=64,
    epochs=30,
    validation_data=(x_test, y_test),
    shuffle=True
)

# Save neural network structure (architecture only, as JSON)
model_structure = model.to_json()
f = Path("model_structure.json")
f.write_text(model_structure)

# Save neural network's trained weights (HDF5; pairs with the JSON structure)
model.save_weights("model_weights.h5")
| 25.693548 | 89 | 0.728186 |
b866e1cc4ddc2283ddfd27a4599fddc2b20ea1de | 837 | py | Python | stumpy/jupyter.py | akubera/stumpy | 57539c811022eae437ab0850b20b0ff74bb4e041 | [
"BSD-3-Clause"
] | null | null | null | stumpy/jupyter.py | akubera/stumpy | 57539c811022eae437ab0850b20b0ff74bb4e041 | [
"BSD-3-Clause"
] | null | null | null | stumpy/jupyter.py | akubera/stumpy | 57539c811022eae437ab0850b20b0ff74bb4e041 | [
"BSD-3-Clause"
] | null | null | null | #
# stumpy/jupyter.py
#
"""
Methods for running in a jupyter notebook
"""
from .histogram import Histogram, HistogramRatioPair
from . import utils
import numpy as np
def enable_inline_ROOT_stuff():
    """Inject a requirejs configuration into the running Jupyter notebook so
    JSRoot can be loaded for inline ROOT rendering.

    Tries a local copy of JSRootCore first, then falls back to the GitHub
    master copy. Must be called from within a notebook (uses IPython display).
    """
    from IPython.display import Javascript, display
    # NOTE: the Javascript payload is executed verbatim in the browser;
    # do not reformat the string contents.
    display(Javascript("""
    console.log("enabling JSRoot")
    requirejs.config({
    paths: {JSRoot: [
    '/files/post_analysis/jsroot/scripts/JSRootCore',
    'https://raw.githubusercontent.com/linev/jsroot/master/scripts/JSRootCore'
    ]}
    });
    require(["JSRoot"], function (ROOT) {
    console.log("[stumpy::enable_inline_ROOT_stuff] Loaded JSRoot", ROOT);
    })
    """))
def draw(obj, drawops='', size=None):
    """Draw a ROOT object on a fresh canvas and return the canvas.

    :param obj: any ROOT drawable (histogram, graph, ...) exposing ``Draw``
    :param drawops: draw-option string forwarded to ``obj.Draw``
    :param size: optional ``(width, height)`` pair in pixels for the canvas
    :return: the TCanvas the object was drawn on (keep a reference so the
        canvas is not garbage collected)
    """
    # Bug fix: TCanvas was never imported in this module, so the original
    # raised NameError on first call. Import lazily so the module can still
    # be imported in environments without ROOT installed.
    from ROOT import TCanvas

    c = TCanvas()
    if size is not None:
        # Bug fix: TCanvas.SetCanvasSize takes (ww, wh) as two separate
        # arguments, not a single tuple.
        c.SetCanvasSize(*size)
    obj.Draw(drawops)
    c.Draw()
    return c
| 20.925 | 82 | 0.641577 |
2d83d75c52c07cd0a2e021015587cc5d5f5640ab | 13,866 | py | Python | dxm/lib/masking_api/api/column_metadata_api.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 5 | 2018-08-23T15:47:05.000Z | 2022-01-19T23:38:18.000Z | dxm/lib/masking_api/api/column_metadata_api.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 59 | 2018-10-15T10:37:00.000Z | 2022-03-22T20:49:25.000Z | dxm/lib/masking_api/api/column_metadata_api.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 12 | 2019-03-08T19:59:13.000Z | 2021-12-16T03:28:04.000Z | # coding: utf-8
"""
Masking API
Schema for the Masking Engine API # noqa: E501
OpenAPI spec version: 5.1.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dxm.lib.masking_api.api_client import ApiClient
class ColumnMetadataApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_all_column_metadata(self, **kwargs):  # noqa: E501
        """Get all column metadata  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_column_metadata(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int table_metadata_id: The ID of the table metadata to get all column metadata from
        :param bool is_masked: Get only masked column metadata when this is true and only unmasked column metadata when this is false
        :param int page_number: The page number for which to get column metadata. This will default to the first page if excluded
        :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
        :return: ColumnMetadataList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_all_column_metadata_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_all_column_metadata_with_http_info(**kwargs)  # noqa: E501
            return data

    def get_all_column_metadata_with_http_info(self, **kwargs):  # noqa: E501
        """Get all column metadata  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_column_metadata_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int table_metadata_id: The ID of the table metadata to get all column metadata from
        :param bool is_masked: Get only masked column metadata when this is true and only unmasked column metadata when this is false
        :param int page_number: The page number for which to get column metadata. This will default to the first page if excluded
        :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
        :return: ColumnMetadataList
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['table_metadata_id', 'is_masked', 'page_number', 'page_size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument not declared for this endpoint.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_all_column_metadata" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'table_metadata_id' in params:
            query_params.append(('table_metadata_id', params['table_metadata_id']))  # noqa: E501
        if 'is_masked' in params:
            query_params.append(('is_masked', params['is_masked']))  # noqa: E501
        if 'page_number' in params:
            query_params.append(('page_number', params['page_number']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('page_size', params['page_size']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/column-metadata', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ColumnMetadataList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_column_metadata_by_id(self, column_metadata_id, **kwargs):  # noqa: E501
        """Get column metadata by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_column_metadata_by_id(column_metadata_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int column_metadata_id: The ID of the column metadata to get (required)
        :return: ColumnMetadata
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_column_metadata_by_id_with_http_info(column_metadata_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_column_metadata_by_id_with_http_info(column_metadata_id, **kwargs)  # noqa: E501
            return data

    def get_column_metadata_by_id_with_http_info(self, column_metadata_id, **kwargs):  # noqa: E501
        """Get column metadata by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_column_metadata_by_id_with_http_info(column_metadata_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int column_metadata_id: The ID of the column metadata to get (required)
        :return: ColumnMetadata
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['column_metadata_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_column_metadata_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'column_metadata_id' is set
        if self.api_client.client_side_validation and ('column_metadata_id' not in params or
                                                       params['column_metadata_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `column_metadata_id` when calling `get_column_metadata_by_id`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'column_metadata_id' in params:
            path_params['columnMetadataId'] = params['column_metadata_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/column-metadata/{columnMetadataId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ColumnMetadata',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def update_column_metadata(self, column_metadata_id, body, **kwargs):  # noqa: E501
        """Update column metadata by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_column_metadata(column_metadata_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int column_metadata_id: The ID of the column metadata to update (required)
        :param ColumnMetadata body: The updated column metadata (required)
        :return: ColumnMetadata
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update_column_metadata_with_http_info(column_metadata_id, body, **kwargs)  # noqa: E501
        else:
            (data) = self.update_column_metadata_with_http_info(column_metadata_id, body, **kwargs)  # noqa: E501
            return data

    def update_column_metadata_with_http_info(self, column_metadata_id, body, **kwargs):  # noqa: E501
        """Update column metadata by ID  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_column_metadata_with_http_info(column_metadata_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int column_metadata_id: The ID of the column metadata to update (required)
        :param ColumnMetadata body: The updated column metadata (required)
        :return: ColumnMetadata
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['column_metadata_id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_column_metadata" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'column_metadata_id' is set
        if self.api_client.client_side_validation and ('column_metadata_id' not in params or
                                                       params['column_metadata_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `column_metadata_id` when calling `update_column_metadata`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in params or
                                                       params['body'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `body` when calling `update_column_metadata`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'column_metadata_id' in params:
            path_params['columnMetadataId'] = params['column_metadata_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/column-metadata/{columnMetadataId}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ColumnMetadata',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 42.018182 | 142 | 0.632338 |
ca1450f7b4b961b44f6f4a1b5ac0e9d1e43592f5 | 6,295 | py | Python | google/cloud/dialogflow_v2/services/conversation_profiles/pagers.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 171 | 2018-09-19T21:16:18.000Z | 2020-12-07T17:41:10.000Z | google/cloud/dialogflow_v2/services/conversation_profiles/pagers.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 150 | 2018-09-25T14:04:28.000Z | 2020-12-09T21:45:43.000Z | google/cloud/dialogflow_v2/services/conversation_profiles/pagers.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 75 | 2018-09-22T14:12:18.000Z | 2020-12-08T07:12:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.dialogflow_v2.types import conversation_profile
class ListConversationProfilesPager:
    """A pager for iterating through ``list_conversation_profiles`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.dialogflow_v2.types.ListConversationProfilesResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``conversation_profiles`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListConversationProfiles`` requests and continue to iterate
    through the ``conversation_profiles`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.dialogflow_v2.types.ListConversationProfilesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., conversation_profile.ListConversationProfilesResponse],
        request: conversation_profile.ListConversationProfilesRequest,
        response: conversation_profile.ListConversationProfilesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.dialogflow_v2.types.ListConversationProfilesRequest):
                The initial request object.
            response (google.cloud.dialogflow_v2.types.ListConversationProfilesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = conversation_profile.ListConversationProfilesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[conversation_profile.ListConversationProfilesResponse]:
        # Yield the initial response, then keep fetching while the server
        # reports another page via next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[conversation_profile.ConversationProfile]:
        for page in self.pages:
            yield from page.conversation_profiles

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListConversationProfilesAsyncPager:
    """A pager for iterating through ``list_conversation_profiles`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.dialogflow_v2.types.ListConversationProfilesResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``conversation_profiles`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListConversationProfiles`` requests and continue to iterate
    through the ``conversation_profiles`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.dialogflow_v2.types.ListConversationProfilesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[
            ..., Awaitable[conversation_profile.ListConversationProfilesResponse]
        ],
        request: conversation_profile.ListConversationProfilesRequest,
        response: conversation_profile.ListConversationProfilesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.dialogflow_v2.types.ListConversationProfilesRequest):
                The initial request object.
            response (google.cloud.dialogflow_v2.types.ListConversationProfilesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = conversation_profile.ListConversationProfilesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(
        self,
    ) -> AsyncIterator[conversation_profile.ListConversationProfilesResponse]:
        # Async variant: await each follow-up page fetch.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[conversation_profile.ConversationProfile]:
        async def async_generator():
            async for page in self.pages:
                for response in page.conversation_profiles:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| 39.34375 | 92 | 0.695314 |
1400d052cb8bbdccd5d1848199e5f6417af1451c | 5,034 | py | Python | test/utils/test_conv_transpose.py | sbharadwajj/backpack | 5d484195735e29b1c08848906618f8ba2f339bec | [
"MIT"
] | null | null | null | test/utils/test_conv_transpose.py | sbharadwajj/backpack | 5d484195735e29b1c08848906618f8ba2f339bec | [
"MIT"
] | null | null | null | test/utils/test_conv_transpose.py | sbharadwajj/backpack | 5d484195735e29b1c08848906618f8ba2f339bec | [
"MIT"
] | null | null | null | """Test generalization of unfold to transpose convolutions."""
# TODO: @sbharadwajj: impose test suite structure
# TODO: @sbharadwajj: test with groups≠1
import torch
from backpack.utils.conv_transpose import unfold_by_conv_transpose
from ..automated_test import check_sizes_and_values
torch.manual_seed(0)
###############################################################################
# Perform a convolution with the unfolded input matrix #
###############################################################################
def conv_transpose_with_unfold(input, module):
"""Perform transpose convolution via matrix multiplication."""
assert module.bias is None
def get_output_shape(input, module):
return module(input).shape
N, C_in = input.shape[0], input.shape[1]
output_shape = get_output_shape(input, module)
C_out = output_shape[1]
spatial_out_size = output_shape[2:]
spatial_out_numel = spatial_out_size.numel()
kernel_size = module.kernel_size
kernel_size_numel = int(torch.prod(torch.Tensor(kernel_size)))
G = module.groups
weight_matrix = module.weight.data.reshape(
C_in // G, G, C_out // G, kernel_size_numel
)
unfolded_input = unfold_by_conv_transpose(input, module).reshape(
N, C_in // G, G, kernel_size_numel, spatial_out_numel
)
result = torch.einsum("cgox,ncgxh->ngoh", weight_matrix, unfolded_input)
return result.reshape(N, C_out, *spatial_out_size)
CONV_TRANSPOSE_2D_SETTINGS = [
[torch.nn.ConvTranspose2d(1, 1, kernel_size=2, bias=False), (1, 1, 3, 3)],
[torch.nn.ConvTranspose2d(1, 2, kernel_size=2, bias=False), (1, 1, 3, 3)],
[torch.nn.ConvTranspose2d(2, 1, kernel_size=2, bias=False), (1, 2, 3, 3)],
[torch.nn.ConvTranspose2d(2, 2, kernel_size=2, bias=False), (1, 2, 3, 3)],
[torch.nn.ConvTranspose2d(2, 3, kernel_size=2, bias=False), (3, 2, 11, 13)],
[
torch.nn.ConvTranspose2d(2, 3, kernel_size=2, padding=1, bias=False),
(3, 2, 11, 13),
],
[
torch.nn.ConvTranspose2d(2, 3, kernel_size=2, padding=1, stride=2, bias=False),
(3, 2, 11, 13),
],
[
torch.nn.ConvTranspose2d(
2, 3, kernel_size=2, padding=1, stride=2, dilation=2, bias=False
),
(3, 2, 11, 13),
],
]
def test_conv_transpose2d_with_unfold():
for module, in_shape in CONV_TRANSPOSE_2D_SETTINGS:
input = torch.rand(in_shape)
result_conv_transpose = module(input)
result_conv_transpose_by_unfold = conv_transpose_with_unfold(input, module)
check_sizes_and_values(result_conv_transpose, result_conv_transpose_by_unfold)
CONV_TRANSPOSE_1D_SETTINGS = [
[torch.nn.ConvTranspose1d(1, 1, kernel_size=2, bias=False), (1, 1, 3)],
[torch.nn.ConvTranspose1d(1, 2, kernel_size=2, bias=False), (1, 1, 3)],
[torch.nn.ConvTranspose1d(2, 1, kernel_size=2, bias=False), (1, 2, 3)],
[torch.nn.ConvTranspose1d(2, 2, kernel_size=2, bias=False), (1, 2, 3)],
[torch.nn.ConvTranspose1d(2, 3, kernel_size=2, bias=False), (3, 2, 11)],
[torch.nn.ConvTranspose1d(2, 3, kernel_size=2, padding=1, bias=False), (3, 2, 11)],
[
torch.nn.ConvTranspose1d(2, 3, kernel_size=2, padding=1, stride=2, bias=False),
(3, 2, 11),
],
[
torch.nn.ConvTranspose1d(
2, 3, kernel_size=2, padding=1, stride=2, dilation=2, bias=False
),
(3, 2, 11),
],
]
def test_conv_transpose1d_with_unfold():
for module, in_shape in CONV_TRANSPOSE_1D_SETTINGS:
input = torch.rand(in_shape)
result_conv_transpose = module(input)
result_conv_transpose_by_unfold = conv_transpose_with_unfold(input, module)
check_sizes_and_values(result_conv_transpose, result_conv_transpose_by_unfold)
CONV_TRANSPOSE_3D_SETTINGS = [
[torch.nn.ConvTranspose3d(1, 1, kernel_size=2, bias=False), (1, 1, 3, 3, 3)],
[torch.nn.ConvTranspose3d(1, 2, kernel_size=2, bias=False), (1, 1, 3, 3, 3)],
[torch.nn.ConvTranspose3d(2, 1, kernel_size=2, bias=False), (1, 2, 3, 3, 3)],
[torch.nn.ConvTranspose3d(2, 2, kernel_size=2, bias=False), (1, 2, 3, 3, 3)],
[torch.nn.ConvTranspose3d(2, 3, kernel_size=2, bias=False), (3, 2, 11, 13, 17)],
[
torch.nn.ConvTranspose3d(2, 3, kernel_size=2, padding=1, bias=False),
(3, 2, 11, 13, 17),
],
[
torch.nn.ConvTranspose3d(2, 3, kernel_size=2, padding=1, stride=2, bias=False),
(3, 2, 11, 13, 17),
],
[
torch.nn.ConvTranspose3d(
2, 3, kernel_size=2, padding=1, stride=2, dilation=2, bias=False
),
(3, 2, 11, 13, 17),
],
]
def test_conv_transpose3d_with_unfold():
for module, in_shape in CONV_TRANSPOSE_3D_SETTINGS:
input = torch.rand(in_shape)
result_conv_transpose = module(input)
result_conv_transpose_by_unfold = conv_transpose_with_unfold(input, module)
check_sizes_and_values(result_conv_transpose, result_conv_transpose_by_unfold)
| 34.958333 | 87 | 0.638657 |
1b52a9fd38f69a5bd8dec623ce9d6e1ef57a6e46 | 7,835 | py | Python | sdk/python/pulumi_azure_nextgen/cdn/afd_endpoint.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/cdn/afd_endpoint.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/cdn/afd_endpoint.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = ['AFDEndpoint']
class AFDEndpoint(pulumi.CustomResource):
    # NOTE: this class lives in a generated file (see header warning);
    # regeneration will discard any hand-written comments below.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 enabled_state: Optional[pulumi.Input[Union[str, 'EnabledState']]] = None,
                 endpoint_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 origin_response_timeout_seconds: Optional[pulumi.Input[int]] = None,
                 profile_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        CDN endpoint is the entity within a CDN profile containing configuration information such as origin, protocol, content caching and delivery behavior. The AzureFrontDoor endpoint uses the URL format <endpointname>.azureedge.net.
        API Version: 2020-09-01.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Union[str, 'EnabledState']] enabled_state: Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
        :param pulumi.Input[str] endpoint_name: Name of the endpoint under the profile which is unique globally.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[int] origin_response_timeout_seconds: Send and receive timeout on forwarding request to the origin. When timeout is reached, the request fails and returns.
        :param pulumi.Input[str] profile_name: Name of the CDN profile which is unique within the resource group.
        :param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        # Legacy keyword support (deprecated in favour of resource_name/opts).
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # A set opts.id means "adopt an existing resource" (see get() below),
        # in which case no input properties may be supplied here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            # Input properties forwarded to the provider.
            __props__['enabled_state'] = enabled_state
            __props__['endpoint_name'] = endpoint_name
            __props__['location'] = location
            __props__['origin_response_timeout_seconds'] = origin_response_timeout_seconds
            if profile_name is None and not opts.urn:
                raise TypeError("Missing required property 'profile_name'")
            __props__['profile_name'] = profile_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties, populated by the provider after creation.
            __props__['deployment_status'] = None
            __props__['host_name'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['system_data'] = None
            __props__['type'] = None
        # Aliases keep state continuity with the older module paths of this type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:cdn/latest:AFDEndpoint"), pulumi.Alias(type_="azure-nextgen:cdn/v20200901:AFDEndpoint")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(AFDEndpoint, __self__).__init__(
            'azure-nextgen:cdn:AFDEndpoint',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'AFDEndpoint':
        """
        Get an existing AFDEndpoint resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Properties are left empty; the engine resolves them from existing state.
        __props__ = dict()
        return AFDEndpoint(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="deploymentStatus")
    def deployment_status(self) -> pulumi.Output[str]:
        # Output-only property (no description provided by the API spec).
        return pulumi.get(self, "deployment_status")
    @property
    @pulumi.getter(name="enabledState")
    def enabled_state(self) -> pulumi.Output[Optional[str]]:
        """
        Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
        """
        return pulumi.get(self, "enabled_state")
    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> pulumi.Output[str]:
        """
        The host name of the endpoint structured as {endpointName}.{DNSZone}, e.g. contoso.azureedge.net
        """
        return pulumi.get(self, "host_name")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="originResponseTimeoutSeconds")
    def origin_response_timeout_seconds(self) -> pulumi.Output[Optional[int]]:
        """
        Send and receive timeout on forwarding request to the origin. When timeout is reached, the request fails and returns.
        """
        return pulumi.get(self, "origin_response_timeout_seconds")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        Provisioning status
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        Read only system data
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    def translate_output_property(self, prop):
        # Map camelCase provider property names to snake_case Python attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map snake_case Python attributes back to camelCase provider names.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.898396 | 235 | 0.647224 |
92eca6bb896ec3f12fcc0f19178a3c3f31088a2e | 1,568 | py | Python | examples/block_tets_blender.py | franaudo/fea | e164256bac179116520d19d6fc54c98de0610896 | [
"MIT"
] | null | null | null | examples/block_tets_blender.py | franaudo/fea | e164256bac179116520d19d6fc54c98de0610896 | [
"MIT"
] | null | null | null | examples/block_tets_blender.py | franaudo/fea | e164256bac179116520d19d6fc54c98de0610896 | [
"MIT"
] | null | null | null | from compas_fea.cad import blender
from compas_fea.structure import ElasticIsotropic
from compas_fea.structure import ElementProperties as Properties
from compas_fea.structure import GeneralStep
from compas_fea.structure import PinnedDisplacement
from compas_fea.structure import PointLoad
from compas_fea.structure import SolidSection
from compas_fea.structure import Structure
from compas_blender.utilities import get_object_by_name
# Author(s): Andrew Liew (github.com/andrewliew)
# Build and analyse a tetrahedral solid block with compas_fea, driven from
# Blender geometry.  Results are written under `path`.
# Structure
mdl = Structure(name='block_tets', path='C:/Temp/')
# Tetrahedrons: mesh the Blender object named 'mesh' into tet elements.
blender.add_tets_from_mesh(mdl, name='elset_tets', mesh=get_object_by_name('mesh'))
# Sets: node sets are taken from the Blender layers of the same names.
blender.add_nsets_from_layers(mdl, layers=['nset_base', 'nset_top'])
# Materials: linear-elastic (E = 10*10**9, presumably Pa — confirm units).
mdl.add(ElasticIsotropic(name='mat_elastic', E=10*10**9, v=0.3, p=1))
# Sections
mdl.add(SolidSection(name='sec_solid'))
# Properties: tie the material and section to the tet element set.
mdl.add(Properties(name='ep_tets', material='mat_elastic', section='sec_solid', elset='elset_tets'))
# Displacements: fully pin the base nodes.
mdl.add(PinnedDisplacement(name='disp_pinned', nodes='nset_base'))
# Loads: point loads on the top nodes (y and z components).
mdl.add(PointLoad(name='load_top', nodes='nset_top', y=100, z=100))
# Steps: boundary conditions first, then the load.
mdl.add([
    GeneralStep(name='step_bc', displacements=['disp_pinned']),
    GeneralStep(name='step_load', loads=['load_top']),
])
mdl.steps_order = ['step_bc', 'step_load']
# Summary
mdl.summary()
# Run: launch Abaqus and extract the displacement field 'u'.
mdl.analyse_and_extract(software='abaqus', fields=['u'])
# blender.plot_data(mdl, step='step_load', field='um')
# blender.plot_voxels(mdl, step='step_load', field='um', vdx=0.05)
mdl.save_to_obj()
| 23.058824 | 100 | 0.762755 |
d84d2eb662bf916038074299b30c61ad3eb84cc0 | 757 | py | Python | uncommon/uncommon.py | MSAdministrator/uncommon | 2c8e12e1f93cd345331fc8833fa2f8f828f1dff5 | [
"MIT"
] | 1 | 2021-01-10T06:58:01.000Z | 2021-01-10T06:58:01.000Z | uncommon/uncommon.py | MSAdministrator/uncommon | 2c8e12e1f93cd345331fc8833fa2f8f828f1dff5 | [
"MIT"
] | null | null | null | uncommon/uncommon.py | MSAdministrator/uncommon | 2c8e12e1f93cd345331fc8833fa2f8f828f1dff5 | [
"MIT"
] | null | null | null | import os, random
class Uncommon:
    """Generate passphrases made of random words joined by a separator.

    Words are read lazily from the bundled ``data/words.txt`` file on the
    first call to :meth:`get` and cached at class level, so all instances
    share one in-memory word list.
    """

    # Absolute path to the bundled word list, resolved relative to this file.
    __DATA_PATH = os.path.abspath(
        os.path.join(
            os.path.dirname(__file__), 'data', 'words' + '.txt'
        )
    )
    # Class-level cache shared by all instances; populated by __load_words().
    __word_list = []

    def __init__(self, separator='-', count=3):
        """
        :param separator: string placed between the chosen words
        :param count: number of words per generated phrase
        """
        self.count = count
        self.separator = separator

    def __load_words(self):
        """Populate the shared word cache from the data file (one word per line)."""
        with open(self.__DATA_PATH, 'r') as word_file:
            # Iterate the file directly instead of readlines() to avoid
            # materializing the whole file as a list first.
            for line in word_file:
                self.__word_list.append(line.strip())

    def get(self):
        """Return ``count`` randomly chosen words joined by ``separator``."""
        if not self.__word_list:
            self.__load_words()
        words = [random.choice(self.__word_list) for _ in range(self.count)]
        # str() preserves the original behavior for non-string separators.
        return str(self.separator).join(words)
| 26.103448 | 65 | 0.574637 |
03f54786bbd38e694ece36d5d81861dd3cb563bb | 579 | py | Python | notebook/re_complie.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 174 | 2018-05-30T21:14:50.000Z | 2022-03-25T07:59:37.000Z | notebook/re_complie.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 5 | 2019-08-10T03:22:02.000Z | 2021-07-12T20:31:17.000Z | notebook/re_complie.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 53 | 2018-04-27T05:26:35.000Z | 2022-03-25T07:59:37.000Z | import re
# Demo: module-level re functions vs. a precompiled Pattern object.
s = 'aaa@xxx.com, bbb@yyy.com, ccc@zzz.net'
# re.match() anchors at the start of the string only.
m = re.match(r'([a-z]+)@([a-z]+)\.com', s)
print(m)
# <re.Match object; span=(0, 11), match='aaa@xxx.com'>
# re.sub() replaces every non-overlapping match.
result = re.sub(r'([a-z]+)@([a-z]+)\.com', 'new-address', s)
print(result)
# new-address, new-address, ccc@zzz.net
# re.compile() builds a reusable Pattern object.
p = re.compile(r'([a-z]+)@([a-z]+)\.com')
print(p)
# re.compile('([a-z]+)@([a-z]+)\\.com')
print(type(p))
# <class 're.Pattern'>
# Pattern methods mirror the module-level functions.
m = p.match(s)
print(m)
# <re.Match object; span=(0, 11), match='aaa@xxx.com'>
result = p.sub('new-address', s)
print(result)
# new-address, new-address, ccc@zzz.net
| 20.678571 | 60 | 0.580311 |
8c9280d1c3543615eb05e4e88e5e26a20d7d23b7 | 1,240 | py | Python | ant/custom/auto_load/__init__.py | lankors/yunAnt | 2f3ec4e2b7c37a3ed82c8ed836565eea655bfd21 | [
"Apache-2.0"
] | 3 | 2017-08-23T02:55:18.000Z | 2019-12-27T03:43:46.000Z | ant/custom/auto_load/__init__.py | lankors/yunAnt | 2f3ec4e2b7c37a3ed82c8ed836565eea655bfd21 | [
"Apache-2.0"
] | null | null | null | ant/custom/auto_load/__init__.py | lankors/yunAnt | 2f3ec4e2b7c37a3ed82c8ed836565eea655bfd21 | [
"Apache-2.0"
] | 1 | 2020-10-28T10:24:58.000Z | 2020-10-28T10:24:58.000Z | #!/usr/bin/python
#coding=utf-8
"""
author: wuqichao
mail: wqc2008@gmail.com
createtime: 2014-7-10 下午2:24:30
usage:
"""
__all__ = ['auto_load']
class auto_load(object):
    '''
    Dynamically import a module (or nested submodule) from a dotted path.
    '''
    def __init__(self, class_path):
        # Dotted path (e.g. 'package.module') resolved later by get_mod().
        self.class_path = class_path
    def import_and_get_mod(self, str, parent_mod=None):
        '''
        Recursively resolve the dotted path given in `str`.

        __import__() returns only the top-level package, so the remainder of
        the path is walked with getattr() one component at a time.
        '''
        parts = str.split('.')
        tail = '.'.join(parts[1:])
        if parent_mod is None:
            # First call: import, then drill into the returned top module.
            if len(parts) == 1:
                return __import__(str)
            return self.import_and_get_mod(tail, __import__(str))
        # We already hold a parent module: step down one component.
        current = getattr(parent_mod, parts[0])
        if len(parts) == 1:
            return current
        return self.import_and_get_mod(tail, current)
    def get_mod(self):
        '''
        Resolve and return the configured module; None when no path is set.
        '''
        if not self.class_path:
            return None
        return self.import_and_get_mod(self.class_path)
| 21.016949 | 78 | 0.545161 |
8a5c99466963050c7fed025223b4b6cd0274d909 | 496 | py | Python | happier/util.py | williamhogman/happier | 54420e37e20059909fb0d980541af0fa79480337 | [
"MIT"
] | 5 | 2020-06-22T16:13:08.000Z | 2020-06-24T08:43:23.000Z | happier/util.py | williamhogman/happier | 54420e37e20059909fb0d980541af0fa79480337 | [
"MIT"
] | null | null | null | happier/util.py | williamhogman/happier | 54420e37e20059909fb0d980541af0fa79480337 | [
"MIT"
import os
import os.path
from typing import Optional
# File names whose presence marks the root directory of a Python project.
ROOTS = {
    "pyproject.toml",
    "requirements.txt",
    "Pipfile",
}


def find_root(cwd: Optional[str] = None, max_jumps: int = 7) -> Optional[str]:
    """Walk upwards from *cwd* looking for a directory holding a root marker.

    :param cwd: directory to start from; defaults to the current working
        directory (previously the default returned ``os.getcwd()`` without
        searching at all — that was a bug).
    :param max_jumps: maximum number of directories to inspect before giving
        up; prevents an endless walk once the filesystem root is reached.
    :return: the first ancestor directory containing one of ``ROOTS``,
        or ``None`` when none is found within ``max_jumps`` levels.
    """
    if cwd is None:
        cwd = os.getcwd()
    if max_jumps == 0:
        return None
    files = {f for f in os.listdir(cwd) if os.path.isfile(os.path.join(cwd, f))}
    if files & ROOTS:
        return cwd
    # Recurse into the parent, consuming one jump (the original never
    # decremented max_jumps, so the recursion could loop forever at '/').
    return find_root(os.path.abspath(os.path.join(cwd, "..")), max_jumps - 1)
| 19.84 | 80 | 0.600806 |
55685c3dffcba3e6eba6fd2f54170d81c9849737 | 3,930 | py | Python | Python3/wda_functions.py | ChengzijunAixiaoli/POTD | 958e81599a8a4d4396685f3a541cf2d8aa382c16 | [
"Apache-2.0"
] | 3 | 2021-03-20T12:30:15.000Z | 2022-03-01T13:30:33.000Z | Python3/wda_functions.py | ChengzijunAixiaoli/POTD | 958e81599a8a4d4396685f3a541cf2d8aa382c16 | [
"Apache-2.0"
] | null | null | null | Python3/wda_functions.py | ChengzijunAixiaoli/POTD | 958e81599a8a4d4396685f3a541cf2d8aa382c16 | [
"Apache-2.0"
] | 2 | 2021-03-20T12:30:59.000Z | 2021-06-03T16:41:36.000Z | from scipy import linalg
import numpy as np
def pair_tensor(T, x1, x2):
    """Weighted scatter matrix of pairwise sample differences.

    For a coupling matrix ``T`` of shape (n, m) and samples ``x1`` (n, d),
    ``x2`` (m, d), returns the (d, d) matrix
    ``sum_ij T[i, j] * outer(x1[i] - x2[j], x1[i] - x2[j])``.
    """
    diff = x1[:, None] - x2            # (n, m, d) pairwise differences
    weighted = T[:, :, None] * diff    # scale each difference by its coupling
    return np.tensordot(weighted, diff, ([0, 1], [0, 1]))
def dist(x1, x2):
    """ Compute squared euclidean distance between samples (autograd)

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, returning
    an (n1, n2) matrix for inputs of shape (n1, d) and (n2, d).
    """
    sq1 = np.sum(np.square(x1), 1).reshape((-1, 1))
    sq2 = np.sum(np.square(x2), 1).reshape((1, -1))
    return sq1 + sq2 - 2 * np.dot(x1, x2.T)
def sinkhorn(w1, w2, M, reg, k):
    """Sinkhorn algorithm with fixed number of iteration (autograd)

    Computes an entropically regularized transport plan between marginals
    w1 and w2 for cost matrix M.  Note: lambda = 1/reg in the paper.
    """
    kernel = np.exp(-M / reg)
    u = np.ones(M.shape[0])
    v = np.ones(M.shape[1])
    for _ in range(k):
        # Alternating marginal projections.
        v = w2 / kernel.T.dot(u)
        u = w1 / kernel.dot(v)
    return u[:, np.newaxis] * kernel * v[np.newaxis, :]
def split_classes(X, y):
    """split samples in X by classes in y

    Returns one float32 array per distinct label, in sorted label order.
    """
    labels = np.unique(y)
    return [X[y == label].astype(np.float32) for label in labels]
def wda_eig(X, y, p, reg, P0, Breg=0, k=10, maxiter=100, verbose=0):
    """
    The function solves the ratio trace formulation of WDA.
    Parameters
    ----------
    X : ndarray, shape (n, d)
        Training samples.
    y : ndarray, shape (n,)
        Labels for training samples.
    p : int
        Size of dimensionality reduction.
    reg : float
        Wasserstein regularization term >0 (entropic regularization)
    P0 : ndarray, shape (d, p)
        Initial subspace for projection.
    Breg: float, optional, default set to 0
        Regularization for the B matrix in the denominator to make B positive definite
    k: int, optional, default set to 10
        Number of Sinkhorn iterations
    maxiter: int, optional, default set to 100
        Number of maximum number of iterations
    Returns
    -------
    P : ndarray, shape (d, p)
        Optimal transportation matrix for the given parameters
    proj : callable
        Projection function including mean centering
    obj: list
        List of angles s_k to measure the distance between consecutive subspaces
    """
    # NOTE(review): np.mean(X) is the mean over ALL entries (a scalar), so
    # centering subtracts one global value rather than per-feature means
    # (np.mean(X, axis=0)).  Looks suspicious — confirm against reference WDA.
    mx = np.mean(X)
    X -= mx.reshape((1, -1))
    # data split between classes
    d = X.shape[1]
    xc = split_classes(X, y)
    # compute uniform weighs
    wc = [np.ones((x.shape[0]), dtype=np.float32) / x.shape[0] for x in xc]
    P=P0
    obj = []
    for it in range(maxiter):
        # Accumulate between-class (loss_b) and within-class (loss_w)
        # transport-weighted scatter matrices for the current projection P.
        loss_b = np.zeros((d,d))
        loss_w = np.zeros((d,d))
        for i, xi in enumerate(xc):
            pxi = np.dot(xi, P)
            for j, xj in enumerate(xc[i:]):
                pxj = np.dot(xj, P)
                M = dist(pxi, pxj)
                G = sinkhorn(wc[i], wc[j + i], M, reg, k)
                # j == 0 is the class paired with itself -> within-class term.
                if j==0:
                    loss_w += pair_tensor(G, xi, xj)
                else:
                    loss_b += pair_tensor(G, xi, xj)
        # Generalized eigenproblem on the symmetrized scatter matrices;
        # Breg shifts the denominator to keep it positive definite.
        if Breg==0:
            w, V = linalg.eig((loss_b+loss_b.T)/2, (loss_w+loss_w.T)/2)
        else:
            w, V = linalg.eig((loss_b+loss_b.T)/2, (loss_w+loss_w.T)/2+Breg*np.eye(d))
        w=np.real(w)
        V=np.real(V)
        #idx = np.argsort(w.real)
        #Pnew = np.real(V[:, 0:p])
        # Keep the eigenvectors of the p largest eigenvalues.
        idx = np.argsort(-w)
        Pnew = V[:, idx[0:p]]
        Pinv = np.linalg.inv(P.T.dot(P))
        Pninv = np.linalg.inv(Pnew.T.dot(Pnew))
        ## s_k = ||sin\theta(P_{k-1}, P_k)||_2
        angle = np.linalg.norm((P.dot(Pinv.dot(P.T))-Pnew.dot(Pninv.dot(Pnew.T))),2)
        obj.append(angle)
        if (verbose==1):
            print("Iter: % 2d, angle: % 2.8f" %(it, angle))
        P=Pnew
        # Converged once consecutive subspaces are (numerically) identical.
        if (abs(angle)< 1e-3):
            break
    Popt = P
    def proj(X):
        # Apply the same (global-scalar) centering used during fitting.
        return (X - mx.reshape((1, -1))).dot(Popt)
    return Popt, proj, obj
8d180f1b3f316774f09a7bdf6987d694e1a7fc89 | 7,532 | py | Python | edb/schema/indexes.py | haikyuu/edgedb | 73125882a4eff337692ad10af4bfdf15eef341ab | [
"Apache-2.0"
] | null | null | null | edb/schema/indexes.py | haikyuu/edgedb | 73125882a4eff337692ad10af4bfdf15eef341ab | [
"Apache-2.0"
] | null | null | null | edb/schema/indexes.py | haikyuu/edgedb | 73125882a4eff337692ad10af4bfdf15eef341ab | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb import edgeql
from edb import errors
from edb.edgeql import ast as qlast
from . import abc as s_abc
from . import annos as s_anno
from . import delta as sd
from . import expr as s_expr
from . import inheriting
from . import name as sn
from . import objects as so
from . import referencing
class Index(referencing.ReferencedInheritingObject, s_anno.AnnotationSubject):
    # Schema object describing an index declared on some subject.
    # The schema object the index is declared on.
    subject = so.SchemaField(so.Object)
    # The index expression (cf. orig_expr below for pre-normalization text).
    expr = so.SchemaField(
        s_expr.Expression, coerce=True, compcoef=0.909)
    # Text representation of the original expression that's been
    # parsed and re-generated, but not normalized.
    orig_expr = so.SchemaField(
        str, default=None, coerce=True, allow_ddl_set=True,
        ephemeral=True)
    def __repr__(self):
        cls = self.__class__
        return '<{}.{} {!r} at 0x{:x}>'.format(
            cls.__module__, cls.__name__, self.id, id(self))
    __str__ = __repr__
    def get_displayname(self, schema) -> str:
        # Indexes are displayed by their original expression text.
        expr = self.get_expr(schema)
        return expr.origtext
class IndexableSubject(inheriting.InheritingObject):
    # Mixin for schema objects that may have indexes declared on them.
    indexes_refs = so.RefDict(
        attr='indexes',
        ref_cls=Index)
    # Indexes owned by this object, keyed by unqualified name.
    indexes = so.SchemaField(
        so.ObjectIndexByUnqualifiedName,
        inheritable=False, ephemeral=True, coerce=True, compcoef=0.909,
        default=so.ObjectIndexByUnqualifiedName)
    def add_index(self, schema, index):
        # Attach *index* to this object's 'indexes' reference collection.
        return self.add_classref(schema, 'indexes', index)
class IndexSourceCommandContext:
    # Context marker for commands on objects that can own indexes.
    pass
class IndexSourceCommand(inheriting.InheritingObjectCommand):
    # Base command for objects that can own indexes.
    pass
class IndexCommandContext(sd.ObjectCommandContext,
                          s_anno.AnnotationSubjectCommandContext):
    # Context pushed while an index command is being processed.
    pass
class IndexCommand(referencing.ReferencedInheritingObjectCommand,
                   schema_metaclass=Index,
                   context_class=IndexCommandContext,
                   referrer_context_class=IndexSourceCommandContext):
    @classmethod
    def _classname_from_ast(cls, schema, astnode, context):
        # Derive the index's schema name.  When declared inside a host
        # object (the usual case), the short name is specialized with the
        # referrer's name and expression-derived qualifiers so that several
        # indexes on the same object do not collide.
        referrer_ctx = cls.get_referrer_context(context)
        if referrer_ctx is not None:
            referrer_name = referrer_ctx.op.classname
            shortname = sn.Name(
                module='__',
                name=astnode.name.name,
            )
            quals = cls._classname_quals_from_ast(
                schema, astnode, shortname, referrer_name, context)
            name = sn.Name(
                module=referrer_name.module,
                name=sn.get_specialized_name(
                    shortname,
                    referrer_name,
                    *quals,
                ),
            )
        else:
            name = super()._classname_from_ast(schema, astnode, context)
        return name
    @classmethod
    def _classname_quals_from_ast(cls, schema, astnode, base_name,
                                  referrer_name, context):
        # Produce a name qualifier derived from the index expression text.
        expr_text = cls.get_orig_expr_text(schema, astnode, 'expr')
        if expr_text is None:
            # if not, then use the origtext directly from the expression
            expr = s_expr.Expression.from_ast(
                astnode.expr, schema, context.modaliases)
            expr_text = expr.origtext
        name = (cls._name_qual_from_exprs(schema, (expr_text,)),)
        return name
    def get_object(self, schema, context, *, name=None):
        # Re-raise lookup failures with a friendlier message naming the
        # index expression and its owner.
        try:
            return super().get_object(schema, context, name=name)
        except errors.InvalidReferenceError:
            referrer_ctx = self.get_referrer_context(context)
            referrer = referrer_ctx.scls
            expr = self.get_attribute_value('expr')
            raise errors.InvalidReferenceError(
                f"index {expr.origtext!r} does not exist on "
                f"{referrer.get_verbosename(schema)}"
            ) from None
class CreateIndex(IndexCommand, referencing.CreateReferencedInheritingObject):
    astnode = qlast.CreateIndex
    referenced_astnode = qlast.CreateIndex
    @classmethod
    def _cmd_tree_from_ast(cls, schema, astnode, context):
        # Capture the index expression from the AST, preserving the
        # original (pre-normalization) text when available.
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)
        orig_text = cls.get_orig_expr_text(schema, astnode, 'expr')
        cmd.set_attribute_value(
            'expr',
            s_expr.Expression.from_ast(
                astnode.expr,
                schema,
                context.modaliases,
                orig_text=orig_text,
            ),
        )
        return cmd
    @classmethod
    def as_inherited_ref_ast(cls, schema, context, name, parent):
        # Build the AST for an inherited copy of *parent*'s index,
        # re-parsing the stored expression text into an EdgeQL fragment.
        nref = cls.get_inherited_ref_name(schema, context, parent, name)
        astnode_cls = cls.referenced_astnode
        expr = parent.get_expr(schema)
        if expr is not None:
            expr_ql = edgeql.parse_fragment(expr.origtext)
        else:
            expr_ql = None
        return astnode_cls(
            name=nref,
            expr=expr_ql,
        )
    def get_ast_attr_for_field(self, field: str) -> Optional[str]:
        # Only the 'expr' field maps onto an attribute of the DDL AST node.
        if field == 'expr':
            return 'expr'
        else:
            return None
    def compile_expr_field(self, schema, context, field, value):
        if field.name == 'expr':
            # Compile the index expression with the host object anchored as
            # __subject__.  For non-pointer hosts the subject is a singleton
            # and serves as the implicit path prefix.
            parent_ctx = context.get_ancestor(IndexSourceCommandContext, self)
            subject_name = parent_ctx.op.classname
            subject = schema.get(subject_name, default=None)
            if not isinstance(subject, s_abc.Pointer):
                singletons = [subject]
                path_prefix_anchor = qlast.Subject
            else:
                singletons = []
                path_prefix_anchor = None
            return type(value).compiled(
                value,
                schema=schema,
                modaliases=context.modaliases,
                parent_object_type=self.get_schema_metaclass(),
                anchors={qlast.Subject: subject},
                path_prefix_anchor=path_prefix_anchor,
                singletons=singletons,
            )
        else:
            return super().compile_expr_field(schema, context, field, value)
class RenameIndex(IndexCommand, referencing.RenameReferencedInheritingObject):
    # Rename handled entirely by the generic referenced-object machinery.
    pass
class AlterIndex(IndexCommand, referencing.AlterReferencedInheritingObject):
    astnode = qlast.AlterIndex
class DeleteIndex(IndexCommand, inheriting.DeleteInheritingObject):
    astnode = qlast.DropIndex
    @classmethod
    def _cmd_tree_from_ast(cls, schema, astnode, context):
        # DROP INDEX still carries the expression; record it so the target
        # index can be identified by its expression-derived name.
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)
        cmd.set_attribute_value(
            'expr',
            s_expr.Expression.from_ast(
                astnode.expr, schema, context.modaliases),
        )
        return cmd
class RebaseIndex(IndexCommand,
                  inheriting.RebaseInheritingObject):
    # Rebase handled entirely by the generic inheriting-object machinery.
    pass
| 30.995885 | 78 | 0.63675 |
9cdfa8709fa9f37b1552aeb9bb06f33e71a7cc4c | 761 | py | Python | ext/datawald_sqsagency/setup.py | ideabosque/DataWald-AWS | 3ea905a40526dad3cb0eff92167c1e4230aa4aa9 | [
"MIT"
] | null | null | null | ext/datawald_sqsagency/setup.py | ideabosque/DataWald-AWS | 3ea905a40526dad3cb0eff92167c1e4230aa4aa9 | [
"MIT"
] | null | null | null | ext/datawald_sqsagency/setup.py | ideabosque/DataWald-AWS | 3ea905a40526dad3cb0eff92167c1e4230aa4aa9 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='DataWald-SQSAgency',
version='0.0.2',
author='Idea Bosque',
author_email='ideabosque@gmail.com',
description='DataWald SQSAgency.',
long_description=__doc__,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='Linux',
install_requires=['DataWald-Frontend', 'DataWald-BackOffice', 'AWS-SQSConnector'],
classifiers=[
'Programming Language :: Python',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 31.708333 | 86 | 0.660972 |
9be7bca6debb454ec007c51984636a8312491b93 | 11,161 | py | Python | pysdn/controller/topology.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | [
"BSD-3-Clause"
] | 1 | 2017-08-22T14:17:10.000Z | 2017-08-22T14:17:10.000Z | pysdn/controller/topology.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | [
"BSD-3-Clause"
] | 1 | 2021-03-26T00:47:22.000Z | 2021-03-26T00:47:22.000Z | pysdn/controller/topology.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | [
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
topology.py: Controller's topology parser
"""
import json
from pysdn.common.utils import dict_keys_dashed_to_underscored
class Topology():
    """ Class that represents Controller's view on a Network Topology instance.

        Parses a topology JSON string or dict into 'Node' and 'Link' helper
        objects and provides convenience accessors over them.

        NOTE: this module targets Python 2 ('basestring' below).
    """
    def __init__(self, topo_json=None, topo_dict=None):
        """Build a topology from a JSON string or a dict (at most one)."""
        self.topology_id = None
        self.nodes = []      # all Node objects (switches and hosts)
        self.links = []      # all Link objects (one entry per direction)
        self.switches = []   # subset of 'nodes' that are OpenFlow switches
        self.hosts = []      # subset of 'nodes' that are end hosts
        assert_msg = "[Topology] either '%s' or '%s' should be used, " \
            "not both" % ('topo_json', 'topo_dict')
        assert(not ((topo_json is not None) and
                    (topo_dict is not None))), assert_msg
        if (topo_dict is not None):
            self.__init_from_dict__(topo_dict)
            return
        if (topo_json is not None):
            self.__init_from_json__(topo_json)
            return
    def __init_from_json__(self, s):
        """Populate this object from a topology JSON string."""
        if (isinstance(s, basestring)):
            obj = json.loads(s)
            d = dict_keys_dashed_to_underscored(obj)
            for k, v in d.items():
                if ('topology_id' == k):
                    self.topology_id = v
                elif ('node' == k):
                    if (isinstance(v, list)):
                        for i in v:
                            node = Node(i)
                            self.add_node(node)
                elif ('link' == k):
                    if (isinstance(v, list)):
                        for i in v:
                            link = Link(i)
                            self.add_link(link)
                else:
                    assert(False)
        else:
            raise TypeError("[Topology] wrong argument type '%s'"
                            " (JSON 'string' is expected)" % type(s))
    def __init_from_dict__(self, d):
        """Populate this object from a topology dict (via its JSON form)."""
        if (isinstance(d, dict)):
            js = json.dumps(d)
            self.__init_from_json__(js)
        else:
            raise TypeError("[Topology] wrong argument type '%s'"
                            " ('dict' is expected)" % type(d))
    def to_string(self):
        """ Returns string representation of this object. """
        return str(vars(self))
    def add_node(self, node):
        """Register a Node, also indexing it as a switch or a host."""
        assert(isinstance(node, Node))
        self.nodes.append(node)
        if (node.is_switch()):
            self.switches.append(node)
        elif (node.is_host()):
            self.hosts.append(node)
    def add_link(self, link):
        """Register a Link."""
        assert(isinstance(link, Link))
        self.links.append(link)
    def get_id(self):
        """Return the topology identifier (None when not parsed)."""
        return self.topology_id
    def get_switch_ids(self):
        """Return the sorted list of switch node identifiers."""
        snames = []
        for n in self.nodes:
            if (n.is_switch()):
                snames.append(n.node_id)
        return sorted(snames)
    def get_host_ids(self):
        """Return the sorted list of host node identifiers."""
        snames = []
        for n in self.nodes:
            if (n.is_host()):
                snames.append(n.node_id)
        return sorted(snames)
    def get_switches_cnt(self):
        """Return the number of switches in the topology."""
        return len(self.switches)
    def get_hosts_cnt(self):
        """Return the number of hosts in the topology."""
        return len(self.hosts)
    def get_inter_switch_links_cnt(self):
        """Return the number of bidirectional switch-to-switch links.

        Each physical link appears once per direction, hence the division
        by two (floor division keeps the result an int on Python 3 too).
        """
        cnt = 0
        for l in self.links:
            if(l.is_switch_to_switch()):
                cnt += 1
        assert(cnt % 2 == 0)
        return cnt // 2
    def get_nodes(self):
        """Return all Node objects."""
        return self.nodes
    def get_switches(self):
        """Return switches sorted by their identifiers."""
        return sorted(self.switches, key=lambda n: n.get_id())
    def get_switch(self, switch_id):
        """Return the switch with the given id, or None."""
        for item in self.switches:
            if(item.get_id() == switch_id):
                return item
    def get_hosts(self):
        """Return all host Node objects."""
        return self.hosts
    def get_peer_list_for_node(self, node):
        """Return links that terminate at the given node.

        (A leftover debug print of the node id was removed here.)
        """
        plist = []
        for link in self.links:
            if(link.is_dst_node(node)):
                plist.append(link)
        return plist
    def get_peer_list_for_node_port_(self, node, pnum):
        """Return nodes attached to the given node's port number."""
        plist = []
        for link in self.links:
            if(link.is_dst_node_port(node, pnum)):
                src_node_id = link.get_src_node_id()
                if(src_node_id):
                    src_node = self.get_node_by_id(src_node_id)
                    if(src_node):
                        plist.append(src_node)
        return plist
    def get_node_by_id(self, node_id):
        """Return the Node with the given id, or None."""
        node = None
        for item in self.nodes:
            if item.get_id() == node_id:
                node = item
                break
        return node
class Node():
    """ A node (switch or host) in the topology instance.
        Helper class of the 'Topology' class; attributes mirror the keys of
        the dictionary the node was built from. """
    def __init__(self, d):
        assert(isinstance(d, dict))
        for key, value in d.items():
            setattr(self, key, value)
    def to_string(self):
        """ Returns string representation of this object. """
        return str(vars(self))
    def to_json(self):
        """ Returns JSON representation of this object. """
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)
    def is_switch(self):
        """True when this node's id marks it as an OpenFlow switch."""
        return self.node_id.startswith('openflow')
    def is_host(self):
        """True when this node's id marks it as an end host."""
        return self.node_id.startswith('host')
    def get_type_str(self):
        """Return 'host' or 'switch' depending on the node id."""
        kind = ""
        if self.is_host():
            kind = "host"
        elif self.is_switch():
            kind = "switch"
        else:
            assert(False)
        return kind
    def get_id(self):
        """Return this node's identifier."""
        return self.node_id
    def get_port_numbers(self):
        """Return the sorted list of this switch's port numbers (strings)."""
        ports = []
        if self.is_switch() and hasattr(self, 'termination_point'):
            tps = getattr(self, 'termination_point')
            assert(isinstance(tps, list))
            for tp in tps:
                if isinstance(tp, dict) and 'tp_id' in tp:
                    # tp_id looks like '<node_id>:<port>'; strip the prefix.
                    prefix = self.get_id() + ":"
                    ports.append(tp['tp_id'].replace(prefix, ''))
        return sorted(ports)
    def get_mac_address(self):
        """Return the host-tracker MAC address, or None when absent."""
        return getattr(self, 'host_tracker_service:id', None)
    def get_ip_address_for_mac(self, mac_addr):
        """Return the IP address paired with *mac_addr*, or None."""
        addresses = getattr(self, 'host_tracker_service:addresses', None)
        if isinstance(addresses, list):
            for entry in addresses:
                if isinstance(entry, dict) and 'mac' in entry and 'ip' in entry:
                    if entry['mac'] == mac_addr:
                        return entry['ip']
        return None
    def get_openflow_id(self):
        """Return the node id for switches, None for other node types."""
        return self.get_id() if self.is_switch() else None
class Link():
    """ A (unidirectional) link in the topology instance.
        Helper class of the 'Topology' class; attributes mirror the keys of
        the dictionary the link was built from ('source', 'destination',
        'link_id', ...). """
    def __init__(self, d):
        assert(isinstance(d, dict))
        for key, value in d.items():
            setattr(self, key, value)
    def to_string(self):
        """ Returns string representation of this object. """
        return str(vars(self))
    def is_switch_to_switch(self):
        """True for a link between two distinct OpenFlow switches."""
        src = self.source['source_node']
        dst = self.destination['dest_node']
        return (src.startswith('openflow') and
                dst.startswith('openflow') and
                src != dst)
    def is_loopback(self):
        """True for a link whose two ends are the same switch."""
        src = self.source['source_node']
        dst = self.destination['dest_node']
        return (src.startswith('openflow') and
                dst.startswith('openflow') and
                src == dst)
    def is_host_to_switch(self):
        """True for a host -> switch link."""
        src = self.source['source_node']
        dst = self.destination['dest_node']
        return src.startswith('host') and dst.startswith('openflow')
    def is_switch_to_host(self):
        """True for a switch -> host link."""
        src = self.source['source_node']
        dst = self.destination['dest_node']
        return src.startswith('openflow') and dst.startswith('host')
    def is_dst_node(self, node):
        """True when this link terminates at the given node."""
        if not hasattr(self, 'destination'):
            return False
        dst = self.destination
        return 'dest_node' in dst and dst['dest_node'] == node.get_id()
    def is_dst_node_port(self, node, pnum):
        """True when this link terminates at *node*'s port *pnum*."""
        if not hasattr(self, 'destination'):
            return False
        dst = self.destination
        if 'dest_node' not in dst or 'dest_tp' not in dst:
            return False
        node_id = node.get_id()
        return (dst['dest_node'] == node_id and
                dst['dest_tp'] == node_id + ":" + pnum)
    def get_src_node_id(self):
        """Return the id of this link's source node, or None."""
        src = getattr(self, 'source', None)
        if isinstance(src, dict) and 'source_node' in src:
            return src['source_node']
        return None
    def get_id(self):
        """Return this link's identifier (asserts when missing)."""
        if hasattr(self, 'link_id'):
            return self.link_id
        assert(0)
| 30.746556 | 79 | 0.556043 |
808f9d046e37b5450dddcdba43872df20f79809e | 1,680 | py | Python | ariadne/point_net_dev/loss.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 6 | 2020-08-28T22:44:07.000Z | 2022-01-24T20:53:00.000Z | ariadne/point_net_dev/loss.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 1 | 2021-02-20T09:38:46.000Z | 2021-02-20T09:38:46.000Z | ariadne/point_net_dev/loss.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 2 | 2021-10-04T09:25:06.000Z | 2022-02-09T09:09:09.000Z | import gin
import torch
import torch.nn as nn
import torch.nn.functional as F
class PointNetClassificationLoss(nn.Module):
    """Classification loss for PointNet: per-sample weighted cross-entropy
    plus a regularization term pushing the learned transform matrices
    towards orthogonality."""

    def __init__(self,
                 real_weight=1,
                 fake_weight=1,
                 is_softmax=False,
                 alpha=0.0001):
        super().__init__()
        self.real_weight = real_weight
        self.fake_weight = fake_weight
        self.alpha = alpha
        # Two-class softmax outputs use cross-entropy, single-logit
        # outputs use binary cross-entropy with logits.
        self.criterion = (F.cross_entropy if is_softmax
                          else F.binary_cross_entropy_with_logits)

    def _regularization_term(self, transform_matrix):
        # ||I - M @ M^T|| is zero exactly when M is orthogonal.
        size = transform_matrix.size(-1)
        batch = transform_matrix.size(0)
        identity = torch.eye(size, requires_grad=True)
        identity = identity.repeat(batch, 1, 1).to(transform_matrix.device)
        product = torch.bmm(transform_matrix,
                            transform_matrix.transpose(1, 2))
        return torch.norm(identity - product)

    def forward(self, preds, target):
        """Compute the weighted classification loss plus the (alpha-scaled)
        orthogonality penalty for the input/feature transform matrices."""
        # Per-element weights: real samples vs fake samples.
        weights = (target * self.real_weight
                   + (1 - target) * self.fake_weight)
        # Unpack (logits, input transform, feature transform).
        preds, input_transform, feature_transform = preds
        classification_loss = self.criterion(preds, target, weight=weights)
        regularization = 0
        if input_transform is not None:
            regularization += self._regularization_term(input_transform)
        if feature_transform is not None:
            regularization += self._regularization_term(feature_transform)
        regularization = self.alpha * regularization / float(preds.size(0))
        return classification_loss + regularization
| 35.744681 | 88 | 0.633929 |
5787fe8f72c935b584a0336f0895d6e1c0d5ee35 | 6,136 | py | Python | s3cache/s3cache.py | bkyryliuk/s3werkzeugcache | 04a99cd4d43de36d936722dfa66d5996b4f6c047 | [
"MIT"
] | 5 | 2017-05-05T00:57:31.000Z | 2021-05-14T13:44:28.000Z | s3cache/s3cache.py | bkyryliuk/s3werkzeugcache | 04a99cd4d43de36d936722dfa66d5996b4f6c047 | [
"MIT"
] | 3 | 2017-11-17T16:35:11.000Z | 2022-01-05T17:23:50.000Z | s3cache/s3cache.py | bkyryliuk/s3werkzeugcache | 04a99cd4d43de36d936722dfa66d5996b4f6c047 | [
"MIT"
] | 7 | 2017-09-21T22:48:23.000Z | 2021-05-14T13:10:50.000Z | """Results backends are used to store long-running query results
The Abstraction is flask-caching, which uses the BaseCache class from cachelib
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
import cPickle as pickle
except ImportError:
import pickle
import io
import logging
import boto3
from cachelib import BaseCache
class S3Cache(BaseCache):
    """S3 cache implementation.

    Adapted from examples in
    https://github.com/pallets/werkzeug/blob/master/werkzeug/contrib/cache.py.

    Timeout parameters are ignored as S3 doesn't support key-level expiration.
    To expire keys, set up an expiration policy as described in
    https://aws.amazon.com/blogs/aws/amazon-s3-object-expiration/.

    get_extra_args, put_extra_args, and head_extra_args can be used to provide
    additional arguments to the underlying boto3 s3 client.
    See: http://boto3.readthedocs.io/en/latest/reference/services/s3.html for more details
    """
    def __init__(
            self, s3_bucket, key_prefix, default_timeout=300,
            get_extra_args=None, put_extra_args=None, head_extra_args=None):
        """
        :param s3_bucket: name of the S3 bucket holding the cached values
        :param key_prefix: string (or zero-argument callable returning a
                           string) prepended to every cache key
        :param default_timeout: kept for BaseCache API compatibility; unused
                                because S3 has no per-key expiration
        :param get_extra_args: ExtraArgs dict for download_fileobj
        :param put_extra_args: ExtraArgs dict for upload_fileobj
        :param head_extra_args: extra keyword arguments for head_object
        """
        self.default_timeout = default_timeout
        self.s3_client = boto3.client('s3')
        self.bucket = s3_bucket
        self._key_prefix = key_prefix
        # Use None sentinels instead of mutable '{}' defaults so a single
        # shared dict is never accidentally reused across instances.
        self.get_extra_args = {} if get_extra_args is None else get_extra_args
        self.put_extra_args = {} if put_extra_args is None else put_extra_args
        self.head_extra_args = {} if head_extra_args is None else head_extra_args

    @property
    def key_prefix(self):
        # The prefix may be a plain string or a zero-argument callable
        # evaluated on every access.
        return (
            self._key_prefix
            if not hasattr(self._key_prefix, "__call__")
            else self._key_prefix()
        )

    def get(self, key):
        """Look up key in the cache and return the value for it.

        :param key: the key to be looked up.
        :returns: The value if it exists and is readable, else ``None``.
        """
        if not self._key_exists(key):
            return None
        else:
            value_file = io.BytesIO()
            try:
                self.s3_client.download_fileobj(
                    self.bucket,
                    self._full_s3_key(key),
                    value_file,
                    ExtraArgs=self.get_extra_args
                )
            except Exception as e:
                # Best effort: a failed download is treated as a cache miss.
                logging.warning('Error while trying to get key %s', key)
                logging.exception(e)
                return None
            else:
                value_file.seek(0)
                return pickle.load(value_file)

    def delete(self, key):
        """Delete `key` from the cache.

        :param key: the key to delete.
        :returns: Whether the key existed and has been deleted.
        :rtype: boolean
        """
        if not self._key_exists(key):
            return False
        else:
            try:
                self.s3_client.delete_objects(
                    Bucket=self.bucket,
                    Delete={
                        'Objects': [
                            {
                                'Key': self._full_s3_key(key)
                            }
                        ]
                    }
                )
            except Exception as e:
                logging.warning('Error while trying to delete key %s', key)
                logging.exception(e)
                return False
            else:
                return True

    def set(self, key, value, timeout=None):
        """Add a new key/value to the cache.

        If the key already exists, the existing value is overwritten.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: ignored; S3 has no per-key expiration (kept for
                        BaseCache API compatibility)
        :returns: ``True`` if key has been updated, ``False`` for backend
                  errors. Pickling errors, however, will raise a subclass of
                  ``pickle.PickleError``.
        :rtype: boolean
        """
        value_file = io.BytesIO()
        pickle.dump(value, value_file)
        try:
            value_file.seek(0)
            self.s3_client.upload_fileobj(
                value_file,
                self.bucket,
                self._full_s3_key(key),
                ExtraArgs=self.put_extra_args
            )
        except Exception as e:
            logging.warning('Error while trying to set key %s', key)
            logging.exception(e)
            return False
        else:
            return True

    def add(self, key, value, timeout=None):
        """Works like :meth:`set` but does not overwrite existing values.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: ignored; see :meth:`set`
        :returns: Same as :meth:`set`, but also ``False`` for already
                  existing keys.
        :rtype: boolean
        """
        # NOTE(review): the exists-then-set sequence is not atomic; two
        # concurrent adders may both succeed. Acceptable for cache usage.
        if self._key_exists(key):
            return False
        else:
            return self.set(key, value, timeout=timeout)

    def clear(self):
        """Clears the cache.

        Not supported for the S3 backend: clearing a whole bucket prefix is
        intentionally left to bucket lifecycle policies.

        :returns: Whether the cache has been cleared.
        :rtype: boolean
        """
        return False

    def _full_s3_key(self, key):
        """Convert a cache key to a full S3 key, including the key prefix."""
        return '%s%s' % (self.key_prefix, key)

    def _key_exists(self, key):
        """Determine whether the given key exists in the bucket."""
        try:
            self.s3_client.head_object(
                Bucket=self.bucket,
                Key=self._full_s3_key(key),
                **self.head_extra_args
            )
        except Exception:
            # head_object throws an exception when object doesn't exist
            return False
        else:
            return True
| 32.125654 | 120 | 0.566982 |
afbb6fd4d926b871f60e5eca41a0d6ca1e86b933 | 637 | py | Python | configs/xh.deeplab.mobilenet.paris.gt_discriminator/utils/argmax.py | Oliver-ss/Domain_Adaptation | 13e8edc89628f681c383bb2f7297527bbd510f09 | [
"MIT"
] | 6 | 2019-10-24T18:20:33.000Z | 2020-11-11T07:38:44.000Z | configs/xh.deeplab.mobilenet.paris.gt_discriminator/utils/argmax.py | Oliver-ss/Domain_Adaptation | 13e8edc89628f681c383bb2f7297527bbd510f09 | [
"MIT"
] | null | null | null | configs/xh.deeplab.mobilenet.paris.gt_discriminator/utils/argmax.py | Oliver-ss/Domain_Adaptation | 13e8edc89628f681c383bb2f7297527bbd510f09 | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Variable
class ArgMax(torch.autograd.Function):
    """Autograd-friendly argmax: the forward pass yields a one-hot encoding
    along dim 1, the backward pass is a straight-through identity."""

    @staticmethod
    def forward(ctx, input):
        winners = torch.argmax(input, 1, keepdim=True)
        one_hot = torch.zeros_like(input, requires_grad=True)
        # Mark the argmax position along dim 1 with a 1.
        one_hot.scatter_(1, winners, 1)
        return one_hot

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the incoming gradient unchanged.
        return grad_output
if __name__ == '__main__':
    # Smoke test: one-hot argmax forward and straight-through backward.
    a = torch.rand((1, 2, 2, 2), requires_grad=True)
    # Call the staticmethod directly; instantiating a Function subclass is
    # a legacy pattern.
    b = ArgMax.apply(a)
    c = b.sum() * 3
    c.backward()
    print('b: ', b)
    print('c: ', c)
    # 'b' is a non-leaf tensor, so only 'a' accumulates a gradient.
    print('b.grad: ', b.grad)
    print('a.grad: ', a.grad)
a5dfdb0cd8e9dc21f3ad6581718608a7ec50ce69 | 8,468 | py | Python | test/test_getpy.py | ksob/getpy | 8baa4e1062b94e5ea5a8f6fd07ff3fed98887550 | [
"MIT"
] | 1 | 2019-11-14T13:10:33.000Z | 2019-11-14T13:10:33.000Z | test/test_getpy.py | ksob/getpy | 8baa4e1062b94e5ea5a8f6fd07ff3fed98887550 | [
"MIT"
] | null | null | null | test/test_getpy.py | ksob/getpy | 8baa4e1062b94e5ea5a8f6fd07ff3fed98887550 | [
"MIT"
] | 2 | 2020-06-24T05:58:04.000Z | 2020-08-03T21:22:53.000Z | import pytest
import numpy as np
import getpy as gp
standard = pytest.mark.standard
slow = pytest.mark.slow
@standard
def test_getpy_vectorized_methods():
    """Exercise the vectorized Dict API end to end: bulk store, iteration,
    comparison against a plain Python dict, bulk lookup, membership tests
    and the in-place arithmetic/bitwise operations."""
    key_type = np.dtype('u8')
    value_type = np.dtype('u8')
    gp_dict = gp.Dict(key_type, value_type)
    keys = np.random.randint(1, 1000, size=200, dtype=key_type)
    values = np.random.randint(1, 1000, size=200, dtype=value_type)
    gp_dict[keys] = values
    # Both iteration protocols must work without raising.
    iterated_keys = [k for k in gp_dict]
    iterated_items = [(k, v) for k, v in gp_dict.items()]
    assert len(gp_dict) == len(np.unique(keys))
    # Mirror the insertions into a plain dict: for duplicate keys the last
    # written value wins in both containers.
    p_dict = {}
    for k, v in zip(keys, values):
        p_dict[k] = v
    assert len(gp_dict) == len(p_dict)
    assert sorted([(k, v) for k, v in gp_dict.items()]) == sorted(p_dict.items())
    select_keys = np.random.choice(keys, size=100).astype(key_type)
    select_values = gp_dict[select_keys]
    random_keys = np.random.randint(1, 1000, size=500).astype(key_type)
    random_keys_mask = gp_dict.contains(random_keys)
    mask_keys = random_keys[random_keys_mask]
    mask_values = gp_dict[mask_keys]
    # In-place vectorized operators must accept existing keys.
    gp_dict.iadd(keys, values)
    gp_dict.isub(keys, values)
    gp_dict.ior(keys, values)
    gp_dict.iand(keys, values)
@standard
def test_getpy_vectorized_methods_with_default():
    """Same vectorized operations on a Dict built with default_value=0:
    looking up a missing key must yield the default."""
    key_type = np.dtype('u8')
    value_type = np.dtype('u8')
    gp_dict = gp.Dict(key_type, value_type, default_value=0)
    keys = np.random.randint(1, 1000, size=200, dtype=key_type)
    values = np.random.randint(1, 1000, size=200, dtype=value_type)
    gp_dict[keys] = values
    iterated_keys = [k for k in gp_dict]
    iterated_items = [(k, v) for k, v in gp_dict.items()]
    select_keys = np.random.choice(keys, size=100)
    select_values = gp_dict[select_keys]
    random_keys = np.random.randint(1, 1000, size=500, dtype=key_type)
    random_keys_mask = gp_dict.contains(random_keys)
    random_values_with_defaults = gp_dict[random_keys]
    # Keys absent from the dict map to the default 0; stored values are
    # drawn from [1, 1000) so they can never be 0 themselves.
    for contained, value in zip(random_keys_mask, random_values_with_defaults):
        if not contained:
            assert value == 0
        else:
            assert value != 0
    one_values = np.ones(500, dtype=value_type)
    gp_dict.iadd(random_keys, one_values)
    gp_dict.isub(random_keys, one_values)
    gp_dict.ior(random_keys, one_values)
    gp_dict.iand(random_keys, one_values)
@standard
def test_getpy_vectorized_methods_with_bytearray_dtype():
    """Vectorized operations on a Dict whose values are fixed-size byte
    arrays (bytearray50, i.e. 400 packed bits per value)."""
    key_type = np.dtype('u8')
    value_type = gp.types['bytearray50']
    gp_dict = gp.Dict(key_type, value_type)
    keys = np.random.randint(1, 1000, size=200, dtype=key_type)
    # dtype=bool instead of np.bool: the np.bool alias was deprecated in
    # NumPy 1.20 and removed in 1.24 (it was always the builtin bool).
    values = np.packbits([np.array([1, 0, 1, 0, 1, 0, 1, 0,
                                    1, 1, 1, 1, 1, 1, 1, 1]*25, dtype=bool)]*200, axis=1).view(value_type)
    gp_dict[keys] = values
    iterated_keys = [key for key in gp_dict]
    iterated_keys_and_values = [(key, value) for key, value in gp_dict.items()]
    select_keys = np.random.choice(keys, size=100)
    select_values = gp_dict[select_keys]
    random_keys = np.random.randint(1, 1000, size=500, dtype=key_type)
    random_keys_mask = gp_dict.contains(random_keys)
    mask_keys = random_keys[random_keys_mask]
    mask_values = gp_dict[mask_keys]
    gp_dict.iadd(keys, values)
    gp_dict.isub(keys, values)
    gp_dict.ior(keys, values)
    gp_dict.iand(keys, values)
@standard
def test_getpy_types():
    """Smoke-test every supported (key, value) dtype combination: store ten
    entries and read them back."""
    def make_array(dtype):
        # Unicode dtypes get repeated digit strings, numeric dtypes 0..9.
        if dtype.kind == 'U':
            return np.array(['0123456789'*10 for _ in range(10)], dtype=dtype)
        return np.array(range(10), dtype=dtype)
    for key_type, value_type in gp.dict_types:
        gp_dict = gp.Dict(key_type, value_type)
        keys = make_array(key_type)
        values = make_array(value_type)
        gp_dict[keys] = values
        values = gp_dict[keys]
@standard
@pytest.mark.timeout(1)
def test_getpy_dump_load():
    """Round-trip a small dict through dump()/load() and check equality."""
    key_type = np.dtype('u8')
    value_type = np.dtype('u8')
    gp_dict_1 = gp.Dict(key_type, value_type)
    keys = np.random.randint(1, 1000, size=10**1, dtype=key_type)
    values = np.random.randint(1, 1000, size=10**1, dtype=value_type)
    gp_dict_1[keys] = values
    # Write to a temporary file so the test neither depends on a
    # pre-existing 'test/' working directory nor leaves artifacts behind.
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.hashtable.bin')
    os.close(fd)
    try:
        gp_dict_1.dump(path)
        gp_dict_2 = gp.Dict(key_type, value_type)
        gp_dict_2.load(path)
        assert gp_dict_1 == gp_dict_2
    finally:
        os.remove(path)
@standard
@pytest.mark.timeout(1)
def test_getpy_big_dict_uint32_uint32():
    """Performance guard: 100 bulk inserts of 10**4 random uint32 pairs
    must complete within the 1 s timeout."""
    key_type = np.dtype('u4')
    value_type = np.dtype('u4')
    gp_dict = gp.Dict(key_type, value_type)
    values = np.random.randint(10**9, size=10**4, dtype=value_type)
    for i in range(10**2):
        keys = np.random.randint(10**9, size=10**4, dtype=key_type)
        gp_dict[keys] = values
@standard
@pytest.mark.timeout(1)
def test_getpy_big_dict_uint64_uint64():
    """Performance guard: 100 bulk inserts of 10**4 random uint64 pairs
    must complete within the 1 s timeout."""
    key_type = np.dtype('u8')
    value_type = np.dtype('u8')
    gp_dict = gp.Dict(key_type, value_type)
    values = np.random.randint(10**15, size=10**4, dtype=value_type)
    for i in range(10**2):
        keys = np.random.randint(10**15, size=10**4, dtype=key_type)
        gp_dict[keys] = values
@pytest.mark.timeout(1)
def test_getpy_big_dict_uint64_bytearray8():
    """Performance guard: 100 bulk inserts of bytearray8 values must
    complete within the 1 s timeout."""
    key_type = np.dtype('u8')
    value_type = gp.types['bytearray8']
    gp_dict = gp.Dict(key_type, value_type)
    # dtype=bool instead of np.bool: the alias was removed in NumPy 1.24.
    values = np.packbits([np.array([1, 0, 1, 0, 1, 0, 1, 0,
                                    1, 1, 1, 1, 1, 1, 1, 1]*4, dtype=bool)]*10**4, axis=1).view(value_type)
    for i in range(10**2):
        keys = np.random.randint(10**15, size=10**4, dtype=key_type)
        gp_dict[keys] = values
@standard
@pytest.mark.timeout(1)
def test_getpy_big_dict_uint64_lookup():
    """Performance guard: 100 bulk lookups of 10**5 keys must complete
    within the 1 s timeout."""
    key_type = np.dtype('u8')
    value_type = np.dtype('u8')
    gp_dict = gp.Dict(key_type, value_type)
    keys = np.random.randint(10**15, size=10**5, dtype=key_type)
    values = np.random.randint(10**15, size=10**5, dtype=value_type)
    gp_dict[keys] = values
    for i in range(10**2):
        values = gp_dict[keys]
@standard
@pytest.mark.timeout(5)
def test_getpy_very_big_dict_uint32_uint32():
    """Performance guard: 100 bulk inserts of 10**5 uint32 pairs within 5 s."""
    key_type = np.dtype('u4')
    value_type = np.dtype('u4')
    gp_dict = gp.Dict(key_type, value_type)
    values = np.random.randint(10**9, size=10**5, dtype=value_type)
    for i in range(10**2):
        keys = np.random.randint(10**9, size=10**5, dtype=key_type)
        gp_dict[keys] = values
@standard
@pytest.mark.timeout(5)
def test_getpy_very_big_dict_uint64_uint64():
    """Performance guard: 100 bulk inserts of 10**5 uint64 pairs within 5 s."""
    key_type = np.dtype('u8')
    value_type = np.dtype('u8')
    gp_dict = gp.Dict(key_type, value_type)
    values = np.random.randint(10**15, size=10**5, dtype=value_type)
    for i in range(10**2):
        keys = np.random.randint(10**15, size=10**5, dtype=key_type)
        gp_dict[keys] = values
@standard
@pytest.mark.timeout(5)
def test_getpy_very_big_dict_uint64_bytearray8():
    """Performance guard: 100 bulk inserts of bytearray8 values within 5 s."""
    key_type = np.dtype('u8')
    value_type = gp.types['bytearray8']
    gp_dict = gp.Dict(key_type, value_type)
    # dtype=bool instead of np.bool: the alias was removed in NumPy 1.24.
    values = np.packbits([np.array([1, 0, 1, 0, 1, 0, 1, 0,
                                    1, 1, 1, 1, 1, 1, 1, 1]*4, dtype=bool)]*10**5, axis=1).view(value_type)
    for i in range(10**2):
        keys = np.random.randint(10**15, size=10**5, dtype=key_type)
        gp_dict[keys] = values
@standard
@pytest.mark.timeout(5)
def test_getpy_very_big_dict_uint64_bytearray16():
    """Performance guard: 100 bulk inserts of bytearray16 values within 5 s."""
    key_type = np.dtype('u8')
    value_type = gp.types['bytearray16']
    gp_dict = gp.Dict(key_type, value_type)
    values = np.packbits([np.array([1, 0, 1, 0, 1, 0, 1, 0,
                                    1, 1, 1, 1, 1, 1, 1, 1]*8, dtype=bool)]*10**5, axis=1).view(value_type)
    for i in range(10**2):
        keys = np.random.randint(10**15, size=10**5, dtype=key_type)
        gp_dict[keys] = values
@standard
@pytest.mark.timeout(5)
def test_getpy_very_big_dict_uint64_bytearray32():
    """Performance guard: 100 bulk inserts of bytearray32 values within 5 s."""
    key_type = np.dtype('u8')
    value_type = gp.types['bytearray32']
    gp_dict = gp.Dict(key_type, value_type)
    values = np.packbits([np.array([1, 0, 1, 0, 1, 0, 1, 0,
                                    1, 1, 1, 1, 1, 1, 1, 1]*16, dtype=bool)]*10**5, axis=1).view(value_type)
    for i in range(10**2):
        keys = np.random.randint(10**15, size=10**5, dtype=key_type)
        gp_dict[keys] = values
| 28.705085 | 111 | 0.650213 |
e98d584594d2bf7d6be3c678356f748e437fa5de | 145 | py | Python | order/admin.py | musojonpython/my_taxi | 3ded68bd4aaba6ee4e91910fc4a0884d3ab56825 | [
"MIT"
] | 2 | 2021-01-25T09:31:43.000Z | 2021-01-25T17:33:19.000Z | order/admin.py | musojonpython/my_taxi | 3ded68bd4aaba6ee4e91910fc4a0884d3ab56825 | [
"MIT"
] | null | null | null | order/admin.py | musojonpython/my_taxi | 3ded68bd4aaba6ee4e91910fc4a0884d3ab56825 | [
"MIT"
] | 1 | 2020-10-06T14:40:28.000Z | 2020-10-06T14:40:28.000Z | from django.contrib import admin
from .models import OrderStatus, AcceptOrder
# Expose the order workflow models in the Django admin site.
admin.site.register(OrderStatus)
admin.site.register(AcceptOrder)
| 24.166667 | 44 | 0.841379 |
67575a1b20af35277e0ee12f6a373168cc439f7b | 24,923 | py | Python | termtosvg/anim.py | xqms/termtosvg | 7d96ad4f0ede0ed91fdc3cf3263cb7f16d347959 | [
"BSD-3-Clause"
] | null | null | null | termtosvg/anim.py | xqms/termtosvg | 7d96ad4f0ede0ed91fdc3cf3263cb7f16d347959 | [
"BSD-3-Clause"
] | null | null | null | termtosvg/anim.py | xqms/termtosvg | 7d96ad4f0ede0ed91fdc3cf3263cb7f16d347959 | [
"BSD-3-Clause"
] | null | null | null | import copy
import io
import os.path
import pkgutil
from collections import namedtuple
from itertools import groupby
import pyte.graphics
import pyte.screens
from lxml import etree
from wcwidth import wcswidth
# Ugliest hack: Replace the first 16 colors rgb values by their names so that
# termtosvg can distinguish FG_BG_256[0] (which defaults to black #000000 but
# can be styled with themes) from FG_BG_256[16] (which is also black #000000
# but should be displayed as is.
_COLORS = ['black', 'red', 'green', 'brown', 'blue', 'magenta', 'cyan', 'white']
_BRIGHTCOLORS = ['bright{}'.format(color) for color in _COLORS]
NAMED_COLORS = _COLORS + _BRIGHTCOLORS
pyte.graphics.FG_BG_256 = NAMED_COLORS + pyte.graphics.FG_BG_256[16:]
# Id for the very last SVG animation. This is used to make the first animations
# start when the last one ends (animation looping)
LAST_ANIMATION_ID = 'anim_last'
# Background rectangle SVG element
_BG_RECT_TAG_ATTRIBUTES = {
    'class': 'background',
    'height': '100%',
    'width': '100%',
    'x': '0',
    'y': '0'
}
# NOTE(review): this is a single shared lxml element; appending it to a tree
# moves it out of its previous parent — confirm every consumer deep-copies
# the tree afterwards.
BG_RECT_TAG = etree.Element('rect', _BG_RECT_TAG_ATTRIBUTES)
# Default size for a character cell rendered as SVG.
CELL_WIDTH = 8
CELL_HEIGHT = 15
# The number of character cells to leave when placing successive frames
# so content does not bleed into adjacent frames
FRAME_CELL_SPACING = 1
# XML namespaces
SVG_NS = 'http://www.w3.org/2000/svg'
TERMTOSVG_NS = 'https://github.com/nbedos/termtosvg'
XLINK_NS = 'http://www.w3.org/1999/xlink'
NAMESPACES = {
    'svg': SVG_NS,
    'termtosvg': TERMTOSVG_NS,
    'xlink': XLINK_NS,
}
class TemplateError(Exception):
    """Raised when an SVG template is malformed or misses required elements."""
    pass
# Base namedtuple carrying the text content and styling of one screen cell.
_CELL_ATTRIBUTES = ['text', 'color', 'background_color', 'bold', 'italics',
                    'underscore', 'strikethrough']
_CharacterCell = namedtuple('_CharacterCell', _CELL_ATTRIBUTES)
# Set default values for last 6 arguments
_CharacterCell.__new__.__defaults__ = ('foreground', 'background', False, False,
                                       False, False)
_CharacterCell.__doc__ = 'Representation of a character cell'
_CharacterCell.text.__doc__ = 'Text content of the cell'
_CharacterCell.bold.__doc__ = 'Bold modificator flag'
_CharacterCell.italics.__doc__ = 'Italics modificator flag'
_CharacterCell.underscore.__doc__ = 'Underscore modificator flag'
_CharacterCell.strikethrough.__doc__ = 'Strikethrough modificator flag'
_CharacterCell.color.__doc__ = 'Color of the text'
_CharacterCell.background_color.__doc__ = 'Background color of the cell'
class CharacterCell(_CharacterCell):
    @classmethod
    def from_pyte(cls, char):
        """Create a CharacterCell from a pyte character"""
        # Foreground: map to a CSS class name ('foreground', 'colorN') or
        # keep a '#rrggbb' literal.
        if char.fg == 'default':
            text_color = 'foreground'
        else:
            if char.bold and not str(char.fg).startswith('bright'):
                # Bold text uses the bright variant of its named color.
                named_color = 'bright{}'.format(char.fg)
            else:
                named_color = char.fg
            if named_color in NAMED_COLORS:
                text_color = 'color{}'.format(NAMED_COLORS.index(named_color))
            elif len(char.fg) == 6:
                # Validate the value as hexadecimal (int() raises
                # ValueError otherwise), then use it verbatim.
                int(char.fg, 16)
                text_color = '#{}'.format(char.fg)
            else:
                raise ValueError('Invalid foreground color: {}'.format(char.fg))
        # Background: same mapping as above.
        if char.bg == 'default':
            background_color = 'background'
        elif char.bg in NAMED_COLORS:
            background_color = 'color{}'.format(NAMED_COLORS.index(char.bg))
        elif len(char.bg) == 6:
            # Validate the value as hexadecimal.
            int(char.bg, 16)
            background_color = '#{}'.format(char.bg)
        else:
            raise ValueError('Invalid background color')
        if char.reverse:
            # Reverse video: swap foreground and background.
            text_color, background_color = background_color, text_color
        return CharacterCell(char.data, text_color, background_color,
                             char.bold, char.italics, char.underscore,
                             char.strikethrough)
class ConsecutiveWithSameAttributes:
    """Callable to be used as a key for itertools.groupby to group together
    consecutive elements of a list with the same attributes"""
    def __init__(self, attributes):
        self.group_index = None
        self.last_index = None
        self.attributes = attributes
        self.last_key_attributes = None

    def __call__(self, arg):
        index, obj = arg
        current = {name: getattr(obj, name) for name in self.attributes}
        # Start a new group whenever the index sequence is broken or any
        # tracked attribute changed since the previous element.
        consecutive = self.last_index == index - 1
        if not consecutive or self.last_key_attributes != current:
            self.group_index = index
        self.last_index = index
        self.last_key_attributes = current
        return self.group_index, current
def render_animation(frames, geometry, filename, template,
                     cell_width=CELL_WIDTH, cell_height=CELL_HEIGHT):
    """Render 'frames' as a single animated SVG document written to
    'filename', using the given template and cell geometry."""
    root = _render_preparation(geometry, template, cell_width, cell_height)
    _, screen_height = geometry
    root = _render_animation(screen_height, frames, root, cell_width,
                             cell_height)
    with open(filename, 'wb') as output_file:
        output_file.write(etree.tostring(root))
def render_still_frames(frames, geometry, directory, template,
                        cell_width=CELL_WIDTH, cell_height=CELL_HEIGHT):
    """Render every frame as a standalone SVG file inside 'directory',
    numbered termtosvg_00000.svg, termtosvg_00001.svg, ..."""
    root = _render_preparation(geometry, template, cell_width, cell_height)
    stills = _render_still_frames(frames, root, cell_width, cell_height)
    for index, frame_root in enumerate(stills):
        filename = os.path.join(directory,
                                'termtosvg_{:05}.svg'.format(index))
        with open(filename, 'wb') as output_file:
            output_file.write(etree.tostring(frame_root))
def _render_preparation(geometry, template, cell_width, cell_height):
    """Resize the template for 'geometry' and return its root with an
    emptied screen area containing only the background rectangle."""
    root = resize_template(template, geometry, cell_width, cell_height)
    screen = root.find('.//{{{namespace}}}svg[@id="screen"]'
                       .format(namespace=SVG_NS))
    if screen is None:
        raise ValueError('Missing tag: <svg id="screen" ...>...</svg>')
    # Drop whatever placeholder content the template ships with.
    for child in screen.getchildren():
        screen.remove(child)
    screen.append(BG_RECT_TAG)
    return root
def _render_still_frames(frames, root, cell_width, cell_height):
    """Yield one deep-copied SVG root per frame, with the frame's content
    and text-group definitions embedded and CSS support applied."""
    for frame in frames:
        group, definitions = _render_timed_frame(
            offset=0,
            buffer=frame.buffer,
            cell_height=cell_height,
            cell_width=cell_width,
            definitions={}
        )
        # Every still frame gets its own copy of the prepared document.
        frame_root = copy.deepcopy(root)
        screen = frame_root.find('.//{{{namespace}}}svg[@id="screen"]'
                                 .format(namespace=SVG_NS))
        if screen is None:
            raise ValueError('Missing tag: <svg id="screen" ...>...</svg>')
        defs = etree.SubElement(screen, 'defs')
        for definition in definitions.values():
            defs.append(definition)
        screen.append(group)
        _embed_css(frame_root)
        yield frame_root
def _render_animation(screen_height, frames, root, cell_width, cell_height):
    """Stack every frame vertically inside a 'screen_view' group and record
    the vertical offset at which it must be scrolled into view at its time.

    :param screen_height: Height of the screen in character cells
    :param frames: Iterable of frames; each exposes .buffer, .time, .duration
    :param root: Root element of the SVG document (modified in place)
    :param cell_width: Width of a character cell in pixels
    :param cell_height: Height of a character cell in pixels
    :return: The modified root element
    """
    svg_screen_tag = root.find('.//{{{namespace}}}svg[@id="screen"]'
                               .format(namespace=SVG_NS))
    if svg_screen_tag is None:
        raise ValueError('Missing tag: <svg id="screen" ...>...</svg>')
    screen_view = etree.Element('g', attrib={'id': 'screen_view'})
    # Text-group definitions are accumulated across frames so identical
    # lines are defined once and referenced via <use>.
    definitions = {}
    # Maps a frame's start time to the (negative) vertical offset that
    # brings it into view.
    timings = {}
    animation_duration = None
    for frame_count, frame in enumerate(frames):
        # To prevent line jumping up and down by one pixel between two frames,
        # add h % 2 so that offset is an even number. (issue noticed in
        # Firefox only)
        h = screen_height + FRAME_CELL_SPACING
        offset = frame_count * (h + h % 2) * cell_height
        frame_group, frame_definitions = _render_timed_frame(
            offset=offset,
            buffer=frame.buffer,
            cell_height=cell_height,
            cell_width=cell_width,
            definitions=definitions
        )
        screen_view.append(frame_group)
        # After the loop this holds the end time of the last frame, i.e.
        # the total animation duration.
        animation_duration = frame.time + frame.duration
        timings[frame.time] = -offset
        definitions.update(frame_definitions)
    tree_defs = etree.SubElement(svg_screen_tag, 'defs')
    for definition in definitions.values():
        tree_defs.append(definition)
    svg_screen_tag.append(screen_view)
    _add_animation(root, timings, animation_duration)
    return root
def _add_animation(root, timings, animation_duration):
    """Delegate animation generation to the animator declared by the
    template's 'animation' settings element ('css' or 'waapi')."""
    animators = {
        'css': _embed_css,
        'waapi': _embed_waapi,
    }
    settings = root.find('.//{{{}}}defs/{{{}}}template_settings'
                         .format(SVG_NS, TERMTOSVG_NS))
    if settings is None:
        raise TemplateError('Missing "template_settings" element in definitions')
    animation = settings.find('{{{}}}animation[@type]'.format(TERMTOSVG_NS))
    if animation is None:
        raise TemplateError('Missing or invalid "animation" element in "template_settings"')
    animator = animators.get(animation.attrib['type'].lower())
    if animator is None:
        raise TemplateError("Attribute 'type' of element 'animation' must be one of {}"
                            .format(', '.join(animators.keys())))
    animator(root, timings, animation_duration)
def _render_timed_frame(offset, buffer, cell_height, cell_width, definitions):
    """Return a group element containing an SVG version of the provided frame.

    :param buffer: 2D array of CharacterCells
    :param cell_height: Height of a character cell in pixels
    :param cell_width: Width of a character cell in pixels
    :param definitions: Existing definitions (updated in place)
    :return: A tuple consisting of a group element and new definitions
    """
    frame_group = etree.Element('g')
    new_definitions = {}
    for row_number in buffer:
        if not buffer[row_number]:
            # Nothing to draw for an empty row.
            continue
        # Lines rendered earlier in this frame may already have produced
        # reusable definitions.
        known = {**definitions, **new_definitions}
        tags, line_definitions = _render_line(offset,
                                              row_number,
                                              buffer[row_number],
                                              cell_height,
                                              cell_width,
                                              known)
        for tag in tags:
            frame_group.append(tag)
        new_definitions.update(line_definitions)
    return frame_group, new_definitions
def _render_line(offset, row_number, row, cell_height, cell_width, definitions):
    """Render one screen line: background rectangles plus a reusable text
    group referenced through a <use> element."""
    line_height = offset + row_number * cell_height
    tags = _render_line_bg_colors(screen_line=row,
                                  height=line_height,
                                  cell_height=cell_height,
                                  cell_width=cell_width)
    # Gather all text elements of this line into a single group.
    text_group = etree.Element('g')
    for text_tag in _render_characters(row, cell_width):
        text_group.append(text_tag)
    # Reuse an identical group from 'definitions' when available, otherwise
    # register this group under a fresh sequential id.
    serialized = etree.tostring(text_group)
    if serialized in definitions:
        group_id = definitions[serialized].attrib['id']
        new_definitions = {}
    else:
        group_id = 'g{}'.format(len(definitions) + 1)
        assert group_id not in definitions.values()
        text_group.attrib['id'] = group_id
        new_definitions = {serialized: text_group}
    # Reference the (shared) definition at this line's vertical position.
    use_attributes = {
        '{{{}}}href'.format(XLINK_NS): '#{}'.format(group_id),
        'y': str(line_height),
    }
    tags.append(etree.Element('use', use_attributes))
    return tags, new_definitions
def _make_rect_tag(column, length, height, cell_width, cell_height, background_color):
    """Build a 'rect' element covering 'length' cells starting at 'column'.

    A '#rrggbb' color becomes a 'fill' attribute; any other value is used
    as a CSS class name so themes can style it."""
    rect_attributes = {
        'x': str(column * cell_width),
        'y': str(height),
        'width': str(length * cell_width),
        'height': str(cell_height)
    }
    color_attribute = 'fill' if background_color.startswith('#') else 'class'
    rect_attributes[color_attribute] = background_color
    return etree.Element('rect', rect_attributes)
def _render_line_bg_colors(screen_line, height, cell_height, cell_width):
    """Return a list of 'rect' tags representing the background of 'screen_line'

    If consecutive cells have the same background color, a single 'rect' tag is
    returned for all these cells.
    If a cell background uses default_bg_color, no 'rect' will be generated for
    this cell since the default background is always displayed.

    :param screen_line: Mapping between column numbers and CharacterCells
    :param height: Vertical position of the line on the screen in pixels
    :param cell_height: Height of the a character cell in pixels
    :param cell_width: Width of a character cell in pixels
    """
    # Cells drawn with the default background need no rectangle at all.
    colored_cells = [(column, cell)
                     for column, cell in sorted(screen_line.items())
                     if cell.background_color != 'background']
    key = ConsecutiveWithSameAttributes(['background_color'])
    rect_tags = []
    for (column, attributes), group in groupby(colored_cells, key):
        run_text = ''.join(item[1].text for item in group)
        rect_tags.append(_make_rect_tag(column,
                                        wcswidth(run_text),
                                        height,
                                        cell_width,
                                        cell_height,
                                        attributes['background_color']))
    return rect_tags
def _make_text_tag(column, attributes, text, cell_width):
    """Build SVG text element based on content and style attributes"""
    text_attributes = {
        'x': str(column * cell_width),
        # textLength pins the rendered width to the cell grid.
        'textLength': str(wcswidth(text) * cell_width),
    }
    if attributes['bold']:
        text_attributes['font-weight'] = 'bold'
    if attributes['italics']:
        text_attributes['font-style'] = 'italic'
    # Underline and strikethrough can be combined in one declaration.
    decoration = ''
    if attributes['underscore']:
        decoration = 'underline'
    if attributes['strikethrough']:
        decoration += ' line-through'
    if decoration:
        text_attributes['text-decoration'] = decoration
    # '#rrggbb' literals become 'fill'; anything else is a CSS class name.
    if attributes['color'].startswith('#'):
        text_attributes['fill'] = attributes['color']
    else:
        text_attributes['class'] = attributes['color']
    text_tag = etree.Element('text', text_attributes)
    text_tag.text = text
    return text_tag
def _render_characters(screen_line, cell_width):
    """Return a list of 'text' elements representing the line of the screen

    Consecutive characters with the same styling attributes (text color, font
    weight...) are grouped together in a single text element.

    :param screen_line: Mapping between column numbers and characters
    :param cell_width: Width of a character cell in pixels
    """
    cells = sorted(screen_line.items())
    key = ConsecutiveWithSameAttributes(['color', 'bold', 'italics', 'underscore', 'strikethrough'])
    text_tags = []
    for (column, attributes), group in groupby(cells, key):
        run = ''.join(cell.text for _, cell in group)
        text_tags.append(_make_text_tag(column, attributes, run, cell_width))
    return text_tags
def resize_template(template, geometry, cell_width, cell_height):
"""Resize template based on the number of rows and columns of the terminal"""
def scale(element, template_columns, template_rows, columns, rows):
"""Resize viewbox based on the number of rows and columns of the terminal"""
try:
viewbox = element.attrib['viewBox'].replace(',', ' ').split()
except KeyError:
raise TemplateError('Missing "viewBox" for element "{}"'.format(element))
vb_min_x, vb_min_y, vb_width, vb_height = [int(n) for n in viewbox]
vb_width += cell_width * (columns - template_columns)
vb_height += cell_height * (rows - template_rows)
element.attrib['viewBox'] = ' '.join(map(str, (vb_min_x, vb_min_y, vb_width, vb_height)))
scalable_attributes = {
'width': cell_width * (columns - template_columns),
'height': cell_height * (rows - template_rows)
}
for attribute, delta in scalable_attributes.items():
if attribute in element.attrib:
try:
element.attrib[attribute] = str(int(element.attrib[attribute]) + delta)
except ValueError:
raise TemplateError('"{}" attribute of {} must be in user units'
.format(attribute, element))
return element
try:
tree = etree.parse(io.BytesIO(template))
root = tree.getroot()
except etree.Error as exc:
raise TemplateError('Invalid template') from exc
# Extract the screen geometry which is saved in a private data portion of
# the template
settings = root.find('.//{{{}}}defs/{{{}}}template_settings'
.format(SVG_NS, TERMTOSVG_NS))
if settings is None:
raise TemplateError('Missing "template_settings" element in definitions')
svg_geometry = settings.find('{{{}}}screen_geometry[@columns][@rows]'
.format(TERMTOSVG_NS))
if svg_geometry is None:
raise TemplateError('Missing "screen_geometry" element in "template_settings"')
attributes_err_msg = ('Missing or invalid "columns" or "rows" attribute '
'for element "screen_geometry": expected positive '
'integers')
try:
template_columns = int(svg_geometry.attrib['columns'])
template_rows = int(svg_geometry.attrib['rows'])
except (KeyError, ValueError) as exc:
raise TemplateError(attributes_err_msg) from exc
# Update settings with real columns and rows values to preserve the scale
# in case the animation serves as a template
columns, rows = geometry
svg_geometry.attrib['columns'], svg_geometry.attrib['rows'] = (str(columns),
str(rows))
if template_rows <= 0 or template_columns <= 0:
raise TemplateError(attributes_err_msg)
# Scale the viewBox of the root svg element based on the size of the screen
# and the size registered in the template
scale(root, template_columns, template_rows, columns, rows)
# Also scale the viewBox of the svg element with id 'screen'
screen = root.find('.//{{{namespace}}}svg[@id="screen"]'
.format(namespace=SVG_NS))
if screen is None:
raise TemplateError('svg element with id "screen" not found')
scale(screen, template_columns, template_rows, columns, rows)
return root
def validate_template(name, templates):
if name in templates:
return templates[name]
try:
with open(name, 'rb') as template_file:
return template_file.read()
except FileNotFoundError as exc:
raise TemplateError('Invalid template') from exc
def _embed_css(root, timings=None, animation_duration=None):
try:
style = root.find('.//{{{ns}}}defs/{{{ns}}}style[@id="generated-style"]'
.format(ns=SVG_NS))
except etree.Error as exc:
raise TemplateError('Invalid template') from exc
if style is None:
raise TemplateError('Missing <style id="generated-style" ...> element '
'in "defs"')
css_body = """#screen {
font-family: 'DejaVu Sans Mono', monospace;
font-style: normal;
font-size: 13px;
}
text {
dominant-baseline: text-before-edge;
white-space: pre;
}
"""
if animation_duration is None or timings is None:
style.text = etree.CDATA(css_body)
else:
if animation_duration == 0:
raise ValueError('Animation duration must be greater than 0')
transforms = []
last_offset = None
transform_format = "{time:.3f}%{{transform:translateY({offset}px)}}"
for time, offset in sorted(timings.items()):
transforms.append(
transform_format.format(
time=100.0 * time/animation_duration,
offset=offset
)
)
last_offset = offset
if last_offset is not None:
transforms.append(
transform_format.format(time=100, offset=last_offset)
)
css_animation = """
:root {{
--animation-duration: {duration}ms;
}}
@keyframes roll {{
{transforms}
}}
#screen_view {{
animation-duration: {duration}ms;
animation-iteration-count:infinite;
animation-name:roll;
animation-timing-function: steps(1,end);
animation-fill-mode: forwards;
}}
""".format(
duration=animation_duration,
transforms=os.linesep.join(transforms)
)
style.text = etree.CDATA(css_body + css_animation)
return root
def _embed_waapi(root, timings=None, animation_duration=None):
try:
style = root.find('.//{{{ns}}}defs/{{{ns}}}style[@id="generated-style"]'
.format(ns=SVG_NS))
except etree.Error as exc:
raise TemplateError('Invalid template') from exc
if style is None:
raise TemplateError('Missing <style id="generated-style" ...> element '
'in "defs"')
css_body = """
#screen {
font-family: 'DejaVu Sans Mono', monospace;
font-style: normal;
font-size: 14px;
}
text {
dominant-baseline: text-before-edge;
white-space: pre;
}
"""
style.text = etree.CDATA(css_body)
if animation_duration and timings:
if animation_duration == 0:
raise ValueError('Animation duration must be greater than 0')
css_body += """
:root {{
--animation-duration: {duration}ms;
}}
""".format(duration=animation_duration)
script_element = root.find('.//{{{ns}}}script[@id="generated-js"]'
.format(ns=SVG_NS))
if script_element is None:
raise TemplateError(
'Missing <script id="generated-js" ...> element')
transform_no_offset = "{{transform: 'translate3D(0, {y_pos}px, 0)', easing: 'steps(1, end)'}}"
transform_with_offset = "{{transform: 'translate3D(0, {y_pos}px, 0)', easing: 'steps(1, end)', offset: {offset:.3f}}}"
transforms = []
last_pos = None
for time, y_pos in sorted(timings.items()):
if last_pos is None:
transforms.append(transform_no_offset.format(y_pos=y_pos))
else:
transforms.append(
transform_with_offset
.format(offset=time / animation_duration, y_pos=y_pos)
)
last_pos = y_pos
if last_pos is not None:
transforms.append(transform_no_offset.format(y_pos=last_pos))
js_animation = """
var termtosvg_vars = {{
transforms: [
{transforms}
],
timings: {{
duration: {duration},
iterations: Infinity
}}
}};""".format(
transforms=',{}'.format(os.linesep).join(transforms),
duration=animation_duration
)
script_element.text = etree.CDATA(js_animation)
return root
def validate_svg(svg_file):
"""Validate an SVG file against SVG 1.1 Document Type Definition"""
package = __name__.split('.')[0]
dtd_bytes = pkgutil.get_data(package, '/data/svg11-flat-20110816.dtd')
with io.BytesIO(dtd_bytes) as bstream:
dtd = etree.DTD(bstream)
try:
tree = etree.parse(svg_file)
for bad in tree.xpath('/svg:svg/svg:defs/termtosvg:template_settings',
namespaces=NAMESPACES):
bad.getparent().remove(bad)
root = tree.getroot()
is_valid = dtd.validate(root)
except etree.Error as exc:
raise ValueError('Invalid SVG file') from exc
if not is_valid:
reason = dtd.error_log.filter_from_errors()[0]
raise ValueError('Invalid SVG file: {}'.format(reason))
| 36.922963 | 126 | 0.622919 |
8a51f071f6ced130cf764e9ac3690802a3c992d4 | 532 | py | Python | DAS/data/User.py | sontungtran99/MDAS | 29fd1dbc88dc02b05d561e5b8207a89ba1e648ad | [
"MIT"
] | null | null | null | DAS/data/User.py | sontungtran99/MDAS | 29fd1dbc88dc02b05d561e5b8207a89ba1e648ad | [
"MIT"
] | 8 | 2021-02-08T20:25:38.000Z | 2022-03-11T23:33:03.000Z | DAS/data/User.py | sontungtran99/MDAS | 29fd1dbc88dc02b05d561e5b8207a89ba1e648ad | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users.db'
db = SQLAlchemy(app)
class User(db.Model):
__tablename__ = 'users'
phonenum = db.Column(db.String(15), unique=True, nullable=False, primary_key=True)
district = db.Column(db.String(30), nullable=False)
province = db.Column(db.String(30), nullable=False)
def __repr__(self):
return '<User {} in dist. {}, province {}'.format(self.phonenum, self.district, self.province)
| 25.333333 | 96 | 0.731203 |
3d90783d68030b4e47bd49ed4fdf0d1d8328d79b | 1,174 | py | Python | sktps/ps/aging.py | jclee81/sktacc | 6f601ce8f61b4e361b17773060ee2544bf35dbe4 | [
"Apache-2.0"
] | 2 | 2017-08-03T06:03:25.000Z | 2017-08-10T08:55:22.000Z | sktps/ps/aging.py | jclee81/sktacc | 6f601ce8f61b4e361b17773060ee2544bf35dbe4 | [
"Apache-2.0"
] | 8 | 2020-01-28T21:45:44.000Z | 2022-02-09T23:27:06.000Z | sktps/ps/aging.py | jclee81/sktacc | 6f601ce8f61b4e361b17773060ee2544bf35dbe4 | [
"Apache-2.0"
] | null | null | null | import util
import time
from util.log import log
class DefaultAgingPolicy(object):
def __init__(self, time_out_sec=30):
self.target_iteration_id = -1
self.time_out_sec = time_out_sec
def init(self, parameter_server):
self.target_iteration_id = parameter_server.iteration_id
def get_data(self, parameter_server):
self.target_iteration_id -= 1
iid = self.target_iteration_id
success = True
first = True
if iid < 0:
return first, success, '', None
first = False
ps = parameter_server
group_id = util.get_group_id(ps.train_id, iid)
max_try_count = self.time_out_sec / 0.1
try_count = 0
while True:
raw = ps.rc.get(group_id)
try_count += 1
if raw is None:
if max_try_count > try_count:
time.sleep(0.1)
continue
else:
log.info('Aging: target iteration id will be decreased')
success = False
else:
success = True
return first, success, group_id, raw
| 27.952381 | 76 | 0.560477 |
95f93c251dc3f6a79df82aeba43baeb00601ff1d | 1,701 | py | Python | xlsxwriter/test/core/test_core01.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/core/test_core01.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/core/test_core01.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from datetime import datetime
from ..helperfunctions import _xml_to_list
from ...core import Core
class TestAssembleCore(unittest.TestCase):
"""
Test assembling a complete Core file.
"""
def test_assemble_xml_file(self):
"""Test writing an Core file."""
self.maxDiff = None
fh = StringIO()
core = Core()
core._set_filehandle(fh)
properties = {
'author': 'A User',
'created': datetime(2010, 1, 1, 0, 0, 0),
}
core._set_properties(properties)
core._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cp:coreProperties xmlns:cp="http://schemas.openxmlformats.org/package/2006/metadata/core-properties" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:dcmitype="http://purl.org/dc/dcmitype/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<dc:creator>A User</dc:creator>
<cp:lastModifiedBy>A User</cp:lastModifiedBy>
<dcterms:created xsi:type="dcterms:W3CDTF">2010-01-01T00:00:00Z</dcterms:created>
<dcterms:modified xsi:type="dcterms:W3CDTF">2010-01-01T00:00:00Z</dcterms:modified>
</cp:coreProperties>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| 33.352941 | 304 | 0.588477 |
218ffd863f00ab30c289754f17b3b5674e5c82b6 | 24,134 | py | Python | tests/conftest.py | tgisaturday/client | 9c3274cf6035636e0e05ce7e1d869bb2f5e03482 | [
"MIT"
] | 1 | 2020-09-16T19:06:01.000Z | 2020-09-16T19:06:01.000Z | tests/conftest.py | ashzblum/client | 768ca0b40be3bbd58fc7bfe6211c06ca07e1b216 | [
"MIT"
] | 1 | 2021-04-27T20:13:45.000Z | 2021-04-27T20:13:45.000Z | tests/conftest.py | ashzblum/client | 768ca0b40be3bbd58fc7bfe6211c06ca07e1b216 | [
"MIT"
] | null | null | null | from __future__ import print_function
import pytest
import time
import datetime
import requests
import os
import sys
import threading
import logging
import shutil
from contextlib import contextmanager
from tests import utils
from six.moves import queue
from wandb import wandb_sdk
# from multiprocessing import Process
import subprocess
import click
from click.testing import CliRunner
import webbrowser
import git
import psutil
import atexit
import wandb
import shutil
from wandb.util import mkdir_exists_ok
from six.moves import urllib
from wandb.sdk.lib.module import unset_globals
from wandb.sdk.lib.git import GitRepo
from wandb.sdk.internal.handler import HandleManager
from wandb.sdk.internal.sender import SendManager
from wandb.sdk.interface.interface import BackendSender
from wandb.proto import wandb_internal_pb2
from wandb.proto import wandb_internal_pb2 as pb
try:
import nbformat
except ImportError: # TODO: no fancy notebook fun in python2
pass
try:
from unittest.mock import MagicMock
except ImportError: # TODO: this is only for python2
from mock import MagicMock
DUMMY_API_KEY = "1824812581259009ca9981580f8f8a9012409eee"
class ServerMap(object):
def __init__(self):
self._map = {}
def items(self):
return self._map.items()
def __getitem__(self, worker_id):
if self._map.get(worker_id) is None:
self._map[worker_id] = start_mock_server(worker_id)
return self._map[worker_id]
servers = ServerMap()
def test_cleanup(*args, **kwargs):
print("Shutting down mock servers")
for wid, server in servers.items():
print("Shutting down {}".format(wid))
server.terminate()
print("Open files during tests: ")
proc = psutil.Process()
print(proc.open_files())
def start_mock_server(worker_id):
"""We start a flask server process for each pytest-xdist worker_id"""
port = utils.free_port()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
path = os.path.join(root, "tests", "utils", "mock_server.py")
command = [sys.executable, "-u", path]
env = os.environ
env["PORT"] = str(port)
env["PYTHONPATH"] = root
logfname = os.path.join(
root, "tests", "logs", "live_mock_server-{}.log".format(worker_id)
)
logfile = open(logfname, "w")
server = subprocess.Popen(
command,
stdout=logfile,
env=env,
stderr=subprocess.STDOUT,
bufsize=1,
close_fds=True,
)
server._port = port
server.base_url = "http://localhost:%i" % server._port
def get_ctx():
return requests.get(server.base_url + "/ctx").json()
def set_ctx(payload):
return requests.put(server.base_url + "/ctx", json=payload).json()
def reset_ctx():
return requests.delete(server.base_url + "/ctx").json()
server.get_ctx = get_ctx
server.set_ctx = set_ctx
server.reset_ctx = reset_ctx
started = False
for i in range(10):
try:
res = requests.get("%s/ctx" % server.base_url, timeout=5)
if res.status_code == 200:
started = True
break
print("Attempting to connect but got: %s" % res)
except requests.exceptions.RequestException:
print(
"Timed out waiting for server to start...", server.base_url, time.time()
)
if server.poll() is None:
time.sleep(1)
else:
raise ValueError("Server failed to start.")
if started:
print("Mock server listing on {} see {}".format(server._port, logfname))
else:
server.terminate()
print("Server failed to launch, see {}".format(logfname))
try:
print("=" * 40)
with open(logfname) as f:
for logline in f.readlines():
print(logline.strip())
print("=" * 40)
except Exception as e:
print("EXCEPTION:", e)
raise ValueError("Failed to start server! Exit code %s" % server.returncode)
return server
atexit.register(test_cleanup)
@pytest.fixture
def test_name(request):
# change "test[1]" to "test__1__"
name = urllib.parse.quote(request.node.name.replace("[", "__").replace("]", "__"))
return name
@pytest.fixture
def test_dir(test_name):
orig_dir = os.getcwd()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
test_dir = os.path.join(root, "tests", "logs", test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
mkdir_exists_ok(test_dir)
os.chdir(test_dir)
yield test_dir
os.chdir(orig_dir)
@pytest.fixture
def git_repo(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
mkdir_exists_ok("wandb")
# Because the forked process doesn't use my monkey patch above
with open("wandb/settings", "w") as f:
f.write("[default]\nproject: test")
open("README", "wb").close()
r.index.add(["README"])
r.index.commit("Initial commit")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:bar@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote_and_empty_pass(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def dummy_api_key():
return DUMMY_API_KEY
@pytest.fixture
def test_settings(test_dir, mocker, live_mock_server):
"""Settings object for tests"""
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
wandb.wandb_sdk.wandb_run.EXIT_TIMEOUT = 15
wandb.wandb_sdk.wandb_setup._WandbSetup.instance = None
wandb_dir = os.path.join(test_dir, "wandb")
mkdir_exists_ok(wandb_dir)
# root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
settings = wandb.Settings(
_start_time=time.time(),
base_url=live_mock_server.base_url,
root_dir=test_dir,
save_code=False,
project="test",
console="off",
host="test",
api_key=DUMMY_API_KEY,
run_id=wandb.util.generate_id(),
_start_datetime=datetime.datetime.now(),
)
settings.setdefaults()
yield settings
# Just incase someone forgets to join in tests
if wandb.run is not None:
wandb.run.finish()
@pytest.fixture
def mocked_run(runner, test_settings):
"""A managed run object for tests with a mock backend"""
run = wandb.wandb_sdk.wandb_run.Run(settings=test_settings)
run._set_backend(MagicMock())
yield run
@pytest.fixture
def runner(monkeypatch, mocker):
# monkeypatch.setattr('wandb.cli.api', InternalApi(
# default_settings={'project': 'test', 'git_tag': True}, load_settings=False))
monkeypatch.setattr(wandb.util, "prompt_choices", lambda x: x[0])
monkeypatch.setattr(wandb.wandb_lib.apikey, "prompt_choices", lambda x: x[0])
monkeypatch.setattr(click, "launch", lambda x: 1)
monkeypatch.setattr(webbrowser, "open_new_tab", lambda x: True)
mocker.patch("wandb.wandb_lib.apikey.isatty", lambda stream: True)
mocker.patch("wandb.wandb_lib.apikey.input", lambda x: 1)
mocker.patch("wandb.wandb_lib.apikey.getpass.getpass", lambda x: DUMMY_API_KEY)
return CliRunner()
@pytest.fixture(autouse=True)
def reset_setup():
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
@pytest.fixture(autouse=True)
def local_netrc(monkeypatch):
"""Never use our real credentials, put them in their own isolated dir"""
with CliRunner().isolated_filesystem():
# TODO: this seems overkill...
origexpand = os.path.expanduser
# Touch that netrc
open(".netrc", "wb").close()
def expand(path):
if "netrc" in path:
try:
ret = os.path.realpath("netrc")
except OSError:
ret = origexpand(path)
else:
ret = origexpand(path)
return ret
monkeypatch.setattr(os.path, "expanduser", expand)
yield
@pytest.fixture(autouse=True)
def local_settings(mocker):
"""Place global settings in an isolated dir"""
with CliRunner().isolated_filesystem():
cfg_path = os.path.join(os.getcwd(), ".config", "wandb", "settings")
mkdir_exists_ok(os.path.join(".config", "wandb"))
mocker.patch("wandb.old.settings.Settings._global_path", return_value=cfg_path)
yield
@pytest.fixture
def mock_server(mocker):
return utils.mock_server(mocker)
# We create one live_mock_server per pytest-xdist worker
@pytest.fixture
def live_mock_server(request, worker_id):
global servers
server = servers[worker_id]
name = urllib.parse.quote(request.node.name)
# We set the username so the mock backend can namespace state
os.environ["WANDB_USERNAME"] = name
os.environ["WANDB_BASE_URL"] = server.base_url
os.environ["WANDB_ERROR_REPORTING"] = "false"
os.environ["WANDB_API_KEY"] = DUMMY_API_KEY
# clear mock server ctx
server.reset_ctx()
yield server
del os.environ["WANDB_USERNAME"]
del os.environ["WANDB_BASE_URL"]
del os.environ["WANDB_ERROR_REPORTING"]
del os.environ["WANDB_API_KEY"]
@pytest.fixture
def notebook(live_mock_server, test_dir):
"""This launches a live server, configures a notebook to use it, and enables
devs to execute arbitrary cells. See tests/test_notebooks.py
"""
@contextmanager
def notebook_loader(nb_path, kernel_name="wandb_python", save_code=True, **kwargs):
with open(utils.notebook_path("setup.ipynb")) as f:
setupnb = nbformat.read(f, as_version=4)
setupcell = setupnb["cells"][0]
# Ensure the notebooks talks to our mock server
new_source = setupcell["source"].replace(
"__WANDB_BASE_URL__", live_mock_server.base_url,
)
if save_code:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", nb_path)
else:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", "")
setupcell["source"] = new_source
nb_path = utils.notebook_path(nb_path)
shutil.copy(nb_path, os.path.join(os.getcwd(), os.path.basename(nb_path)))
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
nb["cells"].insert(0, setupcell)
try:
client = utils.WandbNotebookClient(nb, kernel_name=kernel_name)
with client.setup_kernel(**kwargs):
# Run setup commands for mocks
client.execute_cells(-1, store_history=False)
yield client
finally:
with open(os.path.join(os.getcwd(), "notebook.log"), "w") as f:
f.write(client.all_output_text())
wandb.termlog("Find debug logs at: %s" % os.getcwd())
wandb.termlog(client.all_output_text())
notebook_loader.base_url = live_mock_server.base_url
return notebook_loader
@pytest.fixture
def mocked_module(monkeypatch):
"""This allows us to mock modules loaded via wandb.util.get_module"""
def mock_get_module(module):
orig_get_module = wandb.util.get_module
mocked_module = MagicMock()
def get_module(mod):
if mod == module:
return mocked_module
else:
return orig_get_module(mod)
monkeypatch.setattr(wandb.util, "get_module", get_module)
return mocked_module
return mock_get_module
@pytest.fixture
def mocked_ipython(monkeypatch):
monkeypatch.setattr(
wandb.wandb_sdk.wandb_settings, "_get_python_type", lambda: "jupyter"
)
ipython = MagicMock()
# TODO: this is really unfortunate, for reasons not clear to me, monkeypatch doesn't work
orig_get_ipython = wandb.jupyter.get_ipython
wandb.jupyter.get_ipython = lambda: ipython
yield ipython
wandb.jupyter.get_ipython = orig_get_ipython
def default_wandb_args():
"""This allows us to parameterize the wandb_init_run fixture
The most general arg is "env", you can call:
@pytest.mark.wandb_args(env={"WANDB_API_KEY": "XXX"})
To set env vars and have them unset when the test completes.
"""
return {
"error": None,
"k8s": None,
"sagemaker": False,
"tensorboard": False,
"resume": False,
"env": {},
"wandb_init": {},
}
def mocks_from_args(mocker, args, mock_server):
if args["k8s"] is not None:
mock_server.ctx["k8s"] = args["k8s"]
args["env"].update(utils.mock_k8s(mocker))
if args["sagemaker"]:
args["env"].update(utils.mock_sagemaker(mocker))
@pytest.fixture
def wandb_init_run(request, runner, mocker, mock_server):
marker = request.node.get_closest_marker("wandb_args")
args = default_wandb_args()
if marker:
args.update(marker.kwargs)
try:
mocks_from_args(mocker, args, mock_server)
for k, v in args["env"].items():
os.environ[k] = v
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
run = wandb.init(
settings=wandb.Settings(console="off", mode="offline", _except_exit=False),
**args["wandb_init"]
)
yield run
wandb.join()
finally:
unset_globals()
for k, v in args["env"].items():
del os.environ[k]
@pytest.fixture
def wandb_init(request, runner, mocker, mock_server):
def init(*args, **kwargs):
try:
mocks_from_args(mocker, default_wandb_args(), mock_server)
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
return wandb.init(
settings=wandb.Settings(
console="off", mode="offline", _except_exit=False
),
*args,
**kwargs
)
finally:
unset_globals()
return init
@pytest.fixture()
def restore_version():
save_current_version = wandb.__version__
yield
wandb.__version__ = save_current_version
try:
del wandb.__hack_pypi_latest_version__
except AttributeError:
pass
@pytest.fixture()
def disable_console():
os.environ["WANDB_CONSOLE"] = "off"
yield
del os.environ["WANDB_CONSOLE"]
@pytest.fixture()
def parse_ctx():
"""Fixture providing class to parse context data."""
def parse_ctx_fn(ctx, run_id=None):
return utils.ParseCTX(ctx, run_id=run_id)
yield parse_ctx_fn
@pytest.fixture()
def record_q():
return queue.Queue()
@pytest.fixture()
def fake_interface(record_q):
return BackendSender(record_q=record_q)
@pytest.fixture
def fake_backend(fake_interface):
class FakeBackend:
def __init__(self):
self.interface = fake_interface
yield FakeBackend()
@pytest.fixture
def fake_run(fake_backend):
def run_fn():
s = wandb.Settings()
run = wandb_sdk.wandb_run.Run(settings=s)
run._set_backend(fake_backend)
return run
yield run_fn
@pytest.fixture
def records_util():
def records_fn(q):
ru = utils.RecordsUtil(q)
return ru
yield records_fn
@pytest.fixture
def user_test(fake_run, record_q, records_util):
class UserTest:
pass
ut = UserTest()
ut.get_run = fake_run
ut.get_records = lambda: records_util(record_q)
yield ut
# @pytest.hookimpl(tryfirst=True, hookwrapper=True)
# def pytest_runtest_makereport(item, call):
# outcome = yield
# rep = outcome.get_result()
# if rep.when == "call" and rep.failed:
# print("DEBUG PYTEST", rep, item, call, outcome)
@pytest.fixture
def log_debug(caplog):
caplog.set_level(logging.DEBUG)
yield
# for rec in caplog.records:
# print("LOGGER", rec.message, file=sys.stderr)
# ----------------------
# internal test fixtures
# ----------------------
@pytest.fixture()
def internal_result_q():
return queue.Queue()
@pytest.fixture()
def internal_sender_q():
return queue.Queue()
@pytest.fixture()
def internal_writer_q():
return queue.Queue()
@pytest.fixture()
def internal_process():
# FIXME: return mocked process (needs is_alive())
return MockProcess()
class MockProcess:
def __init__(self):
self._alive = True
def is_alive(self):
return self._alive
@pytest.fixture()
def _internal_sender(record_q, internal_result_q, internal_process):
return BackendSender(
record_q=record_q, result_q=internal_result_q, process=internal_process,
)
@pytest.fixture()
def internal_sm(
runner,
internal_sender_q,
internal_result_q,
test_settings,
mock_server,
_internal_sender,
):
with runner.isolated_filesystem():
test_settings.root_dir = os.getcwd()
sm = SendManager(
settings=test_settings,
record_q=internal_sender_q,
result_q=internal_result_q,
interface=_internal_sender,
)
yield sm
@pytest.fixture()
def stopped_event():
stopped = threading.Event()
yield stopped
@pytest.fixture()
def internal_hm(
runner,
record_q,
internal_result_q,
test_settings,
mock_server,
internal_sender_q,
internal_writer_q,
_internal_sender,
stopped_event,
):
with runner.isolated_filesystem():
test_settings.root_dir = os.getcwd()
hm = HandleManager(
settings=test_settings,
record_q=record_q,
result_q=internal_result_q,
stopped=stopped_event,
sender_q=internal_sender_q,
writer_q=internal_writer_q,
interface=_internal_sender,
)
yield hm
@pytest.fixture()
def internal_get_record():
def _get_record(input_q, timeout=None):
try:
i = input_q.get(timeout=timeout)
except queue.Empty:
return None
return i
return _get_record
@pytest.fixture()
def start_send_thread(
internal_sender_q, internal_get_record, stopped_event, internal_process
):
def start_send(send_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_sender_q, timeout=0.1
)
if payload:
send_manager.send(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-sender"
t.daemon = True
t.start()
return t
yield start_send
stopped_event.set()
@pytest.fixture()
def start_handle_thread(record_q, internal_get_record, stopped_event):
def start_handle(handle_manager):
def target():
while True:
payload = internal_get_record(input_q=record_q, timeout=0.1)
if payload:
handle_manager.handle(payload)
elif stopped_event.is_set():
break
t = threading.Thread(target=target)
t.name = "testing-handler"
t.daemon = True
t.start()
return t
yield start_handle
stopped_event.set()
@pytest.fixture()
def _start_backend(
mocked_run,
internal_hm,
internal_sm,
_internal_sender,
start_handle_thread,
start_send_thread,
log_debug,
):
def start_backend_func(initial_run=True, initial_start=False):
ht = start_handle_thread(internal_hm)
st = start_send_thread(internal_sm)
if initial_run:
run = _internal_sender.communicate_run(mocked_run)
if initial_start:
_internal_sender.communicate_run_start(run.run)
return (ht, st)
yield start_backend_func
@pytest.fixture()
def _stop_backend(
mocked_run,
internal_hm,
internal_sm,
_internal_sender,
start_handle_thread,
start_send_thread,
):
def stop_backend_func(threads=None):
threads = threads or ()
done = False
_internal_sender.publish_exit(0)
for _ in range(30):
poll_exit_resp = _internal_sender.communicate_poll_exit()
if poll_exit_resp:
done = poll_exit_resp.done
if done:
break
time.sleep(1)
_internal_sender.join()
for t in threads:
t.join()
assert done, "backend didnt shutdown"
yield stop_backend_func
@pytest.fixture()
def backend_interface(_start_backend, _stop_backend, _internal_sender):
@contextmanager
def backend_context(initial_run=True, initial_start=False):
threads = _start_backend(initial_run=initial_run, initial_start=initial_start)
try:
yield _internal_sender
finally:
_stop_backend(threads=threads)
return backend_context
@pytest.fixture
def publish_util(
mocked_run, mock_server, backend_interface, parse_ctx,
):
def fn(
metrics=None,
history=None,
artifacts=None,
files=None,
begin_cb=None,
end_cb=None,
initial_start=False,
):
metrics = metrics or []
history = history or []
artifacts = artifacts or []
files = files or []
with backend_interface(initial_start=initial_start) as interface:
if begin_cb:
begin_cb(interface)
for m in metrics:
interface._publish_metric(m)
for h in history:
interface.publish_history(**h)
for a in artifacts:
interface.publish_artifact(**a)
for f in files:
interface.publish_files(**f)
if end_cb:
end_cb(interface)
ctx_util = parse_ctx(mock_server.ctx, run_id=mocked_run.id)
return ctx_util
yield fn
@pytest.fixture
def tbwatcher_util(mocked_run, mock_server, internal_hm, backend_interface, parse_ctx):
def fn(write_function, logdir="./", save=True, root_dir="./"):
with backend_interface() as interface:
proto_run = pb.RunRecord()
mocked_run._make_proto_run(proto_run)
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(proto_run)
request = pb.Request()
request.run_start.CopyFrom(run_start)
record = pb.Record()
record.request.CopyFrom(request)
internal_hm.handle_request_run_start(record)
internal_hm._tb_watcher.add(logdir, save, root_dir)
# need to sleep to give time for the tb_watcher delay
time.sleep(15)
write_function()
ctx_util = parse_ctx(mock_server.ctx)
return ctx_util
yield fn
@pytest.fixture
def inject_requests(mock_server):
"""Fixture for injecting responses and errors to mock_server."""
# TODO(jhr): make this compatible with live_mock_server
return utils.InjectRequests(ctx=mock_server.ctx)
| 28.030197 | 93 | 0.638228 |
29073cc23b1335a67649f59e5b5aeee22ca919ea | 39,852 | py | Python | crabageprediction/venv/Lib/site-packages/pandas/core/groupby/ops.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 3 | 2021-11-23T05:35:28.000Z | 2022-02-10T08:05:53.000Z | crabageprediction/venv/Lib/site-packages/pandas/core/groupby/ops.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 5 | 2022-02-13T14:38:04.000Z | 2022-02-15T00:13:07.000Z | crabageprediction/venv/Lib/site-packages/pandas/core/groupby/ops.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 4 | 2021-11-23T05:36:16.000Z | 2021-11-23T05:39:33.000Z | """
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
Callable,
Generic,
Hashable,
Iterator,
Sequence,
final,
overload,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
from pandas._typing import (
ArrayLike,
DtypeObj,
NDFrameT,
Shape,
npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
is_1d_only_ea_obj,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import (
Float64Dtype,
FloatingDtype,
)
from pandas.core.arrays.integer import (
Int64Dtype,
_IntegerDtype,
)
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
from pandas.core.arrays.string_ import StringDtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import grouper
from pandas.core.indexes.api import (
CategoricalIndex,
Index,
MultiIndex,
ensure_index,
)
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
)
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.groupby
"""
# Functions for which we do _not_ attempt to cast the cython result
# back to the original dtype.
cast_blocklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
_CYTHON_FUNCTIONS = {
"aggregate": {
"add": "group_add",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median",
"var": "group_var",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
_MASKED_CYTHON_FUNCTIONS = {"cummin", "cummax", "min", "max"}
_cython_arity = {"ohlc": 4} # OHLC
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(maxsize=None)
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, ftype)
if is_numeric:
return f
elif dtype == object:
if "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
"""
Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : callable
values : np.ndarray
"""
how = self.how
kind = self.kind
if how in ["median", "cumprod"]:
# these two only have float64 implementations
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{values.dtype.name}]"
)
func = getattr(libgroupby, f"group_{how}_float64")
return func, values
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if values.dtype.kind in ["i", "u"]:
if how in ["add", "var", "prod", "mean", "ohlc"]:
# result may still include NaN, so we have to cast
values = ensure_float64(values)
return func, values
def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if is_categorical_dtype(dtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
# are not setup for dim transforming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_any_dtype(dtype):
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
@overload
def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:
... # pragma: no cover
@overload
def _get_result_dtype(self, dtype: ExtensionDtype) -> ExtensionDtype:
... # pragma: no cover
def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : np.dtype or ExtensionDtype
Input dtype.
Returns
-------
np.dtype or ExtensionDtype
The desired dtype of the result.
"""
how = self.how
if how in ["add", "cumsum", "sum", "prod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Int64Dtype()
elif how in ["mean", "median", "var"]:
if isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Float64Dtype()
elif is_float_dtype(dtype) or is_complex_dtype(dtype):
return dtype
elif is_numeric_dtype(dtype):
return np.dtype(np.float64)
return dtype
def uses_mask(self) -> bool:
return self.how in self._MASKED_CYTHON_FUNCTIONS
@final
def _ea_wrap_cython_operation(
self,
values: ExtensionArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> ArrayLike:
"""
If we have an ExtensionArray, unwrap, call _cython_operation, and
re-wrap if appropriate.
"""
# TODO: general case implementation overridable by EAs.
if isinstance(values, BaseMaskedArray) and self.uses_mask():
return self._masked_ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
if isinstance(values, (DatetimeArray, PeriodArray, TimedeltaArray)):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
npvalues = values._ndarray.view("M8[ns]")
elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):
# IntegerArray or BooleanArray
npvalues = values.to_numpy("float64", na_value=np.nan)
elif isinstance(values.dtype, FloatingDtype):
# FloatingArray
npvalues = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
elif isinstance(values.dtype, StringDtype):
# StringArray
npvalues = values.to_numpy(object, na_value=np.nan)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: {values.dtype}"
)
res_values = self._cython_op_ndim_compat(
npvalues,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
if self.how in ["rank"]:
# i.e. how in WrappedCythonOp.cast_blocklist, since
# other cast_blocklist methods dont go through cython_operation
return res_values
return self._reconstruct_ea_result(values, res_values)
    def _reconstruct_ea_result(self, values, res_values):
        """
        Construct an ExtensionArray result from an ndarray result.

        ``values`` is the original ExtensionArray input; ``res_values`` is
        the raw ndarray produced by the cython op.
        """
        # TODO: allow EAs to override this logic
        if isinstance(
            values.dtype, (BooleanDtype, _IntegerDtype, FloatingDtype, StringDtype)
        ):
            # Masked/string dtypes: rebuild via the result dtype's array type,
            # so e.g. a boolean "add" comes back as Int64 (see _get_result_dtype).
            dtype = self._get_result_dtype(values.dtype)
            cls = dtype.construct_array_type()
            return cls._from_sequence(res_values, dtype=dtype)
        elif needs_i8_conversion(values.dtype):
            # Datetime-like: the op ran on an int64 view, so reinterpret the
            # result as i8 and rewrap in the original array type/dtype.
            i8values = res_values.view("i8")
            return type(values)(i8values, dtype=values.dtype)
        raise NotImplementedError
@final
def _masked_ea_wrap_cython_operation(
self,
values: BaseMaskedArray,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
**kwargs,
) -> BaseMaskedArray:
"""
Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's
and cython algorithms which accept a mask.
"""
orig_values = values
# Copy to ensure input and result masks don't end up shared
mask = values._mask.copy()
result_mask = np.zeros(ngroups, dtype=bool)
arr = values._data
res_values = self._cython_op_ndim_compat(
arr,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
dtype = self._get_result_dtype(orig_values.dtype)
assert isinstance(dtype, BaseMaskedDtype)
cls = dtype.construct_array_type()
if self.kind != "aggregate":
return cls(res_values.astype(dtype.type, copy=False), mask)
else:
return cls(res_values.astype(dtype.type, copy=False), result_mask)
@final
def _cython_op_ndim_compat(
self,
values: np.ndarray,
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: np.ndarray | None = None,
result_mask: np.ndarray | None = None,
**kwargs,
) -> np.ndarray:
if values.ndim == 1:
# expand to 2d, dispatch, then squeeze if appropriate
values2d = values[None, :]
if mask is not None:
mask = mask[None, :]
if result_mask is not None:
result_mask = result_mask[None, :]
res = self._call_cython_op(
values2d,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
if res.shape[0] == 1:
return res[0]
# otherwise we have OHLC
return res.T
return self._call_cython_op(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=mask,
result_mask=result_mask,
**kwargs,
)
@final
def _call_cython_op(
self,
values: np.ndarray, # np.ndarray[ndim=2]
*,
min_count: int,
ngroups: int,
comp_ids: np.ndarray,
mask: np.ndarray | None,
result_mask: np.ndarray | None,
**kwargs,
) -> np.ndarray: # np.ndarray[ndim=2]
orig_values = values
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
is_datetimelike = needs_i8_conversion(dtype)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(dtype):
values = values.astype("int64")
elif is_integer_dtype(dtype):
# GH#43329 If the dtype is explicitly of type uint64 the type is not
# changed to prevent overflow.
if dtype != np.uint64:
values = values.astype(np.int64, copy=False)
elif is_numeric:
if not is_complex_dtype(dtype):
values = ensure_float64(values)
values = values.T
if mask is not None:
mask = mask.T
if result_mask is not None:
result_mask = result_mask.T
out_shape = self._get_output_shape(ngroups, values)
func, values = self.get_cython_func_and_vals(values, is_numeric)
out_dtype = self.get_out_dtype(values.dtype)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
if self.kind == "aggregate":
counts = np.zeros(ngroups, dtype=np.int64)
if self.how in ["min", "max", "mean"]:
func(
result,
counts,
values,
comp_ids,
min_count,
mask=mask,
result_mask=result_mask,
is_datetimelike=is_datetimelike,
)
elif self.how in ["add"]:
# We support datetimelike
func(
result,
counts,
values,
comp_ids,
min_count,
datetimelike=is_datetimelike,
)
else:
func(result, counts, values, comp_ids, min_count)
else:
# TODO: min_count
if self.uses_mask():
func(
result,
values,
comp_ids,
ngroups,
is_datetimelike,
mask=mask,
**kwargs,
)
else:
func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)
if self.kind == "aggregate":
# i.e. counts is defined. Locations where count<min_count
# need to have the result set to np.nan, which may require casting,
# see GH#40767
if is_integer_dtype(result.dtype) and not is_datetimelike:
cutoff = max(1, min_count)
empty_groups = counts < cutoff
if empty_groups.any():
# Note: this conversion could be lossy, see GH#40767
result = result.astype("float64")
result[empty_groups] = np.nan
result = result.T
if self.how not in self.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
res_dtype = self._get_result_dtype(orig_values.dtype)
op_result = maybe_downcast_to_dtype(result, res_dtype)
else:
op_result = result
# error: Incompatible return value type (got "Union[ExtensionArray, ndarray]",
# expected "ndarray")
return op_result # type: ignore[return-value]
@final
def cython_operation(
self,
*,
values: ArrayLike,
axis: int,
min_count: int = -1,
comp_ids: np.ndarray,
ngroups: int,
**kwargs,
) -> ArrayLike:
"""
Call our cython function, with appropriate pre- and post- processing.
"""
if values.ndim > 2:
raise NotImplementedError("number of dimensions is currently limited to 2")
elif values.ndim == 2:
assert axis == 1, axis
elif not is_1d_only_ea_obj(values):
# Note: it is *not* the case that axis is always 0 for 1-dim values,
# as we can have 1D ExtensionArrays that we need to treat as 2D
assert axis == 0
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
# can we do this operation with our cython functions
# if not raise NotImplementedError
self._disallow_invalid_ops(dtype, is_numeric)
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
return self._ea_wrap_cython_operation(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
**kwargs,
)
return self._cython_op_ndim_compat(
values,
min_count=min_count,
ngroups=ngroups,
comp_ids=comp_ids,
mask=None,
**kwargs,
)
class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
all the grouping instances to handle in this grouper
for example for grouper list to groupby, need to pass the list
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : np.ndarray[np.intp], optional
the indexer created by Grouper
some groupers (TimeGrouper) will sort its axis and its
group_info is also sorted, so need the indexer to reorder
"""
axis: Index
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: npt.NDArray[np.intp] | None = None,
dropna: bool = True,
):
assert isinstance(axis, Index), axis
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self._sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(
self, data: NDFrameT, axis: int = 0
) -> Iterator[tuple[Hashable, NDFrameT]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self.group_keys_seq
for key, group in zip(keys, splitter):
yield key, group.__finalize__(data, method="groupby")
@final
def _get_splitter(self, data: NDFrame, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
__finalize__ has not been called for the subsetted objects returned.
"""
ids, _, ngroups = self.group_info
return get_splitter(data, ids, ngroups, axis=axis)
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self.groupings[0].grouping_vector
@final
@cache_readonly
def group_keys_seq(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_list(ids, ngroups, self.levels, self.codes)
@final
def apply(
self, f: Callable, data: DataFrame | Series, axis: int = 0
) -> tuple[list, bool]:
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self.group_keys_seq
result_values = []
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
for key, group in zipped:
group = group.__finalize__(data, method="groupby")
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not mutated and not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
# getattr pattern for __name__ is needed for functools.partial objects
if len(group_keys) == 0 and getattr(f, "__name__", None) not in [
"idxmin",
"idxmax",
"nanargmin",
"nanargmax",
]:
# If group_keys is empty, then no function calls have been made,
# so we will not have raised even if this is an invalid dtype.
# So do one dummy call here to raise appropriate TypeError.
f(data.iloc[:0])
return result_values, mutated
@cache_readonly
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""dict {group name -> group indices}"""
if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@final
@property
def codes(self) -> list[np.ndarray]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids, _, ngroups = self.group_info
out: np.ndarray | list
if ngroups:
out = np.bincount(ids[ids != -1], minlength=ngroups)
else:
out = []
return Series(out, index=self.result_index, dtype="int64")
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
"""dict {group name -> group labels}"""
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))
index = Index(to_groupby)
return self.axis.groupby(index)
@final
@cache_readonly
def is_monotonic(self) -> bool:
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
comp_ids, obs_group_ids = self._get_compressed_codes()
ngroups = len(obs_group_ids)
comp_ids = ensure_platform_int(comp_ids)
return comp_ids, obs_group_ids, ngroups
@final
@cache_readonly
def codes_info(self) -> npt.NDArray[np.intp]:
# return the codes of items in original grouped axis
ids, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((ids, self.indexer))
ids = ids[sorter]
ids = ensure_platform_int(ids)
# TODO: if numpy annotates np.lexsort, this ensure_platform_int
# may become unnecessary
return ids
@final
def _get_compressed_codes(self) -> tuple[np.ndarray, npt.NDArray[np.intp]]:
# The first returned ndarray may have any signed integer dtype
if len(self.groupings) > 1:
group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)
return compress_group_index(group_index, sort=self._sort)
ping = self.groupings[0]
return ping.codes, np.arange(len(ping.group_index), dtype=np.intp)
@final
@cache_readonly
def ngroups(self) -> int:
return len(self.result_index)
@property
def reconstructed_codes(self) -> list[np.ndarray]:
codes = self.codes
ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)
@final
@cache_readonly
def result_arraylike(self) -> ArrayLike:
"""
Analogous to result_index, but returning an ndarray/ExtensionArray
allowing us to retain ExtensionDtypes not supported by Index.
"""
# TODO(ExtensionIndex): once Index supports arbitrary EAs, this can
# be removed in favor of result_index
if len(self.groupings) == 1:
return self.groupings[0].group_arraylike
# result_index is MultiIndex
return self.result_index._values
@cache_readonly
def result_index(self) -> Index:
if len(self.groupings) == 1:
return self.groupings[0].result_index.rename(self.names[0])
codes = self.reconstructed_codes
levels = [ping.result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@final
def get_group_levels(self) -> list[ArrayLike]:
# Note: only called from _insert_inaxis_grouper_inplace, which
# is only called for BaseGrouper, never for BinGrouper
if len(self.groupings) == 1:
return [self.groupings[0].group_arraylike]
name_list = []
for ping, codes in zip(self.groupings, self.reconstructed_codes):
codes = ensure_platform_int(codes)
levels = ping.group_arraylike.take(codes)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
@final
def _cython_operation(
self,
kind: str,
values,
how: str,
axis: int,
min_count: int = -1,
**kwargs,
) -> ArrayLike:
"""
Returns the values of a cython operation.
"""
assert kind in ["transform", "aggregate"]
cy_op = WrappedCythonOp(kind=kind, how=how)
ids, _, _ = self.group_info
ngroups = self.ngroups
return cy_op.cython_operation(
values=values,
axis=axis,
min_count=min_count,
comp_ids=ids,
ngroups=ngroups,
**kwargs,
)
@final
def agg_series(
self, obj: Series, func: Callable, preserve_dtype: bool = False
) -> ArrayLike:
"""
Parameters
----------
obj : Series
func : function taking a Series and returning a scalar-like
preserve_dtype : bool
Whether the aggregation is known to be dtype-preserving.
Returns
-------
np.ndarray or ExtensionArray
"""
# test_groupby_empty_with_category gets here with self.ngroups == 0
# and len(obj) > 0
if len(obj) == 0:
# SeriesGrouper would raise if we were to call _aggregate_series_fast
result = self._aggregate_series_pure_python(obj, func)
elif not isinstance(obj._values, np.ndarray):
result = self._aggregate_series_pure_python(obj, func)
# we can preserve a little bit more aggressively with EA dtype
# because maybe_cast_pointwise_result will do a try/except
# with _from_sequence. NB we are assuming here that _from_sequence
# is sufficiently strict that it casts appropriately.
preserve_dtype = True
else:
result = self._aggregate_series_pure_python(obj, func)
npvalues = lib.maybe_convert_objects(result, try_float=False)
if preserve_dtype:
out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)
else:
out = npvalues
return out
@final
def _aggregate_series_pure_python(
self, obj: Series, func: Callable
) -> npt.NDArray[np.object_]:
ids, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = np.empty(ngroups, dtype="O")
initialized = False
# equiv: splitter = self._get_splitter(obj, axis=0)
splitter = get_splitter(obj, ids, ngroups, axis=0)
for i, group in enumerate(splitter):
group = group.__finalize__(obj, method="groupby")
res = func(group)
res = libreduction.extract_result(res)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(res, group.dtype)
initialized = True
counts[i] = group.shape[0]
result[i] = res
return result
class BinGrouper(BaseGrouper):
"""
This is an internal Grouper class
Parameters
----------
bins : the split index of binlabels to group the item of axis
binlabels : the label list
mutated : bool, default False
indexer : np.ndarray[np.intp]
Examples
--------
bins: [2, 4, 6, 8, 10]
binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
'2005-01-05', '2005-01-07', '2005-01-09'],
dtype='datetime64[ns]', freq='2D')
the group_info, which contains the label of each item in grouped
axis, the index of label in label list, group number, is
(array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
means that, the grouped axis has 10 items, can be grouped into 5
labels, the first and second items belong to the first label, the
third and forth items belong to the second label, and so on
"""
bins: npt.NDArray[np.int64]
binlabels: Index
mutated: bool
def __init__(
self,
bins,
binlabels,
mutated: bool = False,
indexer=None,
):
self.bins = ensure_int64(bins)
self.binlabels = ensure_index(binlabels)
self.mutated = mutated
self.indexer = indexer
# These lengths must match, otherwise we could call agg_series
# with empty self.bins, which would raise in libreduction.
assert len(self.binlabels) == len(self.bins)
@cache_readonly
def groups(self):
"""dict {group name -> group labels}"""
# this is mainly for compat
# GH 3881
result = {
key: value
for key, value in zip(self.binlabels, self.bins)
if key is not NaT
}
return result
@property
def nkeys(self) -> int:
# still matches len(self.groupings), but we can hard-code
return 1
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self
def get_iterator(self, data: NDFrame, axis: int = 0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if axis == 0:
slicer = lambda start, edge: data.iloc[start:edge]
else:
slicer = lambda start, edge: data.iloc[:, start:edge]
length = len(data.axes[axis])
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
    @cache_readonly
    def indices(self):
        """dict {bin label -> list of positional indices falling in that bin}."""
        indices = collections.defaultdict(list)
        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            # Empty bins (i == bin) get no entry; NaT labels are skipped.
            if i < bin:
                if label is not NaT:
                    indices[label] = list(range(i, bin))
            i = bin
        return indices
    @cache_readonly
    def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
        """Return ``(comp_ids, obs_group_ids, ngroups)`` for the binned axis."""
        ngroups = self.ngroups
        obs_group_ids = np.arange(ngroups, dtype=np.intp)
        # Per-bin row counts: bins holds cumulative edges, so diff gives sizes.
        rep = np.diff(np.r_[0, self.bins])
        rep = ensure_platform_int(rep)
        if ngroups == len(self.bins):
            comp_ids = np.repeat(np.arange(ngroups), rep)
        else:
            # NOTE(review): ngroups < len(bins) when result_index drops a
            # leading NaN/NaT label; rows in that first bin get comp_id -1
            # — confirm against result_index's isna handling.
            comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
        return (
            ensure_platform_int(comp_ids),
            obs_group_ids,
            ngroups,
        )
@cache_readonly
def reconstructed_codes(self) -> list[np.ndarray]:
# get unique result indices, and prepend 0 as groupby starts from the first
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self) -> list[Index]:
return [self.binlabels]
@property
def names(self) -> list[Hashable]:
return [self.binlabels.name]
@property
def groupings(self) -> list[grouper.Grouping]:
lev = self.binlabels
ping = grouper.Grouping(lev, lev, in_axis=False, level=None)
return [ping]
def _aggregate_series_fast(self, obj: Series, func: Callable) -> np.ndarray:
# -> np.ndarray[object]
raise NotImplementedError(
"This should not be reached; use _aggregate_series_pure_python"
)
def _is_indexed_like(obj, axes, axis: int) -> bool:
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.axes[axis].equals(axes[axis])
elif isinstance(obj, DataFrame):
return obj.axes[axis].equals(axes[axis])
return False
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(Generic[NDFrameT]):
    """Iterate over a Series/DataFrame one group-chunk at a time.

    Rows are sorted so each group is contiguous, then sliced per group.
    """

    def __init__(
        self,
        data: NDFrameT,
        labels: npt.NDArray[np.intp],
        ngroups: int,
        axis: int = 0,
    ):
        # `labels` holds the group id of each row along `axis`.
        self.data = data
        self.labels = ensure_platform_int(labels)  # _should_ already be np.intp
        self.ngroups = ngroups
        self.axis = axis
        assert isinstance(axis, int), axis

    @cache_readonly
    def slabels(self) -> npt.NDArray[np.intp]:
        # Sorted labels
        return self.labels.take(self._sort_idx)

    @cache_readonly
    def _sort_idx(self) -> npt.NDArray[np.intp]:
        # Counting sort indexer
        return get_group_index_sorter(self.labels, self.ngroups)

    def __iter__(self):
        # Yield one chunk (via the subclass's _chop) per group, in label order.
        sdata = self.sorted_data
        if self.ngroups == 0:
            # we are inside a generator, rather than raise StopIteration
            # we merely return signal the end
            return
        starts, ends = lib.generate_slices(self.slabels, self.ngroups)
        for start, end in zip(starts, ends):
            yield self._chop(sdata, slice(start, end))

    @cache_readonly
    def sorted_data(self) -> NDFrameT:
        # Data reordered so each group's rows are contiguous.
        return self.data.take(self._sort_idx, axis=self.axis)

    def _chop(self, sdata, slice_obj: slice) -> NDFrame:
        # Subclasses slice one group's rows out of the sorted data.
        raise AbstractMethodError(self)
class SeriesSplitter(DataSplitter):
    """DataSplitter specialized for Series inputs."""

    def _chop(self, sdata: Series, slice_obj: slice) -> Series:
        # fastpath equivalent to `sdata.iloc[slice_obj]`
        mgr = sdata._mgr.get_slice(slice_obj)
        # __finalize__ not called here, must be applied by caller if applicable
        return sdata._constructor(mgr, name=sdata.name, fastpath=True)
class FrameSplitter(DataSplitter):
    """DataSplitter specialized for DataFrame inputs."""

    def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
        # Fastpath equivalent to:
        # if self.axis == 0:
        #     return sdata.iloc[slice_obj]
        # else:
        #     return sdata.iloc[:, slice_obj]
        # The manager axis is reversed relative to the DataFrame axis.
        mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)
        # __finalize__ not called here, must be applied by caller if applicable
        return sdata._constructor(mgr)
def get_splitter(
    data: NDFrame, labels: np.ndarray, ngroups: int, axis: int = 0
) -> DataSplitter:
    """Instantiate the DataSplitter subclass matching ``data``'s type."""
    # A Series gets SeriesSplitter; anything else reaching here is a DataFrame.
    splitter_cls: type[DataSplitter] = (
        SeriesSplitter if isinstance(data, Series) else FrameSplitter
    )
    return splitter_cls(data, labels, ngroups, axis)
| 31.305577 | 88 | 0.583785 |
25668328a712aa3552345c86503fad46bab46dee | 584 | py | Python | trust-rank/setup.py | allentran/trust-rank | 1c9b351c2ec2ede3128b3c0f6b5ad20fd7433467 | [
"Apache-2.0"
] | 4 | 2015-12-13T16:40:46.000Z | 2019-01-29T09:54:29.000Z | trust-rank/setup.py | allentran/trust-rank | 1c9b351c2ec2ede3128b3c0f6b5ad20fd7433467 | [
"Apache-2.0"
] | null | null | null | trust-rank/setup.py | allentran/trust-rank | 1c9b351c2ec2ede3128b3c0f6b5ad20fd7433467 | [
"Apache-2.0"
] | 7 | 2016-12-27T03:01:44.000Z | 2021-05-04T09:38:01.000Z | from setuptools import setup, find_packages
if __name__ == '__main__':
name = 'trust-finder'
setup(
name = name,
version = "0.0.0",
author = 'Allen Tran',
author_email = 'realallentran@gmail.com',
description = 'Graph based recommender with trust based measures',
packages = find_packages(),
classifiers = [
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: Unix',
'Operating System :: MacOS',
],
setup_requires = [
'setuptools>=3.4.4',
],
)
| 25.391304 | 71 | 0.583904 |
74dc35f2ef044dd24757e390f37fde932d5285dd | 22,054 | py | Python | test/test_spark_keras.py | Vikas-kum/horovod | 6b77884daf92649ecf031fcc8ff29697bbea0132 | [
"Apache-2.0"
] | 1 | 2019-01-17T16:37:06.000Z | 2019-01-17T16:37:06.000Z | test/test_spark_keras.py | Vikas-kum/horovod | 6b77884daf92649ecf031fcc8ff29697bbea0132 | [
"Apache-2.0"
] | null | null | null | test/test_spark_keras.py | Vikas-kum/horovod | 6b77884daf92649ecf031fcc8ff29697bbea0132 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import warnings
import mock
import numpy as np
import tensorflow as tf
import pyspark.sql.types as T
from pyspark.ml.linalg import DenseVector, SparseVector
from pyspark.sql.functions import udf
import horovod.spark.keras as hvd
from horovod.spark.common import constants, util
from horovod.spark.keras import remote
from horovod.spark.keras.estimator import EstimatorParams
from horovod.spark.keras.util import _custom_sparse_to_dense_fn, _serialize_param_value, BareKerasUtil, TFKerasUtil
from common import temppath
from spark_common import CallbackBackend, create_mnist_data, create_xor_data, local_store, spark_session
def create_xor_model():
  """Build a tiny 2-8-1 MLP (tanh hidden layer, sigmoid output) for XOR."""
  return tf.keras.models.Sequential([
      tf.keras.layers.Dense(8, input_dim=2),
      tf.keras.layers.Activation('tanh'),
      tf.keras.layers.Dense(1),
      tf.keras.layers.Activation('sigmoid'),
  ])
def create_mnist_model():
  """Build a small conv net for 8x8 single-channel digit images (10 classes)."""
  return tf.keras.models.Sequential([
      tf.keras.layers.Conv2D(32, kernel_size=(3, 3),
                             activation='relu',
                             input_shape=(8, 8, 1)),
      tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
      tf.keras.layers.Dropout(0.25),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(10, activation='softmax'),
  ])
def get_mock_fit_fn():
  """Return a stand-in for the Keras fit function used by tests.

  The returned callable skips real training: it attaches the model to every
  callback, fires a single `on_epoch_end` event, and returns a mock history
  object.
  """
  def fit(model, train_data, val_data, steps_per_epoch, validation_steps, callbacks, verbose):
    for cb in callbacks:
      cb.set_model(model)
      cb.on_epoch_end(0, logs={})
    return mock.Mock()

  return fit
class SparkKerasTests(tf.test.TestCase):
  def __init__(self, *args, **kwargs):
    # Forward construction args to tf.test.TestCase; report each warning
    # category once per module instead of suppressing repeats globally.
    super(SparkKerasTests, self).__init__(*args, **kwargs)
    warnings.simplefilter('module')
def test_fit_model(self):
model = create_xor_model()
optimizer = tf.keras.optimizers.SGD(lr=0.1)
loss = 'binary_crossentropy'
with spark_session('test_fit_model') as spark:
df = create_xor_data(spark)
with local_store() as store:
keras_estimator = hvd.KerasEstimator(
num_proc=2,
store=store,
model=model,
optimizer=optimizer,
loss=loss,
feature_cols=['features'],
label_cols=['y'],
batch_size=1,
epochs=3,
verbose=2)
keras_model = keras_estimator.fit(df)
trained_model = keras_model.getModel()
pred = trained_model.predict([np.ones([1, 2], dtype=np.float32)])
assert len(pred) == 1
assert pred.dtype == np.float32
def test_fit_model_multiclass(self):
model = create_mnist_model()
optimizer = tf.keras.optimizers.Adadelta(1.0)
loss = tf.keras.losses.categorical_crossentropy
for num_cores in [2, constants.TOTAL_BUFFER_MEMORY_CAP_GIB + 1]:
with spark_session('test_fit_model_multiclass', cores=num_cores) as spark:
df = create_mnist_data(spark)
with local_store() as store:
keras_estimator = hvd.KerasEstimator(
num_proc=num_cores,
store=store,
model=model,
optimizer=optimizer,
loss=loss,
metrics=['accuracy'],
feature_cols=['features'],
label_cols=['label_vec'],
batch_size=2,
epochs=2,
verbose=2)
keras_model = keras_estimator.fit(df).setOutputCols(['label_prob'])
pred_df = keras_model.transform(df)
argmax = udf(lambda v: float(np.argmax(v)), returnType=T.DoubleType())
pred_df = pred_df.withColumn('label_pred', argmax(pred_df.label_prob))
preds = pred_df.collect()
assert len(preds) == df.count()
row = preds[0]
label_prob = row.label_prob.toArray().tolist()
assert label_prob[int(row.label_pred)] == max(label_prob)
@mock.patch('horovod.spark.keras.remote._pin_gpu_fn')
@mock.patch('horovod.spark.keras.util.TFKerasUtil.fit_fn')
def test_restore_from_checkpoint(self, mock_fit_fn, mock_pin_gpu_fn):
mock_fit_fn.return_value = get_mock_fit_fn()
mock_pin_gpu_fn.return_value = mock.Mock()
model = create_xor_model()
optimizer = tf.keras.optimizers.SGD(lr=0.1)
loss = 'binary_crossentropy'
with spark_session('test_restore_from_checkpoint') as spark:
df = create_xor_data(spark)
backend = CallbackBackend()
run_id = 'run01'
with local_store() as store:
keras_estimator = hvd.KerasEstimator(
backend=backend,
store=store,
model=model,
optimizer=optimizer,
loss=loss,
feature_cols=['features'],
label_cols=['y'],
batch_size=1,
epochs=3,
verbose=2,
run_id=run_id)
keras_estimator._load_model_from_checkpoint = mock.Mock(
side_effect=keras_estimator._load_model_from_checkpoint)
ckpt_path = store.get_checkpoint_path(run_id)
assert not store.exists(ckpt_path)
keras_estimator._load_model_from_checkpoint.assert_not_called()
keras_model = keras_estimator.fit(df)
trained_model = keras_model.getModel()
pred = trained_model.predict([np.ones([1, 2], dtype=np.float64)])
assert len(pred) == 1
assert store.exists(ckpt_path)
keras_estimator.fit(df)
keras_estimator._load_model_from_checkpoint.assert_called()
@mock.patch('horovod.spark.keras.remote._pin_gpu_fn')
@mock.patch('horovod.spark.keras.util.TFKerasUtil.fit_fn')
def test_keras_direct_parquet_train(self, mock_fit_fn, mock_pin_gpu_fn):
mock_fit_fn.return_value = get_mock_fit_fn()
mock_pin_gpu_fn.return_value = mock.Mock()
with spark_session('test_keras_direct_parquet_train') as spark:
df = create_xor_data(spark)
backend = CallbackBackend()
with local_store() as store:
store.get_train_data_path = lambda v=None: store._train_path
store.get_val_data_path = lambda v=None: store._val_path
with util.prepare_data(backend.num_processes(),
store,
df,
feature_columns=['features'],
label_columns=['y']):
model = create_xor_model()
optimizer = tf.keras.optimizers.SGD(lr=0.1)
loss = 'binary_crossentropy'
est = hvd.KerasEstimator(
backend=backend,
store=store,
model=model,
optimizer=optimizer,
loss=loss,
feature_cols=['features'],
label_cols=['y'],
batch_size=1,
epochs=3,
verbose=2)
transformer = est.fit_on_parquet()
predictions = transformer.transform(df)
assert predictions.count() == df.count()
@mock.patch('horovod.spark.keras.estimator.remote.RemoteTrainer')
def test_model_serialization(self, mock_remote_trainer):
model = create_xor_model()
optimizer = tf.keras.optimizers.SGD(lr=0.1)
loss = 'binary_crossentropy'
def train(serialized_model, train_rows, val_rows, avg_row_size):
return None, serialized_model, 2
mock_remote_trainer.return_value = train
with spark_session('test_model_serialization') as spark:
df = create_xor_data(spark)
keras_estimator = hvd.KerasEstimator(
model=model,
optimizer=optimizer,
loss=loss,
feature_cols=['features'],
label_cols=['y'],
batch_size=1,
epochs=3,
verbose=2)
backend = CallbackBackend()
with local_store() as store:
with temppath() as saved_path:
keras_estimator.save(saved_path)
keras_estimator_loaded = hvd.KerasEstimator.load(saved_path)
keras_model = keras_estimator_loaded.fit(df, params={
keras_estimator_loaded.backend: backend,
keras_estimator_loaded.store: store
})
trained_model = keras_model.getModel()
pred = trained_model.predict([np.ones([1, 2], dtype=np.float32)])
assert len(pred) == 1
assert pred.dtype == np.float32
  def test_serialize_param_value(self):
    """Backend/store params and None values must serialize to None."""
    # The backend and store are runtime-only objects that must never be
    # persisted with the estimator, regardless of their value.
    serialized_backend = _serialize_param_value(EstimatorParams.backend.name, 'dummy_value', None, None)
    assert serialized_backend is None
    serialized_store = _serialize_param_value(EstimatorParams.store.name, 'dummy_value', None, None)
    assert serialized_store is None
    # A None value for any other param also serializes to None.
    serialized_dummy_param = _serialize_param_value('dummy_param_name', None, None, None)
    assert serialized_dummy_param is None
  def test_calculate_shuffle_buffer_size_small_row_size(self):
    """With small rows the buffer should cover the full per-worker row count."""
    hvd_size = 4
    local_size = 2
    hvd_mock = mock.MagicMock()
    hvd_mock.local_size.return_value = local_size
    # allgather is assumed to return each rank's local size — TODO confirm
    # against remote._calculate_shuffle_buffer_size_fn's contract.
    hvd_mock.allgather.return_value = [local_size for _ in range(hvd_size)]

    avg_row_size = 100
    train_row_count_per_worker = 100

    calculate_shuffle_buffer_size = remote._calculate_shuffle_buffer_size_fn()
    shuffle_size = calculate_shuffle_buffer_size(hvd_mock, avg_row_size, train_row_count_per_worker)
    # Rows are so small that the memory cap is not binding.
    assert shuffle_size == train_row_count_per_worker
  def test_calculate_shuffle_buffer_size(self):
    """Buffer size must be capped by memory divided by the largest worker."""
    # case with 2 workers, one with 5 ranks and second with 3 ranks
    hvd_mock = mock.MagicMock()
    hvd_mock.allgather.return_value = [5, 5, 5, 5, 5, 3, 3, 3]
    hvd_mock.local_size.return_value = 2

    avg_row_size = 100000
    train_row_count_per_worker = 1000000

    calculate_shuffle_buffer_size = remote._calculate_shuffle_buffer_size_fn()
    shuffle_size = calculate_shuffle_buffer_size(hvd_mock, avg_row_size, train_row_count_per_worker)

    # The cap divides the total buffer memory by the busiest worker's local
    # size (5 here), not by this worker's local size.
    assert int(shuffle_size) == int(constants.TOTAL_BUFFER_MEMORY_CAP_GIB * constants.BYTES_PER_GIB / avg_row_size / 5)
def test_custom_sparse_to_dense_fn(self):
dense_shape = 10
custom_sparse_to_dense = _custom_sparse_to_dense_fn()
dense_vector = tf.constant([3., 1., 3., 6., 10., 30., 60., 0, 0, 0])
sparse_vector = custom_sparse_to_dense(dense_vector, dense_shape)
sparse_vector_values = self.evaluate(sparse_vector)[0]
assert sparse_vector_values[1] == 10
assert sparse_vector_values[3] == 30
assert sparse_vector_values[6] == 60
assert len(sparse_vector_values) == dense_shape
def test_convert_custom_sparse_to_dense_bare_keras_fn(self):
convert_custom_sparse_to_dense_bare_keras = BareKerasUtil._convert_custom_sparse_to_dense_fn()
custom_sparse_row = np.array([2, 1, 2, 0.1, 0.2])
sparse_row = convert_custom_sparse_to_dense_bare_keras(custom_sparse_row, 4)
assert np.array_equal(sparse_row, np.array([0., 0.1, 0.2, 0.]))
def test_prepare_data_bare_keras_fn(self):
metadata = \
{
'col1': {
'dtype': float,
'intermediate_format': 'nochange',
'max_size': 1,
'shape': 1
},
'col2': {
'dtype': 'float',
'intermediate_format': 'nochange',
'max_size': 1,
'shape': 1
},
'col3': {
'dtype': SparseVector,
'intermediate_format': 'custom_sparse_format',
'max_size': 7,
'shape': 10
}
}
prepare_data_bare_keras = BareKerasUtil._prepare_data_fn(metadata)
col1 = np.array([1., 2., 3.])
col1_prepared = prepare_data_bare_keras(col1, 'col1', [-1, 3])
assert col1_prepared.shape == (1, 3)
assert np.array_equal(col1_prepared, np.array([[1., 2., 3.]]))
col3 = [np.array([3., 0., 2., 5., 0., 0.2, 0.5, 0, 0]),
np.array([4., 0., 2., 5., 6., 0.2, 0.5, 0.6, 0])]
col3_prepared = prepare_data_bare_keras(col3, 'col3', [-1, 10])
assert col3_prepared.shape == (2, 10)
assert np.array_equal(col3_prepared, np.array(
[[0., 0., 0.2, 0., 0., 0.5, 0., 0., 0., 0.], [0.2, 0., 0.5, 0., 0., 0.6, 0., 0., 0., 0.]]))
def test_batch_generator_fn(self):
shuffle_buffer_size = 10
rows_in_row_group = 100
batch_size = 32
def _create_numpy_array(n_rows, shape):
return np.array([[i for i in range(j, j + shape)] for j in range(n_rows)])
def dummy_reader():
Row = collections.namedtuple('row', ['col1', 'col2', 'sample_weight', 'label'])
col11 = _create_numpy_array(rows_in_row_group, 1)
col21 = _create_numpy_array(rows_in_row_group, 10)
label1 = _create_numpy_array(rows_in_row_group, 8)
sw1 = np.array([i / 100. for i in range(rows_in_row_group)])
row1 = Row(col1=col11, col2=col21, label=label1, sample_weight=sw1)
col12 = _create_numpy_array(rows_in_row_group, 1)
col22 = _create_numpy_array(rows_in_row_group, 10)
label2 = _create_numpy_array(rows_in_row_group, 8)
sw2 = np.array([i / 100. for i in range(rows_in_row_group)])
row2 = Row(col1=col12, col2=col22, label=label2, sample_weight=sw2)
while True:
yield row1
yield row2
metadata = \
{
'col1': {
'dtype': float,
'intermediate_format': constants.NOCHANGE,
'max_size': 1,
'shape': 1
},
'col2': {
'dtype': DenseVector,
'intermediate_format': constants.ARRAY,
'max_size': 10,
'shape': 10
},
'label': {
'dtype': float,
'intermediate_format': constants.NOCHANGE,
'max_size': 1,
'shape': 1
},
}
reader = dummy_reader()
feature_columns = ['col1', 'col2']
label_columns = ['label']
sample_weight_col = 'sample_weight'
input_shapes = [[-1, 1], [-1, 2, 5]]
output_shapes = [[-1, 2, 4]]
batch_generator = BareKerasUtil._batch_generator_fn(
feature_columns, label_columns, sample_weight_col,
input_shapes, output_shapes, batch_size, metadata)
for shuffle in [True, False]:
batch_gen = batch_generator(reader, shuffle_buffer_size, shuffle=shuffle)
for _ in range(10):
batch = next(batch_gen)
assert batch[0][0][0].shape == (1,)
assert batch[0][1][0].shape == (2, 5)
assert batch[1][0][0].shape == (2, 4)
# sample weight has to be a singel np array with shape (batch_size,)
assert batch[2][0].shape == (batch_size,)
def test_reshape(self):
metadata = \
{
'col1': {
'dtype': float,
'intermediate_format': constants.NOCHANGE,
'max_size': 1,
'shape': 1
},
'col2': {
'dtype': SparseVector,
'intermediate_format': constants.CUSTOM_SPARSE,
'max_size': 5,
'shape': 10
},
'label': {
'dtype': float,
'intermediate_format': constants.NOCHANGE,
'max_size': 1,
'shape': 1
},
}
feature_columns = ['col1', 'col2']
label_columns = ['label']
sample_weight_col = 'sample_weight'
Row = collections.namedtuple('row', ['col1', 'col2', 'sample_weight', 'label'])
col11 = tf.constant([3.])
col21 = tf.constant([3., 1., 3., 6., 10., 30., 60., 0, 0, 0, 0])
label1 = tf.constant([1.])
sw1 = tf.constant([.06])
row1 = Row(col1=col11, col2=col21, label=label1, sample_weight=sw1)
reshape_fn = TFKerasUtil._reshape_fn(
sample_weight_col, feature_columns, label_columns, metadata)
reshaped_row = reshape_fn(row1)
reshaped_row_value = self.evaluate(reshaped_row)
assert np.allclose(reshaped_row_value['sample_weight'], np.array([0.06]))
assert np.allclose(reshaped_row_value['col1'], np.array([3.]))
assert np.allclose(reshaped_row_value['col2'],
np.array([[0., 10., 0., 30., 0., 0., 60., 0., 0., 0.]]))
assert np.allclose(reshaped_row_value['label'], np.array([1.]))
def test_prep_data_tf_keras_fn_with_sparse_col(self):
has_sparse_col = True
feature_columns = ['col1', 'col2']
label_columns = ['label1', 'label2']
sample_weight_col = 'sample_weight'
col1 = tf.constant([3.])
col2 = tf.constant([3., 1., 3., 6., 10., 30., 60., 0, 0, 0])
label1 = tf.constant([1., 2., 3., 4.])
label2 = tf.constant([1., 2., 3., 4.])
sw1 = tf.constant([.06])
input_shapes = [[-1, 1], [-1, 2, 5]]
output_shapes = [[-1, 4], [-1, 2, 2]]
output_names = ['label1', 'label2']
prep_data_tf_keras = \
TFKerasUtil._prep_data_fn(has_sparse_col, sample_weight_col,
feature_columns, label_columns, input_shapes,
output_shapes, output_names)
row = {'col1': col1, 'col2': col2, 'label1': label1, 'label2': label2, sample_weight_col: sw1}
prepped_row = prep_data_tf_keras(row)
prepped_row_vals = self.evaluate(prepped_row)
assert np.array_equal(prepped_row_vals[0][0], np.array([[3.]]))
assert np.array_equal(prepped_row_vals[0][1],
np.array([[[3., 1., 3., 6., 10.], [30., 60., 0., 0., 0.]]]))
assert np.array_equal(prepped_row_vals[1][0], np.array([[1., 2., 3., 4.]]))
assert np.array_equal(prepped_row_vals[1][1], np.array([[[1., 2.], [3., 4.]]]))
assert np.allclose(prepped_row_vals[2]['label1'], np.array([0.06]))
assert np.allclose(prepped_row_vals[2]['label2'], np.array([0.06]))
def test_prep_data_tf_keras_fn_without_sparse_col(self):
has_sparse_col = False
feature_columns = ['col1', 'col2']
label_columns = ['label1', 'label2']
sample_weight_col = 'sample_weight'
col1 = tf.constant([3.])
col2 = tf.constant([float(i) for i in range(10)])
label1 = tf.constant([1., 2., 3., 4.])
label2 = tf.constant([1., 2., 3., 4.])
sw1 = tf.constant([.06])
input_shapes = [[-1, 1], [-1, 2, 5]]
output_shapes = [[-1, 4], [-1, 2, 2]]
output_names = ['label1', 'label2']
prep_data_tf_keras = \
TFKerasUtil._prep_data_fn(has_sparse_col, sample_weight_col,
feature_columns, label_columns, input_shapes,
output_shapes, output_names)
Row = collections.namedtuple('row', ['col1', 'col2', sample_weight_col, 'label1', 'label2'])
row = Row(col1=col1, col2=col2, label1=label1, label2=label2, sample_weight=sw1)
prepped_row = prep_data_tf_keras(row)
prepped_row_vals = self.evaluate(prepped_row)
assert np.array_equal(prepped_row_vals[0][0], np.array([[3.]]))
assert np.array_equal(prepped_row_vals[0][1],
np.array([[[0., 1., 2., 3., 4.], [5., 6., 7., 8., 9.]]]))
assert np.array_equal(prepped_row_vals[1][0], np.array([[1., 2., 3., 4.]]))
assert np.array_equal(prepped_row_vals[1][1], np.array([[[1., 2.], [3., 4.]]]))
assert np.allclose(prepped_row_vals[2]['label1'], np.array([0.06]))
assert np.allclose(prepped_row_vals[2]['label2'], np.array([0.06]))
| 40.17122 | 123 | 0.563617 |
47f42afdaf30ea92c08e78aeba937120b0482b7b | 10,244 | py | Python | models/modules/evolved_modules.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 354 | 2017-02-02T14:28:11.000Z | 2022-03-10T07:37:30.000Z | models/modules/evolved_modules.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 18 | 2017-08-13T02:37:30.000Z | 2021-10-01T03:34:00.000Z | models/modules/evolved_modules.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 97 | 2017-02-05T11:41:42.000Z | 2021-11-25T14:39:56.000Z | """
adapted from https://github.com/quark0/darts
"""
from collections import namedtuple
import torch
import torch.nn as nn
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
OPS = {
'avg_pool_3x3': lambda channels, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
'max_pool_3x3': lambda channels, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
'skip_connect': lambda channels, stride, affine: Identity() if stride == 1 else FactorizedReduce(channels, channels, affine=affine),
'sep_conv_3x3': lambda channels, stride, affine: SepConv(channels, channels, 3, stride, 1, affine=affine),
'sep_conv_5x5': lambda channels, stride, affine: SepConv(channels, channels, 5, stride, 2, affine=affine),
'sep_conv_7x7': lambda channels, stride, affine: SepConv(channels, channels, 7, stride, 3, affine=affine),
'dil_conv_3x3': lambda channels, stride, affine: DilConv(channels, channels, 3, stride, 2, 2, affine=affine),
'dil_conv_5x5': lambda channels, stride, affine: DilConv(channels, channels, 5, stride, 4, 2, affine=affine),
'conv_7x1_1x7': lambda channels, stride, affine: nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(channels, channels, (1, 7), stride=(1, stride),
padding=(0, 3), bias=False),
nn.Conv2d(channels, channels, (7, 1), stride=(stride, 1),
padding=(3, 0), bias=False),
nn.BatchNorm2d(channels, affine=affine)
),
}
# genotypes
GENOTYPES = dict(
NASNet=Genotype(
normal=[
('sep_conv_5x5', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 0),
('sep_conv_3x3', 0),
('avg_pool_3x3', 1),
('skip_connect', 0),
('avg_pool_3x3', 0),
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('skip_connect', 1),
],
normal_concat=[2, 3, 4, 5, 6],
reduce=[
('sep_conv_5x5', 1),
('sep_conv_7x7', 0),
('max_pool_3x3', 1),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('sep_conv_5x5', 0),
('skip_connect', 3),
('avg_pool_3x3', 2),
('sep_conv_3x3', 2),
('max_pool_3x3', 1),
],
reduce_concat=[4, 5, 6],
),
AmoebaNet=Genotype(
normal=[
('avg_pool_3x3', 0),
('max_pool_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 2),
('sep_conv_3x3', 0),
('avg_pool_3x3', 3),
('sep_conv_3x3', 1),
('skip_connect', 1),
('skip_connect', 0),
('avg_pool_3x3', 1),
],
normal_concat=[4, 5, 6],
reduce=[
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('max_pool_3x3', 0),
('sep_conv_7x7', 2),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('max_pool_3x3', 0),
('max_pool_3x3', 1),
('conv_7x1_1x7', 0),
('sep_conv_3x3', 5),
],
reduce_concat=[3, 4, 6]
),
DARTS_V1=Genotype(
normal=[
('sep_conv_3x3', 1),
('sep_conv_3x3', 0),
('skip_connect', 0),
('sep_conv_3x3', 1),
('skip_connect', 0),
('sep_conv_3x3', 1),
('sep_conv_3x3', 0),
('skip_connect', 2)],
normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0),
('max_pool_3x3', 1),
('skip_connect', 2),
('max_pool_3x3', 0),
('max_pool_3x3', 0),
('skip_connect', 2),
('skip_connect', 2),
('avg_pool_3x3', 0)],
reduce_concat=[2, 3, 4, 5]),
DARTS=Genotype(normal=[('sep_conv_3x3', 0),
('sep_conv_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_3x3', 1),
('sep_conv_3x3', 1),
('skip_connect', 0),
('skip_connect', 0),
('dil_conv_3x3', 2)],
normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0),
('max_pool_3x3', 1),
('skip_connect', 2),
('max_pool_3x3', 1),
('max_pool_3x3', 0),
('skip_connect', 2),
('skip_connect', 2),
('max_pool_3x3', 1)],
reduce_concat=[2, 3, 4, 5]),
)
class ReLUConvBN(nn.Module):
    """ReLU -> Conv2d -> BatchNorm block, the standard pre-activation stem."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        layers = [
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
                      padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.op(inputs)
class DilConv(nn.Module):
    """Dilated depthwise-separable convolution: ReLU -> depthwise (dilated)
    conv -> 1x1 pointwise conv -> BatchNorm."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
        super(DilConv, self).__init__()
        layers = [
            nn.ReLU(inplace=False),
            # groups=C_in makes this a depthwise convolution.
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, groups=C_in, bias=False),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.op(inputs)
class SepConv(nn.Module):
    """Two stacked depthwise-separable convolutions.

    Each half is ReLU -> depthwise conv -> 1x1 pointwise conv -> BatchNorm;
    only the first depthwise conv applies the requested stride.
    """

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(SepConv, self).__init__()
        layers = [
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                      padding=padding, groups=C_in, bias=False),
            nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1,
                      padding=padding, groups=C_in, bias=False),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.op(inputs)
class Identity(nn.Module):
    """No-op module that returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, inputs):
        return inputs
class FactorizedReduce(nn.Module):
    """Halve the spatial resolution without information loss.

    Two stride-2 1x1 convolutions each produce half the output channels:
    one from the original grid and one from the grid shifted by one pixel,
    so even and odd positions are both sampled. Outputs are concatenated
    and batch-normalized.
    """

    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        half = C_out // 2
        self.relu = nn.ReLU(inplace=False)
        self.conv_1 = nn.Conv2d(C_in, half, 1,
                                stride=2, padding=0, bias=False)
        self.conv_2 = nn.Conv2d(C_in, half, 1,
                                stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        activated = self.relu(x)
        even_path = self.conv_1(activated)
        # Shift by one pixel so the second path covers the odd grid positions.
        odd_path = self.conv_2(activated[:, :, 1:, 1:])
        return self.bn(torch.cat([even_path, odd_path], dim=1))
def drop_path(x, drop_prob):
    """DropPath regularization: zero whole samples in-place, rescaling the rest.

    With probability `drop_prob`, each sample in the batch is zeroed; kept
    samples are divided by the keep probability so the expectation is
    unchanged. NOTE: `x` is modified in-place and also returned.
    """
    if drop_prob > 0.:
        keep_prob = 1. - drop_prob
        # One Bernoulli draw per sample, broadcast over C/H/W.
        mask = x.new(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
        x.div_(keep_prob).mul_(mask)
    return x
class Cell(nn.Module):
    """A NAS cell assembled from a fixed genotype.

    The cell consumes the outputs of the two preceding cells (s0, s1),
    projects both to a common channel width C, applies the genotype's paired
    operations to build intermediate states, and concatenates the states
    selected by the genotype's `concat` list along the channel axis.
    """

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(Cell, self).__init__()
        # If the previous cell reduced spatial size, s0 is one resolution
        # larger than s1 and must be downsampled before the 1x1 projection.
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)

        # Reduction and normal cells use separate op lists and concat sets.
        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)

    def _compile(self, C, op_names, indices, concat, reduction):
        # Ops come in pairs: each step combines exactly two inputs.
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)

        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # In a reduction cell, only ops reading the two cell inputs
            # (index < 2) get stride 2; later states are already reduced.
            stride = 2 if reduction and index < 2 else 1
            op = OPS[name](C, stride, True)
            self._ops += [op]
        self._indices = indices

    def forward(self, s0, s1, drop_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2*i]]
            h2 = states[self._indices[2*i+1]]
            op1 = self._ops[2*i]
            op2 = self._ops[2*i+1]
            h1 = op1(h1)
            h2 = op2(h2)
            # DropPath only during training; identity branches are never
            # dropped so every state keeps at least a skip-connection signal.
            if self.training and drop_prob > 0.:
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            s = h1 + h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)
class NasNetCell(Cell):
    """Cell preconfigured with the NASNet genotype."""

    def __init__(self, *args, **kwargs):
        super(NasNetCell, self).__init__(GENOTYPES['NASNet'], *args, **kwargs)
class AmoebaNetCell(Cell):
    """Cell preconfigured with the AmoebaNet genotype."""

    def __init__(self, *args, **kwargs):
        super(AmoebaNetCell, self).__init__(GENOTYPES['AmoebaNet'], *args, **kwargs)
class DARTSCell(Cell):
    """Cell preconfigured with the DARTS genotype."""

    def __init__(self, *args, **kwargs):
        super(DARTSCell, self).__init__(GENOTYPES['DARTS'], *args, **kwargs)
| 35.69338 | 137 | 0.513959 |
95d8af7f01e3ff2507103a62c153a9915ea299e3 | 992 | py | Python | examples/load_custom_dataset.py | PGBI/Surprise | 76e47037675afc6c0fb017490a88d1b2b2dff0f7 | [
"BSD-3-Clause"
] | 5,572 | 2016-11-24T08:21:53.000Z | 2022-03-31T20:35:00.000Z | examples/load_custom_dataset.py | daihui-lu/Surprise | 46b9914995e6c8c7d227b46f2eaeef2d4600580f | [
"BSD-3-Clause"
] | 393 | 2016-11-22T12:48:00.000Z | 2022-03-26T15:09:53.000Z | examples/load_custom_dataset.py | daihui-lu/Surprise | 46b9914995e6c8c7d227b46f2eaeef2d4600580f | [
"BSD-3-Clause"
] | 1,096 | 2016-12-08T22:01:57.000Z | 2022-03-29T03:55:54.000Z | """
This module descibes how to load a custom dataset from a single file.
As a custom dataset we will actually use the movielens-100k dataset, but act as
if it were not built-in.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from surprise import BaselineOnly
from surprise import Dataset
from surprise import Reader
from surprise.model_selection import cross_validate
# Path to the raw MovieLens-100k ratings file (downloaded by Surprise).
file_path = os.path.expanduser('~/.surprise_data/ml-100k/ml-100k/u.data')

# Because this is a custom dataset, a Reader must describe the line layout.
# In the MovieLens-100k file each line is
# 'user item rating timestamp', separated by '\t' characters.
reader = Reader(line_format='user item rating timestamp', sep='\t')

data = Dataset.load_from_file(file_path, reader=reader)

# The dataset can now be used like any built-in one, e.g. cross-validated
# with a baseline-only algorithm.
cross_validate(BaselineOnly(), data, verbose=True)
| 34.206897 | 79 | 0.762097 |
150eb85f9057d2427ac59aeddbf5841e470e19f4 | 12,795 | py | Python | language/mentionmemory/tasks/eae_task.py | greck2908/language | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | [
"Apache-2.0"
] | 1,199 | 2018-10-16T01:30:18.000Z | 2022-03-31T21:05:24.000Z | language/mentionmemory/tasks/eae_task.py | greck2908/language | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | [
"Apache-2.0"
] | 116 | 2018-10-18T03:31:46.000Z | 2022-03-24T13:40:50.000Z | language/mentionmemory/tasks/eae_task.py | greck2908/language | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | [
"Apache-2.0"
] | 303 | 2018-10-22T12:35:12.000Z | 2022-03-27T17:38:17.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains Entities as Experts pre-training task."""
from typing import Any, Callable, Dict, Optional, Tuple
import flax.linen as nn
import jax.numpy as jnp
from language.mentionmemory.encoders import eae_encoder
from language.mentionmemory.modules import mention_losses
from language.mentionmemory.modules import mlm_layer
from language.mentionmemory.tasks import mention_encoder_task
from language.mentionmemory.tasks import task_registry
from language.mentionmemory.utils import jax_utils as jut
from language.mentionmemory.utils import metric_utils
from language.mentionmemory.utils.custom_types import Array, MetricGroups # pylint: disable=g-multiple-import
import ml_collections
class EaEModel(nn.Module):
  """Entities as Experts (EaE) pre-training model.

  Attributes:
    encoder_config: EaE encoder hyperparameters.
  """
  encoder_config: ml_collections.FrozenConfigDict

  def setup(self):
    self.encoder = eae_encoder.EaEEncoder(**self.encoder_config)
    # The MLM head reuses the encoder's dimensions/initializers; the output
    # projection shares weights with the encoder's word embeddings (passed
    # per-call as `shared_embedding` below).
    self.mlm_layer = mlm_layer.MLMLayer(
        vocab_size=self.encoder.vocab_size,
        hidden_size=self.encoder.hidden_size,
        dtype=self.encoder.dtype,
        layer_norm_epsilon=self.encoder.layer_norm_epsilon,
        embedding_init=self.encoder.kernel_init,
        bias_init=self.encoder.bias_init,
    )

  def __call__(
      self, batch: Dict[str, Array],
      deterministic: bool) -> Tuple[Dict[str, Array], Dict[str, Array]]:
    # Encode the batch, then add MLM logits at the masked target positions
    # to the loss helpers consumed by the task's loss function.
    encoded_input, loss_helpers, logging_helpers = self.encoder.forward(
        batch, deterministic)

    loss_helpers['mlm_logits'] = self.mlm_layer(
        encoded_input=encoded_input,
        mlm_target_positions=batch['mlm_target_positions'],
        shared_embedding=loss_helpers['word_embeddings'])

    return loss_helpers, logging_helpers
@task_registry.register_task('eae')
class EaETask(mention_encoder_task.MentionEncoderTask):
  """Task for pre-training Entities as Experts (EaE) encoder."""

  model_class = EaEModel
  encoder_name = 'eae'

  @classmethod
  def make_loss_fn(
      cls, config: ml_collections.ConfigDict
  ) -> Callable[..., Tuple[float, MetricGroups, Dict[str, Any]]]:
    """Creates task loss function.

    See BaseTask.

    EaE is pre-trained with a combination of 1) MLM loss, 2) entity-linking
    loss comparing mention encodings to entity embeddings at the retrieval and
    final layers, and 3) Matching the Blanks-style loss encouraging mentions
    of the same entity which co-occur with mentions of the same second entity
    to have similar representations.

    Args:
      config: contains experiment hyperparameters.

    Returns:
      Loss function.
    """
    mlm_weight = config.mlm_weight
    el_im_weight = config.el_im_weight
    el_final_weight = config.el_final_weight
    el_score_mode = config.get('el_score_mode', 'dot')
    # Matching-the-Blanks losses are optional; a zero weight disables them.
    mtb_im_weight = config.get('mtb_im_weight', 0)
    mtb_final_weight = config.get('mtb_final_weight', 0)
    mtb_score_mode = config.get('mtb_score_mode', 'dot')

    def loss_fn(
        model_config: ml_collections.FrozenConfigDict,
        model_params: Dict[str, Any],
        model_vars: Dict[str, Any],  # pylint: disable=unused-argument
        batch: Dict[str, Any],
        deterministic: bool,
        dropout_rng: Optional[Dict[str, Array]] = None,
    ) -> Tuple[float, MetricGroups, Dict[str, Any]]:
      """Task-specific loss function. See BaseTask."""

      batch_size = batch['text_ids'].shape[0]
      loss_helpers, logging_helpers = cls.build_model(model_config).apply(  # pylint: disable=unused-variable
          {'params': model_params},
          batch,
          deterministic=deterministic,
          rngs=dropout_rng)

      mention_target_is_masked = batch['mention_target_is_masked']
      mention_target_is_not_masked = 1 - batch['mention_target_is_masked']
      mention_target_ids = batch['mention_target_ids']
      # Zero out padding mention targets so they cannot match a real entity.
      mention_target_ids = mention_target_ids * batch['mention_target_weights']

      mlm_logits = loss_helpers['mlm_logits']

      mlm_loss, mlm_denom = metric_utils.compute_weighted_cross_entropy(
          mlm_logits, batch['mlm_target_ids'], batch['mlm_target_weights'])

      mlm_correct_mask = jnp.equal(
          jnp.argmax(mlm_logits, axis=-1),
          batch['mlm_target_ids']) * batch['mlm_target_weights']
      mlm_acc = mlm_correct_mask.sum()
      # Break MLM accuracy down by whether the target token is inside a
      # mention span.
      mlm_mention_acc = (mlm_correct_mask *
                         batch['mlm_target_is_mention']).sum()
      mlm_mention_denom = (batch['mlm_target_weights'] *
                           batch['mlm_target_is_mention']).sum()
      mlm_non_mention_acc = (mlm_correct_mask *
                             (1 - batch['mlm_target_is_mention'])).sum()
      mlm_non_mention_denom = (batch['mlm_target_weights'] *
                               (1 - batch['mlm_target_is_mention'])).sum()

      metrics = {
          'mlm': {
              'loss': mlm_loss,
              'acc': mlm_acc,
              'denominator': mlm_denom,
          },
          'mlm_mention': {
              'acc': mlm_mention_acc,
              'denominator': mlm_mention_denom,
          },
          'mlm_non_mention': {
              'acc': mlm_non_mention_acc,
              'denominator': mlm_non_mention_denom,
          },
      }

      if 'intermediate_mention_encodings' in loss_helpers:
        intermediate_target_mention_encodings = jut.matmul_slice(
            loss_helpers['intermediate_mention_encodings'],
            batch['mention_target_indices'])
      else:
        intermediate_target_mention_encodings = loss_helpers[
            'im_target_mention_encodings']

      if model_config.encoder_config.get('no_entity_attention', False):
        (el_im_loss, el_im_metrics,
         (el_im_acc_per_mention,
          el_im_weight_per_mention)) = mention_losses.entity_linking_loss(
              intermediate_target_mention_encodings,
              loss_helpers['entity_embeddings'], mention_target_ids,
              batch['mention_target_weights'], el_score_mode)
        el_im_denom = el_im_metrics['denominator']
        metrics['el_intermediate'] = el_im_metrics
        # BUG FIX: the masked / non-masked metric groups were previously
        # identical copies with mismatched mask factors (acc weighted by
        # `is_masked` while the denominator used `is_not_masked`). Split
        # them the same way as the `el_final_*` metrics below.
        metrics['el_intermediate_masked'] = {
            'acc':
                jnp.dot(el_im_acc_per_mention,
                        el_im_weight_per_mention * mention_target_is_masked),
            'denominator':
                jnp.dot(el_im_weight_per_mention, mention_target_is_masked),
        }
        metrics['el_intermediate_non_masked'] = {
            'acc':
                jnp.dot(
                    el_im_acc_per_mention,
                    el_im_weight_per_mention * mention_target_is_not_masked),
            'denominator':
                jnp.dot(el_im_weight_per_mention,
                        mention_target_is_not_masked),
        }
      else:
        intermediate_entity_attention = loss_helpers[
            'intermediate_entity_attention']

        # Construct targets and ids for intermediate entity linking loss.
        # Targets are scattered from per-target-mention order into the
        # per-mention order the attention scores are produced in.
        intermediate_target_ids = jnp.zeros_like(batch['mention_mask'])
        intermediate_target_ids = intermediate_target_ids.at[
            batch['mention_target_indices']].add(
                mention_target_ids * batch['mention_target_weights'])

        intermediate_target_weights = jnp.zeros_like(
            batch['mention_mask'], dtype=intermediate_entity_attention.dtype)
        intermediate_target_weights = intermediate_target_weights.at[
            batch['mention_target_indices']].add(
                batch['mention_target_weights'])

        mention_is_masked = jnp.zeros_like(batch['mention_mask'])
        mention_is_masked = mention_is_masked.at[
            batch['mention_target_indices']].add(
                mention_target_is_masked * batch['mention_target_weights'])

        el_im_loss, el_im_denom = metric_utils.compute_weighted_cross_entropy(
            intermediate_entity_attention,
            intermediate_target_ids,
            intermediate_target_weights,
            inputs_are_prob=True)

        el_im_correct_mask = jnp.equal(
            jnp.argmax(intermediate_entity_attention, axis=-1),
            intermediate_target_ids) * intermediate_target_weights
        # NOTE: a redundant `el_im_acc = el_im_correct_mask.sum()` that was
        # immediately overwritten by the line below has been removed.
        el_im_acc, _ = metric_utils.compute_weighted_accuracy(
            intermediate_entity_attention, intermediate_target_ids,
            intermediate_target_weights)

        intermediate_entity_cos_sim = loss_helpers[
            'intermediate_entity_cos_sim'][batch['mention_target_indices'],
                                           mention_target_ids]

        metrics['el_intermediate'] = {
            'loss':
                el_im_loss,
            'acc':
                el_im_acc,
            'cos_sim':
                jnp.dot(intermediate_entity_cos_sim,
                        batch['mention_target_weights']),
            'denominator':
                el_im_denom,
        }
        metrics['el_intermediate_masked'] = {
            'acc':
                jnp.dot(el_im_correct_mask, mention_is_masked),
            'denominator':
                jnp.dot(batch['mention_target_weights'],
                        batch['mention_target_is_masked']),
        }
        metrics['el_intermediate_non_masked'] = {
            'acc':
                jnp.dot(el_im_correct_mask, (1 - mention_is_masked)),
            'denominator':
                jnp.dot(batch['mention_target_weights'],
                        (1 - batch['mention_target_is_masked'])),
        }

        im_final_mention_encodings_cos_sim = jut.cosine_similarity(
            intermediate_target_mention_encodings,
            loss_helpers['target_mention_encodings'])
        metrics['im_final_mention_encodings'] = {
            'cos_sim':
                jnp.dot(im_final_mention_encodings_cos_sim,
                        batch['mention_target_weights']),
            'denominator':
                batch['mention_target_weights'].sum(),
        }

      (el_final_loss, el_final_metrics,
       (el_final_acc_per_mention,
        el_final_weight_per_mention)) = mention_losses.entity_linking_loss(
            loss_helpers['target_mention_encodings'],
            loss_helpers['entity_embeddings'], mention_target_ids,
            batch['mention_target_weights'], el_score_mode)
      el_final_denom = el_final_metrics['denominator']
      metrics['el_final'] = el_final_metrics
      metrics['el_final_masked'] = {
          'acc':
              jnp.dot(el_final_acc_per_mention,
                      el_final_weight_per_mention * mention_target_is_masked),
          'denominator':
              jnp.dot(el_final_weight_per_mention, mention_target_is_masked),
      }
      metrics['el_final_non_masked'] = {
          'acc':
              jnp.dot(
                  el_final_acc_per_mention,
                  el_final_weight_per_mention * mention_target_is_not_masked),
          'denominator':
              jnp.dot(el_final_weight_per_mention,
                      mention_target_is_not_masked),
      }

      # Each component loss is normalized by its own denominator before the
      # configured weight is applied.
      loss = mlm_weight * mlm_loss / mlm_denom
      loss += el_im_weight * el_im_loss / el_im_denom
      loss += el_final_weight * el_final_loss / el_final_denom

      if mtb_im_weight > 0:
        (mtb_im_loss, mtb_im_metrics) = mention_losses.mtb_loss(
            intermediate_target_mention_encodings,
            batch['mention_target_batch_positions'], mention_target_ids,
            batch_size, mtb_score_mode, mention_target_is_masked, 'im_')
        mtb_im_denom = mtb_im_metrics['im_mtb']['denominator']
        loss += mtb_im_weight * mtb_im_loss / mtb_im_denom
        metrics.update(mtb_im_metrics)

      if mtb_final_weight > 0:
        (mtb_final_loss, mtb_final_metrics) = mention_losses.mtb_loss(
            loss_helpers['target_mention_encodings'],
            batch['mention_target_batch_positions'], mention_target_ids,
            batch_size, mtb_score_mode, mention_target_is_masked, 'final_')
        mtb_final_denom = mtb_final_metrics['final_mtb']['denominator']
        loss += mtb_final_weight * mtb_final_loss / mtb_final_denom
        metrics.update(mtb_final_metrics)

      metrics['agg'] = {
          'loss': loss,
          'denominator': 1.0,
      }
      return loss, metrics, {}

    return loss_fn
| 40.362776 | 110 | 0.659789 |
2031e0b5f3a916074ea94a50156353c91e1f6277 | 4,368 | py | Python | am_i_the_asshole/models/classifier.py | mirandrom/am-i-the-asshole | e7e4f00aa193931d45e4012db5cc65d3679faa90 | [
"MIT"
] | 1 | 2020-10-05T16:39:18.000Z | 2020-10-05T16:39:18.000Z | am_i_the_asshole/models/classifier.py | amr-amr/am-i-the-asshole | e7e4f00aa193931d45e4012db5cc65d3679faa90 | [
"MIT"
] | null | null | null | am_i_the_asshole/models/classifier.py | amr-amr/am-i-the-asshole | e7e4f00aa193931d45e4012db5cc65d3679faa90 | [
"MIT"
] | null | null | null | from typing import Dict, Optional
import numpy
from overrides import overrides
import torch
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import FeedForward, Seq2VecEncoder, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("aita_classifier")
class AitaClassifier(Model):
    """Two-encoder text classifier for r/AmItheAsshole posts.

    The post title and body are embedded with a shared token embedder,
    encoded by separate Seq2Vec encoders, concatenated, and passed through a
    feedforward classifier over the label vocabulary.

    Note: the trailing dataset-metadata residue that was fused onto the last
    line of this class has been removed, and the dimension-check error
    messages (which referred to a non-existent "summary_encoder") now name
    the actual offending encoder.
    """

    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 title_encoder: Seq2VecEncoder,
                 text_encoder: Seq2VecEncoder,
                 classifier_feedforward: FeedForward,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.title_encoder = title_encoder
        self.text_encoder = text_encoder
        self.classifier_feedforward = classifier_feedforward

        # Both encoders consume the embedder's output, so each input
        # dimension must match the embedder's output dimension.
        self._check_encoder_dim(title_encoder, "title_encoder")
        self._check_encoder_dim(text_encoder, "text_encoder")

        self.metrics = {
            "accuracy": CategoricalAccuracy(),
            "accuracy3": CategoricalAccuracy(top_k=3)
        }
        self.loss = torch.nn.CrossEntropyLoss()

        initializer(self)

    def _check_encoder_dim(self, encoder: Seq2VecEncoder, name: str) -> None:
        """Raise ConfigurationError if `encoder` cannot consume the embedder output."""
        embedder_dim = self.text_field_embedder.get_output_dim()
        if embedder_dim != encoder.get_input_dim():
            raise ConfigurationError(f"The output dimension of the "
                                     f"text_field_embedder must match the "
                                     f"input dimension of the {name}. "
                                     f"Found {embedder_dim} and "
                                     f"{encoder.get_input_dim()}, "
                                     f"respectively.")

    @overrides
    def forward(self,
                title: Dict[str, torch.LongTensor],
                text: Dict[str, torch.LongTensor],
                label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
        """Encode title and body, classify, and (if labels given) compute loss."""
        embedded_title = self.text_field_embedder(title)
        title_mask = util.get_text_field_mask(title)
        encoded_title = self.title_encoder(embedded_title, title_mask)

        embedded_text = self.text_field_embedder(text)
        text_mask = util.get_text_field_mask(text)
        encoded_text = self.text_encoder(embedded_text, text_mask)

        logits = self.classifier_feedforward(
            torch.cat([encoded_title, encoded_text], dim=-1))
        output_dict = {'logits': logits}

        if label is not None:
            loss = self.loss(logits, label)
            for metric in self.metrics.values():
                metric(logits, label)
            output_dict["loss"] = loss

        return output_dict

    @overrides
    def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Does a simple argmax over the class probabilities, converts indices to string labels, and
        adds a ``"label"`` key to the dictionary with the result.
        """
        class_probabilities = F.softmax(output_dict['logits'], dim=-1)
        output_dict['class_probabilities'] = class_probabilities

        predictions = class_probabilities.cpu().data.numpy()
        argmax_indices = numpy.argmax(predictions, axis=-1)
        labels = [self.vocab.get_token_from_index(x, namespace="labels")
                  for x in argmax_indices]
        output_dict['label'] = labels
        return output_dict

    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Return current metric values, optionally resetting accumulators."""
        return {metric_name: metric.get_metric(reset)
                for metric_name, metric in self.metrics.items()}
f224ee9ef26bea7eb9be49ccd9da266f020a434d | 1,161 | py | Python | sparkdq/exceptions/TransformExceptions.py | PasaLab/SparkDQ | 16d50210747ef7de03cf36d689ce26ff7445f63a | [
"Apache-2.0"
] | 1 | 2021-02-08T07:49:54.000Z | 2021-02-08T07:49:54.000Z | sparkdq/exceptions/TransformExceptions.py | PasaLab/SparkDQ | 16d50210747ef7de03cf36d689ce26ff7445f63a | [
"Apache-2.0"
] | null | null | null | sparkdq/exceptions/TransformExceptions.py | PasaLab/SparkDQ | 16d50210747ef7de03cf36d689ce26ff7445f63a | [
"Apache-2.0"
class TransformException(Exception):
    """
    Transform exception happens in the process of transform, can be divided
    into TransformPreconditionException and TransformRuntimeException.
    """
    @staticmethod
    def wrap_if_necessary(exception):
        """
        Return `exception` unchanged if it already is a TransformException;
        otherwise wrap it in a TransformCalculationException so callers only
        ever see TransformException subclasses when executing transform jobs.
        """
        if isinstance(exception, TransformException):
            return exception
        # BUG FIX: previously this fell through to a bare `return`, silently
        # discarding the original exception and returning None.
        return TransformCalculationException(str(exception))
class TransformPreconditionException(TransformException):
    """
    Category 1: raised while checking preconditions, before any transform
    job is executed; precondition failures are the most common case.
    """
class IllegalTransformerParameterException(TransformPreconditionException):
    """Precondition failure caused by an illegal transformer parameter."""
class TransformRuntimeException(TransformException):
    """
    Category 2: raised while executing transform jobs, covering calculation
    failures and other runtime errors.
    """
class TransformCalculationException(TransformRuntimeException):
    """Runtime failure that occurred during transform calculation."""
class UnknownTransformerException(TransformRuntimeException):
    """Runtime failure caused by an unrecognized transformer."""
| 27.642857 | 115 | 0.750215 |
7dd53cf1d299073f29a4f4be0c7b0acba1e3045d | 63 | py | Python | npbench/benchmarks/polybench/atax/atax_numpy.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | [
"BSD-3-Clause"
] | 27 | 2021-05-10T11:49:13.000Z | 2022-03-22T18:07:19.000Z | npbench/benchmarks/polybench/atax/atax_numpy.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | [
"BSD-3-Clause"
] | 3 | 2021-12-01T13:03:17.000Z | 2022-03-17T10:53:00.000Z | npbench/benchmarks/polybench/atax/atax_numpy.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | [
"BSD-3-Clause"
] | 7 | 2021-06-24T03:40:25.000Z | 2022-01-26T09:04:33.000Z | import numpy as np
def kernel(A, x):
    """ATAX kernel: y = (A @ x) @ A, i.e. A-transposed times (A @ x) for 1-D x."""
    Ax = A @ x
    return Ax @ A
| 9 | 22 | 0.555556 |
c2140fb08d314aab8a43133d27be7267cefc95e4 | 433 | py | Python | learning/30days/two/2-3-.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | learning/30days/two/2-3-.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | learning/30days/two/2-3-.py | huangjunxiong11/TF2 | 6de61c28c59ef34be7e53762b3a759da152642f7 | [
"MIT"
] | null | null | null | import tensorflow as tf
# Scalar variable to be optimized, starting at 0.0.
x = tf.Variable(0.0, name="x", dtype=tf.float32)
# Plain stochastic gradient descent with a small fixed learning rate.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
@tf.function
def f():
    """Quadratic objective a*x**2 + b*x + c with a=1, b=-2, c=1, i.e. (x - 1)**2."""
    a = tf.constant(1.0)
    b = tf.constant(-2.0)
    c = tf.constant(1.0)
    return a * tf.pow(x, 2) + b * x + c
@tf.function
def train(epoch):
    """Run `epoch` SGD steps minimizing f, then return the final objective value."""
    for _step in tf.range(epoch):
        optimizer.minimize(f, [x])
    return f()
# Print the objective after 1000 optimization steps, then the variable itself.
tf.print(train(1000))
tf.print(x)
| 17.32 | 55 | 0.600462 |
9e22037918d51196bcdb952f7be203506bdbd5a5 | 472 | py | Python | extragear/choqok/choqok.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-09-04T09:01:03.000Z | 2022-01-04T20:09:00.000Z | extragear/choqok/choqok.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-12-15T08:11:22.000Z | 2020-12-29T19:11:13.000Z | extragear/choqok/choqok.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 19 | 2017-09-05T19:16:21.000Z | 2020-10-18T12:46:06.000Z | import info
from Package.CMakePackageBase import *
class subinfo(info.infoclass):
    """Craft blueprint metadata for Choqok."""

    def setTargets(self):
        """Register available source targets and select the default one."""
        # Track the development branch by default; 0.6.0 is the last
        # tagged release recorded in this blueprint.
        self.svnTargets["svnHEAD"] = "https://anongit.kde.org/choqok"
        self.svnTargets["0.6.0"] = "tags/choqok/0.6.0/choqok"
        self.defaultTarget = "svnHEAD"

    def setDependencies(self):
        """Declare the package's runtime dependencies."""
        self.runtimeDependencies["kdesupport/qjson"] = None
class Package(CMakePackageBase):
    """Standard CMake-driven build package for this blueprint."""

    def __init__(self):
        # Delegate all build logic to the CMake package base class.
        super().__init__()
| 26.222222 | 69 | 0.688559 |
177e93fc87228c32762a43fb38b518eb58df11be | 14,477 | py | Python | test/py/qa.qa_config_unittest.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
] | 2 | 2018-09-26T10:09:23.000Z | 2018-09-27T07:27:06.000Z | test/py/qa.qa_config_unittest.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
] | null | null | null | test/py/qa.qa_config_unittest.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
#
# Copyright (C) 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing qa.qa_config"""
import unittest
import tempfile
import shutil
import os
from ganeti import utils
from ganeti import serializer
from ganeti import constants
from ganeti import compat
from qa import qa_config
from qa import qa_error
import testutils
class TestTestEnabled(unittest.TestCase):
  """Tests for L{qa_config.TestEnabled} and the C{Either} combinator."""
  def testSimple(self):
    # With an empty configuration every test name (string or list) defaults
    # to enabled.
    for name in ["test", ["foobar"], ["a", "b"]]:
      self.assertTrue(qa_config.TestEnabled(name, _cfg={}))
    # An explicit per-test setting wins regardless of the "default" flag.
    for default in [False, True]:
      self.assertFalse(qa_config.TestEnabled("foo", _cfg={
        "tests": {
          "default": default,
          "foo": False,
          },
        }))
      self.assertTrue(qa_config.TestEnabled("bar", _cfg={
        "tests": {
          "default": default,
          "bar": True,
          },
        }))
  def testEitherWithDefault(self):
    # An Either() with no explicit settings follows the "default" flag.
    names = qa_config.Either("one")
    self.assertTrue(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": True,
        },
      }))
    self.assertFalse(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": False,
        },
      }))
  def testEither(self):
    # The outer list is a conjunction (AND); Either(...) is a disjunction
    # (OR) over its members.
    names = [qa_config.Either(["one", "two"]),
             qa_config.Either("foo"),
             "hello",
             ["bar", "baz"]]
    self.assertTrue(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": True,
        },
      }))
    self.assertFalse(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": False,
        },
      }))
    # Disabling any AND-ed member disables the whole expression.
    for name in ["foo", "bar", "baz", "hello"]:
      self.assertFalse(qa_config.TestEnabled(names, _cfg={
        "tests": {
          "default": True,
          name: False,
          },
        }))
    self.assertFalse(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": True,
        "one": False,
        "two": False,
        },
      }))
    # One enabled alternative inside Either() is sufficient.
    self.assertTrue(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": True,
        "one": False,
        "two": True,
        },
      }))
    self.assertFalse(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": True,
        "one": True,
        "two": True,
        "foo": False,
        },
      }))
  def testEitherNestedWithAnd(self):
    # A list nested inside Either() is again a conjunction of its members.
    names = qa_config.Either([["one", "two"], "foo"])
    self.assertTrue(qa_config.TestEnabled(names, _cfg={
      "tests": {
        "default": True,
        },
      }))
    for name in ["one", "two"]:
      self.assertFalse(qa_config.TestEnabled(names, _cfg={
        "tests": {
          "default": True,
          "foo": False,
          name: False,
          },
        }))
  def testCallable(self):
    # Callables are evaluated and their truthiness decides enablement.
    self.assertTrue(qa_config.TestEnabled([lambda: True], _cfg={}))
    for value in [None, False, "", 0]:
      self.assertFalse(qa_config.TestEnabled(lambda: value, _cfg={}))
class TestQaConfigLoad(unittest.TestCase):
  """Tests for loading and validating QA configuration files."""
  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self.tmpdir)
  def testLoadNonExistent(self):
    filename = utils.PathJoin(self.tmpdir, "does.not.exist")
    self.assertRaises(EnvironmentError, qa_config._QaConfig.Load, filename)
  @staticmethod
  def _WriteConfig(filename, data):
    # Serialize the test configuration as JSON, as expected by Load().
    utils.WriteFile(filename, data=serializer.DumpJson(data))
  def _CheckLoadError(self, filename, data, expected):
    # Write "data" to "filename" and assert that loading it fails with an
    # error message starting with "expected".
    self._WriteConfig(filename, data)
    try:
      qa_config._QaConfig.Load(filename)
    except qa_error.Error, err:
      self.assertTrue(str(err).startswith(expected))
    else:
      self.fail("Exception was not raised")
  def testFailsValidation(self):
    # Build up a configuration incrementally, checking the specific
    # validation error produced at each incomplete stage.
    filename = utils.PathJoin(self.tmpdir, "qa.json")
    testconfig = {}
    check_fn = compat.partial(self._CheckLoadError, filename, testconfig)
    # No cluster name
    check_fn("Cluster name is required")
    testconfig["name"] = "cluster.example.com"
    # No nodes
    check_fn("Need at least one node")
    testconfig["nodes"] = [
      {
        "primary": "xen-test-0",
        "secondary": "192.0.2.1",
        },
      ]
    # No instances
    check_fn("Need at least one instance")
    testconfig["instances"] = [
      {
        "name": "xen-test-inst1",
        },
      ]
    # Missing "disk" and "disk-growth"
    check_fn("Config option 'disks'")
    testconfig["disks"] = []
    # Minimal accepted configuration
    self._WriteConfig(filename, testconfig)
    result = qa_config._QaConfig.Load(filename)
    self.assertTrue(result.get("nodes"))
    # Non-existent instance check script
    testconfig[qa_config._INSTANCE_CHECK_KEY] = \
      utils.PathJoin(self.tmpdir, "instcheck")
    check_fn("Can't find instance check script")
    del testconfig[qa_config._INSTANCE_CHECK_KEY]
    # No enabled hypervisor
    testconfig[qa_config._ENABLED_HV_KEY] = None
    check_fn("No hypervisor is enabled")
    # Unknown hypervisor
    testconfig[qa_config._ENABLED_HV_KEY] = ["#unknownhv#"]
    check_fn("Unknown hypervisor(s) enabled:")
    del testconfig[qa_config._ENABLED_HV_KEY]
    # Invalid path for virtual cluster base directory
    testconfig[qa_config._VCLUSTER_MASTER_KEY] = "value"
    testconfig[qa_config._VCLUSTER_BASEDIR_KEY] = "./not//normalized/"
    check_fn("Path given in option 'vcluster-basedir' must be")
    # Inconsistent virtual cluster settings
    testconfig.pop(qa_config._VCLUSTER_MASTER_KEY)
    testconfig[qa_config._VCLUSTER_BASEDIR_KEY] = "/tmp"
    check_fn("All or none of the")
    testconfig[qa_config._VCLUSTER_MASTER_KEY] = "master.example.com"
    testconfig.pop(qa_config._VCLUSTER_BASEDIR_KEY)
    check_fn("All or none of the")
    # Accepted virtual cluster settings
    testconfig[qa_config._VCLUSTER_MASTER_KEY] = "master.example.com"
    testconfig[qa_config._VCLUSTER_BASEDIR_KEY] = "/tmp"
    self._WriteConfig(filename, testconfig)
    result = qa_config._QaConfig.Load(filename)
    self.assertEqual(result.GetVclusterSettings(),
                     ("master.example.com", "/tmp"))
class TestQaConfigWithSampleConfig(unittest.TestCase):
  """Tests using C{qa-sample.json}.
  This test case serves two purposes:
  - Ensure shipped C{qa-sample.json} file is considered a valid QA
    configuration
  - Test some functions of L{qa_config._QaConfig} without having to
    mock a whole configuration file
  """
  def setUp(self):
    # Load the sample configuration shipped with the source tree.
    filename = "%s/qa/qa-sample.json" % testutils.GetSourceDir()
    self.config = qa_config._QaConfig.Load(filename)
  def testGetEnabledHypervisors(self):
    self.assertEqual(self.config.GetEnabledHypervisors(),
                     [constants.DEFAULT_ENABLED_HYPERVISOR])
  def testGetDefaultHypervisor(self):
    self.assertEqual(self.config.GetDefaultHypervisor(),
                     constants.DEFAULT_ENABLED_HYPERVISOR)
  def testGetInstanceCheckScript(self):
    self.assertTrue(self.config.GetInstanceCheckScript() is None)
  def testGetAndGetItem(self):
    # __getitem__ and get() must agree for existing keys.
    self.assertEqual(self.config["nodes"], self.config.get("nodes"))
  def testGetMasterNode(self):
    # The first configured node is treated as the master node.
    self.assertEqual(self.config.GetMasterNode(), self.config["nodes"][0])
  def testGetVclusterSettings(self):
    # Shipped default settings should be to not use a virtual cluster
    self.assertEqual(self.config.GetVclusterSettings(), (None, None))
    self.assertFalse(qa_config.UseVirtualCluster(_cfg=self.config))
class TestQaConfig(unittest.TestCase):
  """Tests exercising instance/node acquisition on a minimal configuration."""
  def setUp(self):
    filename = \
      testutils.TestDataFilename("qa-minimal-nodes-instances-only.json")
    self.config = qa_config._QaConfig.Load(filename)
  def testExclusiveStorage(self):
    # Reading the flag before it was ever set must fail.
    self.assertRaises(AssertionError, self.config.GetExclusiveStorage)
    # Any truthy/falsy value is normalized to a bool.
    for value in [False, True, 0, 1, 30804, ""]:
      self.config.SetExclusiveStorage(value)
      self.assertEqual(self.config.GetExclusiveStorage(), bool(value))
  def testIsTemplateSupported(self):
    enabled_dts = self.config.GetEnabledDiskTemplates()
    for e_s in [False, True]:
      self.config.SetExclusiveStorage(e_s)
      for template in constants.DISK_TEMPLATES:
        # A template is unsupported when disabled, or when exclusive
        # storage is on and the template is incompatible with it.
        if (template not in enabled_dts or
            e_s and template not in constants.DTS_EXCL_STORAGE):
          self.assertFalse(self.config.IsTemplateSupported(template))
        else:
          self.assertTrue(self.config.IsTemplateSupported(template))
  def testInstanceConversion(self):
    self.assertTrue(isinstance(self.config["instances"][0],
                               qa_config._QaInstance))
  def testNodeConversion(self):
    self.assertTrue(isinstance(self.config["nodes"][0],
                               qa_config._QaNode))
  def testAcquireAndReleaseInstance(self):
    self.assertFalse(compat.any(i.used for i in self.config["instances"]))
    inst = qa_config.AcquireInstance(_cfg=self.config)
    self.assertTrue(inst.used)
    self.assertTrue(inst.disk_template is None)
    inst.Release()
    self.assertFalse(inst.used)
    self.assertTrue(inst.disk_template is None)
    self.assertFalse(compat.any(i.used for i in self.config["instances"]))
  def testAcquireInstanceTooMany(self):
    # Acquire all instances
    for _ in range(len(self.config["instances"])):
      inst = qa_config.AcquireInstance(_cfg=self.config)
      self.assertTrue(inst.used)
      self.assertTrue(inst.disk_template is None)
    # The next acquisition must fail
    self.assertRaises(qa_error.OutOfInstancesError,
                      qa_config.AcquireInstance, _cfg=self.config)
  def testAcquireNodeNoneAdded(self):
    self.assertFalse(compat.any(n.added for n in self.config["nodes"]))
    # First call must return master node
    node = qa_config.AcquireNode(_cfg=self.config)
    self.assertEqual(node, self.config.GetMasterNode())
    # Next call with exclusion list fails
    self.assertRaises(qa_error.OutOfNodesError, qa_config.AcquireNode,
                      exclude=[node], _cfg=self.config)
  def testAcquireNodeTooMany(self):
    # Mark all nodes as marked (master excluded)
    for node in self.config["nodes"]:
      if node != self.config.GetMasterNode():
        node.MarkAdded()
    nodecount = len(self.config["nodes"])
    self.assertTrue(nodecount > 1)
    acquired = []
    for _ in range(nodecount):
      node = qa_config.AcquireNode(exclude=acquired, _cfg=self.config)
      if node == self.config.GetMasterNode():
        self.assertFalse(node.added)
      else:
        self.assertTrue(node.added)
      self.assertEqual(node.use_count, 1)
      acquired.append(node)
    self.assertRaises(qa_error.OutOfNodesError, qa_config.AcquireNode,
                      exclude=acquired, _cfg=self.config)
  def testAcquireNodeOrder(self):
    # Mark all nodes as marked (master excluded)
    for node in self.config["nodes"]:
      if node != self.config.GetMasterNode():
        node.MarkAdded()
    nodecount = len(self.config["nodes"])
    for iterations in [0, 1, 3, 100, 127, 7964]:
      acquired = []
      for i in range(iterations):
        node = qa_config.AcquireNode(_cfg=self.config)
        self.assertTrue(node.use_count > 0)
        # NOTE: relies on Python 2 integer division semantics of "/".
        self.assertEqual(node.use_count, (i / nodecount + 1))
        acquired.append((node.use_count, node.primary, node))
      # Check if returned nodes were in correct order
      key_fn = lambda (a, b, c): (a, utils.NiceSortKey(b), c)
      self.assertEqual(acquired, sorted(acquired, key=key_fn))
      # Release previously acquired nodes
      qa_config.ReleaseManyNodes([a[2] for a in acquired])
      # Check if nodes were actually released
      for node in self.config["nodes"]:
        self.assertEqual(node.use_count, 0)
        self.assertTrue(node.added or node == self.config.GetMasterNode())
class TestRepresentation(unittest.TestCase):
  """Tests for the repr() output of QA instance and node objects."""
  def _Check(self, target, part):
    # Assert that "part" appears as a whitespace-separated token in
    # repr(target).
    self.assertTrue(part in repr(target).split())
  def testQaInstance(self):
    inst = qa_config._QaInstance("inst1.example.com", [])
    self._Check(inst, "name=inst1.example.com")
    self._Check(inst, "nicmac=[]")
    # Default values
    self._Check(inst, "disk_template=None")
    self._Check(inst, "used=None")
    # Use instance
    inst.Use()
    self._Check(inst, "used=True")
    # Disk template
    inst.SetDiskTemplate(constants.DT_DRBD8)
    self._Check(inst, "disk_template=%s" % constants.DT_DRBD8)
    # Release instance
    inst.Release()
    self._Check(inst, "used=False")
    self._Check(inst, "disk_template=None")
  def testQaNode(self):
    node = qa_config._QaNode("primary.example.com", "192.0.2.1")
    self._Check(node, "primary=primary.example.com")
    self._Check(node, "secondary=192.0.2.1")
    self._Check(node, "added=False")
    self._Check(node, "use_count=0")
    # Mark as added
    node.MarkAdded()
    self._Check(node, "added=True")
    # Use node
    for i in range(1, 5):
      node.Use()
      self._Check(node, "use_count=%s" % i)
    # Release node
    for i in reversed(range(1, 5)):
      node.Release()
      self._Check(node, "use_count=%s" % (i - 1))
    self._Check(node, "use_count=0")
    # Mark as removed
    node.MarkRemoved()
    self._Check(node, "added=False")
# Run all test cases through Ganeti's test runner when executed directly.
if __name__ == "__main__":
  testutils.GanetiTestProgram()
| 29.973085 | 76 | 0.665884 |
5c2a0201c4ed658e5fad21bdbe94e8ea9c919163 | 14,584 | py | Python | src/hangar/backends/numpy_10.py | KarenImmanuel/hangar-py | 2a5caff259ad699db56676f14a70cb94e75d8a5b | [
"Apache-2.0"
] | null | null | null | src/hangar/backends/numpy_10.py | KarenImmanuel/hangar-py | 2a5caff259ad699db56676f14a70cb94e75d8a5b | [
"Apache-2.0"
] | null | null | null | src/hangar/backends/numpy_10.py | KarenImmanuel/hangar-py | 2a5caff259ad699db56676f14a70cb94e75d8a5b | [
"Apache-2.0"
] | null | null | null | """Local Numpy memmap Backend Implementation, Identifier: ``NUMPY_10``
Backend Identifiers
===================
* Backend: ``1``
* Version: ``0``
* Format Code: ``10``
* Canonical Name: ``NUMPY_10``
Storage Method
==============
* Data is written to specific subarray indexes inside a numpy memmapped array on disk.
* Each file is a zero-initialized array of
* ``dtype: {schema_dtype}``; ie ``np.float32`` or ``np.uint8``
* ``shape: (COLLECTION_SIZE, *{schema_shape})``; ie ``(500, 10)`` or ``(500,
4, 3)``. The first index in the array is referred to as a "collection
index".
Record Format
=============
Fields Recorded for Each Array
------------------------------
* Format Code
* File UID
* Alder32 Checksum
* Collection Index (0:COLLECTION_SIZE subarray selection)
* Subarray Shape
Separators used
---------------
* ``SEP_KEY: ":"``
* ``SEP_HSH: "$"``
* ``SEP_LST: " "``
* ``SEP_SLC: "*"``
Examples
--------
1) Adding the first piece of data to a file:
* Array shape (Subarray Shape): (10)
* File UID: "NJUUUK"
* Alder32 Checksum: 900338819
* Collection Index: 2
``Record Data => '10:NJUUUK$900338819$2*10'``
2) Adding a piece of data to the middle of a file:
* Array shape (Subarray Shape): (20, 2, 3)
* File UID: "Mk23nl"
* Alder32 Checksum: 2546668575
* Collection Index: 199
``Record Data => "10:Mk23nl$2546668575$199*20 2 3"``
Technical Notes
===============
* A typical numpy memmap file persisted to disk does not retain information
about its datatype or shape, and as such must be provided when re-opened
  after close. In order to persist a memmap in ``.npy`` format, we use the
  special function ``open_memmap`` imported from ``np.lib.format`` which can
open a memmap file and persist necessary header info to disk in ``.npy``
format.
* On each write, an ``alder32`` checksum is calculated. This is not for use as
the primary hash algorithm, but rather stored in the local record format
itself to serve as a quick way to verify no disk corruption occurred. This is
required since numpy has no built in data integrity validation methods when
reading from disk.
"""
import os
import re
from collections import ChainMap
from functools import partial
from os.path import join as pjoin
from os.path import splitext as psplitext
from typing import MutableMapping, NamedTuple, Tuple, Optional
from xxhash import xxh64_hexdigest
import numpy as np
from numpy.lib.format import open_memmap
from .. import constants as c
from ..utils import random_string, symlink_rel
# ----------------------------- Configuration ---------------------------------


# number of subarray contents of a single numpy memmap file
COLLECTION_SIZE = 1000


# -------------------------------- Parser Implementation ----------------------

# backend format code identifying NUMPY_10 records
_FmtCode = '10'
# match and remove the following characters: '[' ']' '(' ')' ','
_ShapeFmtRE = re.compile('[,\(\)\[\]]')
# split up a formatted parsed string into unique fields
_SplitDecoderRE = re.compile(fr'[\{c.SEP_KEY}\{c.SEP_HSH}\{c.SEP_SLC}]')

# Parsed record specification: backend format code, file uid, checksum,
# collection index within the memmap file, and the stored subarray shape.
NUMPY_10_DataHashSpec = NamedTuple('NUMPY_10_DataHashSpec',
                                   [('backend', str), ('uid', str),
                                    ('checksum', str), ('collection_idx', int),
                                    ('shape', Tuple[int])])
def numpy_10_encode(uid: str, checksum: str, collection_idx: int, shape: tuple) -> bytes:
"""converts the numpy data spect to an appropriate db value
Parameters
----------
uid : str
file name (schema uid) of the np file to find this data piece in.
checksum : int
xxhash64_hexdigest checksum of the data as computed on that local machine.
collection_idx : int
collection first axis index in which this data piece resides.
shape : tuple
shape of the data sample written to the collection idx. ie: what
subslices of the array should be read to retrieve the sample as
recorded.
Returns
-------
bytes
hash data db value recording all input specifications
"""
out_str = f'{_FmtCode}{c.SEP_KEY}'\
f'{uid}{c.SEP_HSH}{checksum}{c.SEP_HSH}'\
f'{collection_idx}{c.SEP_SLC}'\
f'{_ShapeFmtRE.sub("", str(shape))}'
return out_str.encode()
def numpy_10_decode(db_val: bytes) -> NUMPY_10_DataHashSpec:
"""converts a numpy data hash db val into a numpy data python spec
Parameters
----------
db_val : bytes
data hash db val
Returns
-------
DataHashSpec
numpy data hash specification containing `backend`, `schema`, and
`uid`, `collection_idx` and `shape` fields.
"""
db_str = db_val.decode()
_, uid, checksum, collection_idx, shape_vs = _SplitDecoderRE.split(db_str)
# if the data is of empty shape -> shape_vs = '' str.split() default value
# of none means split according to any whitespace, and discard empty strings
# from the result. So long as c.SEP_LST = ' ' this will work
shape = tuple(int(x) for x in shape_vs.split())
raw_val = NUMPY_10_DataHashSpec(backend=_FmtCode,
uid=uid,
checksum=checksum,
collection_idx=int(collection_idx),
shape=shape)
return raw_val
# ------------------------- Accessor Object -----------------------------------
class NUMPY_10_FileHandles(object):
def __init__(self, repo_path: os.PathLike, schema_shape: tuple, schema_dtype: np.dtype):
self.repo_path = repo_path
self.schema_shape = schema_shape
self.schema_dtype = schema_dtype
self._dflt_backend_opts: Optional[dict] = None
self.rFp: MutableMapping[str, np.memmap] = {}
self.wFp: MutableMapping[str, np.memmap] = {}
self.Fp = ChainMap(self.rFp, self.wFp)
self.mode: str = None
self.w_uid: str = None
self.hIdx: int = None
self.slcExpr = np.s_
self.slcExpr.maketuple = False
self.STAGEDIR = pjoin(self.repo_path, c.DIR_DATA_STAGE, _FmtCode)
self.REMOTEDIR = pjoin(self.repo_path, c.DIR_DATA_REMOTE, _FmtCode)
self.DATADIR = pjoin(self.repo_path, c.DIR_DATA, _FmtCode)
self.STOREDIR = pjoin(self.repo_path, c.DIR_DATA_STORE, _FmtCode)
if not os.path.isdir(self.DATADIR):
os.makedirs(self.DATADIR)
def __enter__(self):
return self
def __exit__(self, *exc):
if self.w_uid in self.wFp:
self.wFp[self.w_uid].flush()
@property
def backend_opts(self):
return self._dflt_backend_opts
@backend_opts.setter
def backend_opts(self, val):
if self.mode == 'a':
self._dflt_backend_opts = val
return
else:
raise AttributeError(f"can't set property in read only mode")
def open(self, mode: str, *, remote_operation: bool = False):
"""open numpy file handle coded directories
Parameters
----------
mode : str
one of `a` for `write-enabled` mode or `r` for read-only
remote_operation : bool, optional, kwarg only
True if remote operations call this method. Changes the symlink
directories used while writing., by default False
"""
self.mode = mode
if self.mode == 'a':
process_dir = self.REMOTEDIR if remote_operation else self.STAGEDIR
if not os.path.isdir(process_dir):
os.makedirs(process_dir)
process_uids = [psplitext(x)[0] for x in os.listdir(process_dir) if x.endswith('.npy')]
for uid in process_uids:
file_pth = pjoin(process_dir, f'{uid}.npy')
self.rFp[uid] = partial(open_memmap, file_pth, 'r')
if not remote_operation:
if not os.path.isdir(self.STOREDIR):
return
store_uids = [psplitext(x)[0] for x in os.listdir(self.STOREDIR) if x.endswith('.npy')]
for uid in store_uids:
file_pth = pjoin(self.STOREDIR, f'{uid}.npy')
self.rFp[uid] = partial(open_memmap, file_pth, 'r')
def close(self, *args, **kwargs):
"""Close any open file handles.
"""
if self.mode == 'a':
if self.w_uid in self.wFp:
self.wFp[self.w_uid].flush()
self.w_uid = None
self.hIdx = None
for k in list(self.wFp.keys()):
del self.wFp[k]
for k in list(self.rFp.keys()):
del self.rFp[k]
@staticmethod
def delete_in_process_data(repo_path, *, remote_operation=False):
"""Removes some set of files entirely from the stage/remote directory.
DANGER ZONE. This should essentially only be used to perform hard resets
of the repository state.
Parameters
----------
repo_path : str
path to the repository on disk
remote_operation : optional, kwarg only, bool
If true, modify contents of the remote_dir, if false (default) modify
contents of the staging directory.
"""
data_dir = pjoin(repo_path, c.DIR_DATA, _FmtCode)
PDIR = c.DIR_DATA_STAGE if not remote_operation else c.DIR_DATA_REMOTE
process_dir = pjoin(repo_path, PDIR, _FmtCode)
if not os.path.isdir(process_dir):
return
process_uids = (psplitext(x)[0] for x in os.listdir(process_dir) if x.endswith('.npy'))
for process_uid in process_uids:
remove_link_pth = pjoin(process_dir, f'{process_uid}.npy')
remove_data_pth = pjoin(data_dir, f'{process_uid}.npy')
os.remove(remove_link_pth)
os.remove(remove_data_pth)
os.rmdir(process_dir)
def _create_schema(self, *, remote_operation: bool = False):
"""stores the shape and dtype as the schema of a arrayset.
Parameters
----------
remote_operation : optional, kwarg only, bool
if this schema is being created from a remote fetch operation, then do not
place the file symlink in the staging directory. Instead symlink it
to a special remote staging directory. (default is False, which places the
symlink in the stage data directory.)
"""
uid = random_string()
file_path = pjoin(self.DATADIR, f'{uid}.npy')
m = open_memmap(file_path,
mode='w+',
dtype=self.schema_dtype,
shape=(COLLECTION_SIZE, *self.schema_shape))
self.wFp[uid] = m
self.w_uid = uid
self.hIdx = 0
if remote_operation:
symlink_file_path = pjoin(self.REMOTEDIR, f'{uid}.npy')
else:
symlink_file_path = pjoin(self.STAGEDIR, f'{uid}.npy')
symlink_rel(file_path, symlink_file_path)
def read_data(self, hashVal: NUMPY_10_DataHashSpec) -> np.ndarray:
"""Read data from disk written in the numpy_00 fmtBackend
Parameters
----------
hashVal : NUMPY_10_DataHashSpec
record specification stored in the db
Returns
-------
np.ndarray
tensor data stored at the provided hashVal specification.
Raises
------
RuntimeError
If the recorded checksum does not match the received checksum.
Notes
-----
TO AVOID DATA LOSS / CORRUPTION:
* On a read operation, we copy memmap subarray tensor data to a new
`np.ndarray` instance so as to prevent writes on a raw memmap result
slice (a `np.memmap` instance) from propogating to data on disk.
* This is an issue for reads from a write-enabled checkout where data
was just written, since the np flag "WRITEABLE" and "OWNDATA" will be
true, and writes to the returned array would be overwrite that data
slice on disk.
* For read-only checkouts, modifications to the resultant array would
perform a "copy on write"-like operation which would be propogated to
all future reads of the subarray from that process, but which would
not be persisted to disk.
"""
srcSlc = (self.slcExpr[hashVal.collection_idx],
*(self.slcExpr[0:x] for x in hashVal.shape))
try:
res = self.Fp[hashVal.uid][srcSlc]
except TypeError:
self.Fp[hashVal.uid] = self.Fp[hashVal.uid]()
res = self.Fp[hashVal.uid][srcSlc]
except KeyError:
process_dir = self.STAGEDIR if self.mode == 'a' else self.STOREDIR
file_pth = pjoin(process_dir, f'{hashVal.uid}.npy')
if os.path.islink(file_pth):
self.rFp[hashVal.uid] = open_memmap(file_pth, 'r')
res = self.Fp[hashVal.uid][srcSlc]
else:
raise
out = np.array(res, dtype=res.dtype, order='C')
if xxh64_hexdigest(out) != hashVal.checksum:
raise RuntimeError(
f'DATA CORRUPTION Checksum {xxh64_hexdigest(out)} != recorded {hashVal}')
return out
def write_data(self, array: np.ndarray, *, remote_operation: bool = False) -> bytes:
"""writes array data to disk in the numpy_00 fmtBackend
Parameters
----------
array : np.ndarray
tensor to write to disk
remote_operation : bool, optional, kwarg only
True if writing in a remote operation, otherwise False. Default is
False
Returns
-------
bytes
db hash record value specifying location information
"""
checksum = xxh64_hexdigest(array)
if self.w_uid in self.wFp:
self.hIdx += 1
if self.hIdx >= COLLECTION_SIZE:
self.wFp[self.w_uid].flush()
self._create_schema(remote_operation=remote_operation)
else:
self._create_schema(remote_operation=remote_operation)
destSlc = (self.slcExpr[self.hIdx], *(self.slcExpr[0:x] for x in array.shape))
self.wFp[self.w_uid][destSlc] = array
hashVal = numpy_10_encode(uid=self.w_uid,
checksum=checksum,
collection_idx=self.hIdx,
shape=array.shape)
return hashVal
| 35.142169 | 99 | 0.597504 |
2d6fd80e87706166893681c054da9a6c90977413 | 896 | py | Python | pathways/views/__init__.py | uw-it-aca/pathways | ad1d136f6e4eab6a4ccc09429d56d3d31a247b27 | [
"Apache-2.0"
] | null | null | null | pathways/views/__init__.py | uw-it-aca/pathways | ad1d136f6e4eab6a4ccc09429d56d3d31a247b27 | [
"Apache-2.0"
] | 148 | 2021-07-12T20:34:57.000Z | 2022-03-11T21:41:33.000Z | pathways/views/__init__.py | uw-it-aca/pathways | ad1d136f6e4eab6a4ccc09429d56d3d31a247b27 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from uw_saml.utils import is_member_of_group
from django.conf import settings
def eval_group_required(group_id):
"""
Similar to UW_SAML's group_required but only applies for specific env
"""
def decorator(view_func):
def wrapper(request, *args, **kwargs):
if settings.LIMIT_USER_ACCESS:
if is_member_of_group(request, group_id):
return view_func(request, *args, **kwargs)
return render(request,
'uw_saml/access_denied.html',
status=401)
return view_func(request, *args, **kwargs)
return login_required(function=wrapper)
return decorator
| 32 | 73 | 0.65067 |
9c8d4eaa984fd481577ffee3dd3a75a5a5c7a9d2 | 1,403 | py | Python | app/core/controllers/base_test.py | abhi1381/foundation-website | 1019c9c9607600b7c124e978d13d66ab781f1282 | [
"Apache-2.0"
] | 9 | 2018-12-31T16:37:29.000Z | 2021-03-07T07:34:23.000Z | app/core/controllers/base_test.py | abhi1381/foundation-website | 1019c9c9607600b7c124e978d13d66ab781f1282 | [
"Apache-2.0"
] | 129 | 2018-07-04T22:01:03.000Z | 2022-03-02T19:14:45.000Z | app/core/controllers/base_test.py | abhi1381/foundation-website | 1019c9c9607600b7c124e978d13d66ab781f1282 | [
"Apache-2.0"
] | 37 | 2018-06-24T18:35:11.000Z | 2021-12-19T09:12:07.000Z | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generic controller behavior."""
from core.tests import app_engine_test_base
import config
import main
class BaseHandlerTest(app_engine_test_base.GenericTestBase):
"""Backend integration tests for generic controller."""
def test_handle_exception_sends_email_to_admin(self):
"""Test handle_exception logs 500s and sends out email."""
messages = self.mail_stub.get_sent_messages(
to=config.ADMIN_EMAIL_ADDRESS)
self.testapp.post(main.MAIL_HANDLER_URL, params='', expect_errors=True)
messages = self.mail_stub.get_sent_messages(
to=config.ADMIN_EMAIL_ADDRESS)
self.assertEqual(1, len(messages))
self.assertIn(
'ValueError: No JSON object could be decoded',
messages[0].body.decode())
| 36.921053 | 79 | 0.732716 |
7dceb346261b0c6ee4c04433edd78777aac8184b | 54,635 | py | Python | django/db/models/fields/related.py | haroldl/homeworklog | 07f0b1a301091c2d37cbbb7810db267386d4a114 | [
"BSD-3-Clause"
] | 2 | 2020-03-13T15:07:55.000Z | 2020-03-14T14:45:42.000Z | django/db/models/fields/related.py | haroldl/homeworklog | 07f0b1a301091c2d37cbbb7810db267386d4a114 | [
"BSD-3-Clause"
] | null | null | null | django/db/models/fields/related.py | haroldl/homeworklog | 07f0b1a301091c2d37cbbb7810db267386d4a114 | [
"BSD-3-Clause"
] | 1 | 2020-10-16T04:11:58.000Z | 2020-10-16T04:11:58.000Z | from django.conf import settings
from django.db import connection, router, transaction, connections
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_unicode
from django.utils.translation import (ugettext_lazy as _, string_concat,
ungettext, ugettext)
from django.utils.functional import curry
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
except AttributeError:
# If it doesn't have a split it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name, False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
def contribute_to_class(self, cls, name):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower(),
}
other = self.rel.to
if isinstance(other, basestring) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.related)
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return self._pk_trace(value, 'get_prep_lookup', lookup_type)
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_prep_lookup', lookup_type) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return [self._pk_trace(value, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)]
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)
for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def _pk_trace(self, value, prep_func, lookup_type, **kwargs):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v = value
# In the case of an FK to 'self', this check allows to_field to be used
# for both forwards and reverse lookups across the FK. (For normal FKs,
# it's only relevant for forward lookups).
if isinstance(v, self.rel.to):
field_name = getattr(self.rel, "field_name", None)
else:
field_name = None
try:
while True:
if field_name is None:
field_name = v._meta.pk.name
v = getattr(v, field_name)
field_name = None
except AttributeError:
pass
except exceptions.ObjectDoesNotExist:
v = None
field = self
while field.rel:
if hasattr(field.rel, 'field_name'):
field = field.rel.to._meta.get_field(field.rel.field_name)
else:
field = field.rel.to._meta.pk
if lookup_type in ('range', 'in'):
v = [v]
v = getattr(field, prep_func)(lookup_type, v, **kwargs)
if isinstance(v, list):
v = v[0]
return v
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or self.opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
db = router.db_for_read(self.related.model, instance=instance)
rel_obj = self.related.model._base_manager.using(db).get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name)
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# Set the value of the related field to the value of the related object's related field
setattr(value, self.related.field.attname, getattr(instance, self.related.field.rel.get_related_field().attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
return self
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
# If the related manager indicates that it should be used for
# related fields, respect that.
rel_mgr = self.field.rel.to._default_manager
db = router.db_for_read(self.field.rel.to, instance=instance)
if getattr(rel_mgr, 'use_for_related_fields', False):
rel_obj = rel_mgr.using(db).get(**params)
else:
rel_obj = QuerySet(self.field.rel.to).using(db).get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self._field.name)
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.field.get_cache_name(), None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related:
cache_name = self.field.related.get_cache_name()
try:
delattr(related, cache_name)
except AttributeError:
pass
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.create_manager(instance,
self.related.model._default_manager.__class__)
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
def delete_manager(self, instance):
"""
Returns a queryset based on the related model's base manager (rather
than the default manager, as returned by __get__). Used by
Model.delete().
"""
return self.create_manager(instance,
self.related.model._base_manager.__class__)
def create_manager(self, instance, superclass):
"""
Creates the managers used by other methods (__get__() and delete()).
"""
rel_field = self.related.field
rel_model = self.related.model
class RelatedManager(superclass):
def get_query_set(self):
db = self._db or router.db_for_read(rel_model, instance=instance)
return superclass.get_query_set(self).using(db).filter(**(self.core_filters))
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, rel_field.name, instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs):
val = getattr(instance, rel_field.rel.get_related_field().attname)
for obj in objs:
# Is obj actually part of this descriptor set?
if getattr(obj, rel_field.attname) == val:
setattr(obj, rel_field.name, None)
obj.save()
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, instance))
remove.alters_data = True
def clear(self):
for obj in self.all():
setattr(obj, rel_field.name, None)
obj.save()
clear.alters_data = True
manager = RelatedManager()
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
def create_many_related_manager(superclass, rel=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
through = rel.through
    class ManyRelatedManager(superclass):
        """Manager for the objects on the "other" side of a many-to-many relation.

        Created dynamically (inside create_many_related_manager) so that it can
        subclass the related model's default manager class.  The names 'rel',
        'through' and 'superclass' are closed over from the enclosing factory.
        """
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                join_table=None, source_field_name=None, target_field_name=None,
                reverse=False):
            super(ManyRelatedManager, self).__init__()
            # core_filters restrict every queryset to rows related to 'instance'.
            self.core_filters = core_filters
            self.model = model
            self.symmetrical = symmetrical
            self.instance = instance
            # Field names of the two FKs on the intermediary (join) model.
            self.source_field_name = source_field_name
            self.target_field_name = target_field_name
            self.through = through
            # The join rows reference the instance by PK, so an unsaved
            # instance cannot take part in an m2m relation.
            self._pk_val = self.instance.pk
            self.reverse = reverse
            if self._pk_val is None:
                raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
        def get_query_set(self):
            # Route reads through the router, honouring a forced db (_db).
            db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
            return superclass.get_query_set(self).using(db)._next_is_sticky().filter(**(self.core_filters))
        # If the ManyToMany relation has an intermediary model,
        # the add and remove methods do not exist.
        if rel.through._meta.auto_created:
            def add(self, *objs):
                self._add_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
            add.alters_data = True
            def remove(self, *objs):
                self._remove_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
                if self.symmetrical:
                    self._remove_items(self.target_field_name, self.source_field_name, *objs)
            remove.alters_data = True
        def clear(self):
            self._clear_items(self.source_field_name)
            # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
            if self.symmetrical:
                self._clear_items(self.target_field_name)
        clear.alters_data = True
        def create(self, **kwargs):
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not rel.through._meta.auto_created:
                opts = through._meta
                raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = \
                super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True
        def _add_items(self, source_field_name, target_field_name, *objs):
            # join_table: name of the m2m link table
            # source_field_name: the PK fieldname in join_table for the source object
            # target_field_name: the PK fieldname in join_table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        # Cross-database relations are not allowed.
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                (obj, self.instance._state.db, obj._state.db))
                        new_ids.add(obj.pk)
                    elif isinstance(obj, Model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    else:
                        # Assume it is a raw primary-key value.
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                # Skip pairs that already exist in the join table.
                vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
                vals = vals.filter(**{
                    source_field_name: self._pk_val,
                    '%s__in' % target_field_name: new_ids,
                })
                new_ids = new_ids - set(vals)
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action='pre_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
                # Add the ones that aren't there already
                for obj_id in new_ids:
                    self.through._default_manager.using(db).create(**{
                        '%s_id' % source_field_name: self._pk_val,
                        '%s_id' % target_field_name: obj_id,
                    })
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action='post_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to remove
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        old_ids.add(obj.pk)
                    else:
                        old_ids.add(obj)
                # Work out what DB we're operating on
                db = router.db_for_write(self.through, instance=self.instance)
                # Send a signal to the other end if need be.
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action="pre_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
                # Remove the specified objects from the join table
                self.through._default_manager.using(db).filter(**{
                    source_field_name: self._pk_val,
                    '%s__in' % target_field_name: old_ids
                }).delete()
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action="post_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
        def _clear_items(self, source_field_name):
            db = router.db_for_write(self.through, instance=self.instance)
            # source_col_name: the PK colname in join_table for the source object
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=rel.through, action="pre_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
            # Delete every join row whose source side is this instance.
            self.through._default_manager.using(db).filter(**{
                source_field_name: self._pk_val
            }).delete()
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=rel.through, action="post_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField pointed at them by
    # some other model (rather than having a ManyToManyField themselves).
    # In the example "publication.article_set", the article_set attribute is a
    # ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        # Accessing the attribute on the class (not an instance) returns the
        # descriptor itself, which is the standard descriptor convention.
        if instance is None:
            return self
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model = self.related.model
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass, self.related.field.rel)
        # Note: source/target field names are swapped relative to the forward
        # descriptor because we are traversing the relation in reverse.
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
            instance=instance,
            symmetrical=False,
            source_field_name=self.related.field.m2m_reverse_field_name(),
            target_field_name=self.related.field.m2m_field_name(),
            reverse=True
        )
        return manager
    def __set__(self, instance, value):
        # Assignment replaces the full set of related objects: clear then add.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        if not self.related.field.rel.through._meta.auto_created:
            opts = self.related.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField defined in their
    # model (rather than having another model pointed *at* them).
    # In the example "article.publications", the publications attribute is a
    # ReverseManyRelatedObjectsDescriptor instance.
    def __init__(self, m2m_field):
        self.field = m2m_field # the ManyToManyField this descriptor serves
    def _through(self):
        # through is provided so that you have easy access to the through
        # model (Book.authors.through) for inlines, etc. This is done as
        # a property to ensure that the fully resolved value is returned.
        return self.field.rel.through
    through = property(_through)
    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model=self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass, self.field.rel)
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
            instance=instance,
            symmetrical=self.field.rel.symmetrical,
            source_field_name=self.field.m2m_field_name(),
            target_field_name=self.field.m2m_reverse_field_name(),
            reverse=False
        )
        return manager
    def __set__(self, instance, value):
        # Assignment replaces the full set of related objects: clear then add.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        if not self.field.rel.through._meta.auto_created:
            opts = self.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ManyToOneRel(object):
    """Metadata for the "one" side of a many-to-one relation (ForeignKey.rel)."""
    def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
            parent_link=False, on_delete=None):
        try:
            to._meta
        except AttributeError:
            # No _meta, so 'to' has to be a lazy reference: either a model
            # name string or RECURSIVE_RELATIONSHIP_CONSTANT.
            assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.to = to
        self.field_name = field_name
        self.related_name = related_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.multiple = True
        self.parent_link = parent_link
        self.on_delete = on_delete
    def is_hidden(self):
        "Should the related object be hidden?"
        # A related_name ending in '+' suppresses the reverse accessor.
        name = self.related_name
        return name and name.endswith('+')
    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        data = self.to._meta.get_field_by_name(self.field_name)
        field, direct = data[0], data[2]
        if not direct:
            raise FieldDoesNotExist("No related field named '%s'" %
                    self.field_name)
        return field
class OneToOneRel(ManyToOneRel):
    """Metadata for a one-to-one relation; like ManyToOneRel but single-valued."""
    def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
            parent_link=False, on_delete=None):
        super(OneToOneRel, self).__init__(
            to, field_name, related_name=related_name,
            limit_choices_to=limit_choices_to, parent_link=parent_link,
            on_delete=on_delete)
        # The reverse side of a one-to-one holds at most a single object.
        self.multiple = False
class ManyToManyRel(object):
    """Relation metadata for a ManyToManyField (stored as field.rel)."""
    def __init__(self, to, related_name=None, limit_choices_to=None,
            symmetrical=True, through=None):
        self.to = to
        self.related_name = related_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.symmetrical = symmetrical
        self.multiple = True
        # Intermediary ("through") model, or None for an auto-created one.
        self.through = through
    def is_hidden(self):
        "Should the related object be hidden?"
        # A related_name ending in '+' suppresses the reverse accessor.
        name = self.related_name
        return name and name.endswith('+')
    def get_related_field(self):
        """
        Returns the field in the 'to' object to which this relationship is tied
        (this is always the primary key on the target model). Provided for
        symmetry with ManyToOneRel.
        """
        return self.to._meta.pk
class ForeignKey(RelatedField, Field):
    """Many-to-one relation field; stores the related row's key in an FK column."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Model %(model)s with pk %(pk)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")
    def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
        try:
            to_name = to._meta.object_name.lower()
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        # FK columns are indexed by default unless explicitly overridden.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        kwargs['rel'] = rel_class(to, to_field,
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            parent_link=kwargs.pop('parent_link', False),
            on_delete=kwargs.pop('on_delete', CASCADE),
        )
        Field.__init__(self, **kwargs)
    def validate(self, value, model_instance):
        # Parent links are validated through the parent model itself.
        if self.rel.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return
        # Verify the referenced row actually exists on the read database.
        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.rel.to._default_manager.using(using).filter(
            **{self.rel.field_name: value}
        )
        qs = qs.complex_filter(self.rel.limit_choices_to)
        if not qs.exists():
            raise exceptions.ValidationError(self.error_messages['invalid'] % {
                'model': self.rel.to._meta.verbose_name, 'pk': value})
    def get_attname(self):
        # The attribute holding the raw key is "<name>_id".
        return '%s_id' % self.name
    def get_validator_unique_lookup_type(self):
        return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.rel.get_related_field().attname)
        return field_default
    def get_db_prep_save(self, value, connection):
        # Empty string / None both map to SQL NULL.
        if value == '' or value == None:
            return None
        else:
            # Delegate to the target field so the key is serialized with the
            # type conventions of the database it lives on.
            return self.rel.get_related_field().get_db_prep_save(value,
                connection=connections[router.db_for_read(self.rel.to)])
    def value_to_string(self, obj):
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_unicode(choice_list[1][0])
        return Field.value_to_string(self, obj)
    def contribute_to_class(self, cls, name):
        super(ForeignKey, self).contribute_to_class(cls, name)
        # Install the forward descriptor (instance.fk returns the related object).
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
        if isinstance(self.rel.to, basestring):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "o2m")
    def contribute_to_related_class(self, cls, related):
        # Internal FK's - i.e., those with a related name ending with '+' -
        # don't get a related descriptor.
        if not self.rel.is_hidden():
            setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
            if self.rel.limit_choices_to:
                cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
        if self.rel.field_name is None:
            self.rel.field_name = cls._meta.pk.name
    def formfield(self, **kwargs):
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
            'to_field_name': self.rel.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)
    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.rel.get_related_field()
        return rel_field.related_db_type(connection=connections[router.db_for_read(rel_field.model)])
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that always carries a "unique" constraint with it and the reverse relation
    always returns the object pointed to (since there will only ever be one),
    rather than returning a list.
    """
    description = _("One-to-one relationship")
    def __init__(self, to, to_field=None, **kwargs):
        # Force a unique constraint; that is what makes the relation 1:1.
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
    def contribute_to_related_class(self, cls, related):
        # Reverse accessor returns a single object, not a manager.
        setattr(cls, related.get_accessor_name(),
                SingleRelatedObjectDescriptor(related))
    def formfield(self, **kwargs):
        # Parent links (multi-table inheritance) are never user-editable.
        if self.rel.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)
    def save_form_data(self, instance, data):
        # A model instance goes through the descriptor (self.name); a raw key
        # value is stored directly on the attname ("<name>_id").
        if isinstance(data, self.rel.to):
            setattr(instance, self.name, data)
        else:
            setattr(instance, self.attname, data)
def create_many_to_many_intermediary_model(field, klass):
    """Build the auto-generated intermediary (join) model for an m2m field.

    'field' is the ManyToManyField; 'klass' is the model that declares it.
    Returns a new Model subclass with two ForeignKeys, one to each side.
    """
    from django.db import models
    managed = True
    if isinstance(field.rel.to, basestring) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
        # Lazy reference to another model by name: resolve 'managed' later,
        # once the target model class actually exists.
        to_model = field.rel.to
        to = to_model.split('.')[-1]
        def set_managed(field, model, cls):
            field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
        add_lazy_relation(klass, field, to_model, set_managed)
    elif isinstance(field.rel.to, basestring):
        # Recursive relation ("self").
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    else:
        to = field.rel.to._meta.object_name
        to_model = field.rel.to
        managed = klass._meta.managed or to_model._meta.managed
    name = '%s_%s' % (klass._meta.object_name, field.name)
    if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
        # Self-referencing m2m: both FKs point at the same model, so the
        # field names need disambiguating prefixes.
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.object_name.lower()
        to = to.lower()
    meta = type('Meta', (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
    })
    # Construct and return the new class.
    return type(name, (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(klass, related_name='%s+' % name),
        to: models.ForeignKey(to_model, related_name='%s+' % name)
    })
class ManyToManyField(RelatedField, Field):
    """Many-to-many relation field, backed by an intermediary (join) model."""
    description = _("Many-to-many relationship")
    def __init__(self, to, **kwargs):
        try:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        # Relations to "self" are symmetrical by default.
        kwargs['rel'] = ManyToManyRel(to,
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            symmetrical=kwargs.pop('symmetrical', to==RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None))
        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        Field.__init__(self, **kwargs)
        msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
        self.help_text = string_concat(self.help_text, ' ', msg)
    def get_choices_default(self):
        return Field.get_choices(self, include_blank=False)
    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Auto-generated table name, truncated to the backend's limit.
            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                      connection.ops.max_name_length())
    def _get_m2m_attr(self, related, attr):
        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        # The source FK is the first through-model field pointing back at the
        # model that declares this m2m.
        for f in self.rel.through._meta.fields:
            if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)
    def _get_m2m_reverse_attr(self, related, attr):
        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        for f in self.rel.through._meta.fields:
            if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
                if related.model == related.parent_model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                else:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)
    def value_to_string(self, obj):
        data = ''
        if obj:
            # Serialize the relation as the list of related PKs.
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_unicode(data)
    def contribute_to_class(self, cls, name):
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
            self.rel.related_name = "%s_rel_+" % name
        super(ManyToManyField, self).contribute_to_class(cls, name)
        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        if not self.rel.through and not cls._meta.abstract:
            self.rel.through = create_many_to_many_intermediary_model(self, cls)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, basestring):
            def resolve_through_model(field, model, cls):
                field.rel.through = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
        if isinstance(self.rel.to, basestring):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "m2m")
    def contribute_to_related_class(self, cls, related):
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # don't get a related descriptor.
        if not self.rel.is_hidden():
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
        get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
    def set_attributes_from_rel(self):
        # An m2m has no column of its own; nothing to copy from the relation.
        pass
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()
    def save_form_data(self, instance, data):
        setattr(instance, self.attname, data)
    def formfield(self, **kwargs):
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)
    def db_type(self, connection):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None
| 46.856775 | 222 | 0.627656 |
83b5061f6116e7a2c54b94d46868d9737baef6b7 | 17,757 | py | Python | qa/rpc-tests/test_framework/util.py | chris-vl/V | 42150e3fb70f4970eaad931855963614ee19e243 | [
"MIT"
] | 1 | 2018-01-19T19:29:29.000Z | 2018-01-19T19:29:29.000Z | qa/rpc-tests/test_framework/util.py | chris-vl/v | 42150e3fb70f4970eaad931855963614ee19e243 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/util.py | chris-vl/v | 42150e3fb70f4970eaad931855963614ee19e243 | [
"MIT"
] | null | null | null | # Copyright (c) 2014-2015 The VCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-vcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
def enable_coverage(dirname):
    """Turn on RPC-coverage logging: record calls under *dirname* for this run."""
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """
    Build an RPC proxy for *url*, wrapped so coverage can be logged.

    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to

    Kwargs:
        timeout (int): HTTP timeout in seconds

    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    extra_kwargs = {}
    if timeout is not None:
        extra_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **extra_kwargs)
    proxy.url = url  # store URL on proxy for info
    if COVERAGE_DIR:
        coverage_logfile = coverage.get_filename(COVERAGE_DIR, node_number)
    else:
        coverage_logfile = None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """P2P listen port for node *n*, salted with the PID so parallel runs don't collide."""
    base = 11000 + (os.getpid() % 999)
    return base + n
def rpc_port(n):
    """RPC listen port for node *n*, salted with the PID so parallel runs don't collide."""
    base = 12000 + (os.getpid() % 999)
    return base + n
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    # Round-trip through float + JSON and re-express in satoshis.
    round_tripped = int(json.loads(json.dumps(float(amount))) * 1.0e8)
    if round_tripped != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of raw bytes encoded by *hex_string*."""
    decoded = bytearray.fromhex(hex_string)
    return len(decoded)
def sync_blocks(rpc_connections, wait=1):
    """
    Poll until every connection reports the same block count,
    sleeping *wait* seconds between polls.
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        if len(set(heights)) <= 1:
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Poll until every node reports the same set of mempool transactions as
    the first node, sleeping *wait* seconds between polls.
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        in_sync = all(set(conn.getrawmempool()) == reference
                      for conn in rpc_connections[1:])
        if in_sync:
            break
        time.sleep(wait)
vcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname*, write a regtest vcoin.conf, return its path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
        "listenonion=0",
    ]
    with open(os.path.join(datadir, "vcoin.conf"), 'w') as f:
        f.write("\n".join(conf_lines) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    vcoind and vcoin-cli must be in search path.
    """
    # Regenerate the cache only if any of the four cached datadirs is missing.
    if (not os.path.isdir(os.path.join("cache","node0"))
        or not os.path.isdir(os.path.join("cache","node1"))
        or not os.path.isdir(os.path.join("cache","node2"))
        or not os.path.isdir(os.path.join("cache","node3"))):
        #find and delete old cache directories if any exist
        for i in range(4):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))
        devnull = open(os.devnull, "w")
        # Create cache directories, run vcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("VCOIND", "vcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            # Nodes 1-3 connect to node 0 to form a line topology.
            if i > 0:
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            vcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: vcoind started, calling vcoin-cli -rpcwait getblockcount"
            # -rpcwait blocks until the daemon's RPC interface is up.
            subprocess.check_call([ os.getenv("VCOINCLI", "vcoin-cli"), "-datadir="+datadir,
                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: vcoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d" % (rpc_port(i),)
                rpcs.append(get_rpc_proxy(url, i))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_vcoinds()
        # Strip run-specific files so the cache is reusable across runs.
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    # Copy the cached datadirs into the test directory.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in vcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    # Only datadirs and config files are written; no blocks are mined.
    for node_id in range(num_nodes):
        initialize_datadir(test_dir, node_id)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a vcoind and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("VCOIND", "vcoind")
    # RPC tests still depend on free transactions
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000" ]
    if extra_args is not None: args.extend(extra_args)
    vcoind_processes[i] = subprocess.Popen(args)
    devnull = open(os.devnull, "w")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: vcoind started, calling vcoin-cli -rpcwait getblockcount"
    # -rpcwait blocks until the daemon's RPC interface is up.
    subprocess.check_call([ os.getenv("VCOINCLI", "vcoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling vcoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple vcoinds, return RPC connections to them
    """
    extra_args = extra_args if extra_args is not None else [None] * num_nodes
    binary = binary if binary is not None else [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Return the path to *logname* inside node *n_node*'s regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node *i* via RPC, wait for its process to exit, then forget it."""
    node.stop()
    process = vcoind_processes[i]
    process.wait()
    del vcoind_processes[i]
def stop_nodes(nodes):
    """Ask every node to stop, then drop all RPC connections."""
    for node in list(nodes):
        node.stop()
    nodes[:] = []  # Emptying the list closes connections as a side effect
def set_node_times(nodes, t):
    """Set mocktime *t* on every node in *nodes*."""
    for each in nodes:
        each.setmocktime(t)
def wait_vcoinds():
    """Wait for every spawned vcoind process to cleanly exit, then forget them."""
    for process in vcoind_processes.values():
        process.wait()
    vcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Connect *from_connection* to local node *node_num* and wait for the handshake."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    def handshake_pending():
        return any(peer['version'] == 0 for peer in from_connection.getpeerinfo())
    while handshake_pending():
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* to each other in both directions."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert confirmations_required >= 0
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while utxo and total_in < amount_needed:
        txout = utxo.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"], "vout": txout["vout"], "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > 2 * spent:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[extra_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Need amount + two fees: one for the send-to-self, one for the real send.
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    # (the input has zero confirmations, hence zero priority)
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    rawtx = sender.createrawtransaction(inputs, outputs)
    signresult = sender.signrawtransaction(rawtx)
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless thing1 == thing2."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s" % (str(thing1), str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type *exc*."""
    try:
        fun(*args, **kwds)
    except exc:
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Raise AssertionError if *string* cannot be parsed as hexadecimal."""
    try:
        int(string, 16)
    except Exception as err:
        message = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err)
        raise AssertionError(message)
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless *string* is lowercase hex of the given length.

    Pass length=None (or 0) to skip the length check.
    """
    if not isinstance(string, basestring):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def create_confirmed_utxos(fee, node, count):
    """Ensure *node* has at least *count* confirmed utxos; return the utxo list.

    Mines enough blocks for mature coinbase funds, then repeatedly splits one
    utxo into two outputs (paying *fee* per split transaction) until *count*
    utxos exist, and mines until the mempool drains so all are confirmed.
    """
    # 101 extra blocks so the first coinbases mature; each split adds one utxo.
    node.generate(int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in xrange(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        txid = node.sendrawtransaction(signed_tx)
    # Confirm all the split transactions: keep mining until the mempool is empty.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
def gen_return_txouts():
    # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
    # So we have big transactions (and therefore can't fit very many into each block)
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 512 (6a 4d 0200) followed by 512 bytes of 0x01
    script_pubkey = "6a4d0200" + "01" * 512
    # Each txout: 8-byte zero value, compact-size script length fd0402 (516),
    # then the script itself; 128 of them preceded by the count byte 0x81.
    single_txout = "0000000000000000" + "fd0402" + script_pubkey
    return "81" + single_txout * 128
def create_lots_of_big_transactions(node, txouts, utxos, fee):
    """Spend every utxo in a large OP_RETURN-padded transaction; return the txids.

    txouts: hex string of pre-built OP_RETURN outputs (see gen_return_txouts),
    spliced into each raw transaction to inflate its size.
    """
    addr = node.getnewaddress()
    txids = []
    for i in xrange(len(utxos)):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr] = satoshi_round(send_value)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the padding outputs into the serialized transaction.
        # NOTE(review): assumes the txout-count byte sits at hex offset 92-94
        # of the serialized tx -- confirm against the serialization format.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # "NONE" sighash so the signature stays valid after the output splice.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry whose 'id' equals *key*; IndexError if absent."""
    info = node.getblockchaininfo()
    match = next((row for row in info['bip9_softforks'] if row['id'] == key), None)
    if match is None:
        raise IndexError ('key:"%s" not found' % key)
    return match
| 35.514 | 119 | 0.64414 |
ff8b2d53eb544a8e7cca54b12f5bce76979a843d | 554 | py | Python | util.py | DeDPanda-c/my_first_neuron | 34d837ebf7ed1def3813af2e0d3ee4989d21ae48 | [
"MIT"
] | null | null | null | util.py | DeDPanda-c/my_first_neuron | 34d837ebf7ed1def3813af2e0d3ee4989d21ae48 | [
"MIT"
] | null | null | null | util.py | DeDPanda-c/my_first_neuron | 34d837ebf7ed1def3813af2e0d3ee4989d21ae48 | [
"MIT"
] | null | null | null | import numpy as np
def create_first_matrix(matrix):
    """Print SUCCESS if *matrix* is a numpy array starting with 1, 2, 3; else FAILURE."""
    is_expected = (
        type(matrix) == np.ndarray
        and matrix[0] == 1
        and matrix[1] == 2
        and matrix[2] == 3
    )
    print("SUCCESS" if is_expected else "FAILURE")
def my_new_array():
    """Return a 2x5 integer array: a row of zeros above a row of threes."""
    return np.array([[0, 0, 0, 0, 0], [3, 3, 3, 3, 3]])
def check_random_matrix(a1, a2, a3):
    """Check that a1, a2, a3 have shapes (2,2), (4,4), (5,2).

    Prints FAILURE once per mismatching array and SUCCESS only when every
    shape matches.  (Bug fix: the original printed SUCCESS unconditionally,
    even after one or more FAILURE lines.)
    """
    all_ok = True
    for arr, shape in ((a1, (2, 2)), (a2, (4, 4)), (a3, (5, 2))):
        if arr.shape != shape:
            print("FAILURE")
            all_ok = False
    if all_ok:
        print("SUCCESS")
def check_mul(a4):
    """Check that a4 has shape (5, 2).

    Prints FAILURE or SUCCESS, never both.  (Bug fix: the original printed
    SUCCESS even after printing FAILURE.)
    """
    if a4.shape != (5, 2):
        print("FAILURE")
    else:
        print("SUCCESS")
1c2e31e4d91019f9a9d7ed49e73b96ac5eae1835 | 1,715 | py | Python | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716225144.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716225144.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716225144.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Decorator Parametors
In the previous ideos we saw some built-in decorators that can handle some arguments:
@wraps(fn) @lru_cache(maxsize=256) <\
def inner(): def factorial(n): \
... ... \>function call
This should look quite differient grom the decorators we have been creating and using:
@timed <----------- no function call
def Fibonacci(n):
...
"""
from symbol import parameters
from time import perf_counter
from unittest import result
def timed(fn):
    """Decorator that runs *fn* 10 times, prints the average elapsed time,
    and returns the result of the last call.

    Bug fixes: the wrapper's parameters were misspelled (*arhs, **kwarrgs),
    the accumulator was initialized under a different name (total_elapse vs
    total_elapsed), and fn was called with undefined names (args/kwargs).
    """
    from time import perf_counter

    def inner(*args, **kwargs):
        total_elapsed = 0
        for _ in range(10):  # hardcoded repetition count; see notes below on parametrizing it
            start = perf_counter()
            result = fn(*args, **kwargs)
            total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / 10
        print(avg_elapsed)
        return result
    return inner
"""
@timed
def my_func():          or      my_func = timed(my_func)
    ...

One approach to passing the repetition count (the hard-coded 10 above) as a parameter:

                      / <- extra parameter
def timed(fn, reps):
    from time import perf_counter
    def inner(*args, **kwargs):
        total_elapsed = 0                    <- free variable
        for i in range(reps):                <- reps is also a free variable
            start = perf_counter()
            result = fn(*args, **kwargs)
            total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / reps
        print(avg_elapsed)
        return result
    return inner

my_func = timed(my_func, 10)   # works, but now the plain decorator form no longer does:
                               # @timed
                               # def my_func():
                               #     ...
"""
| 27.66129 | 89 | 0.542857 |
8e4f878cc76f9af344cc205f8e1bc28cd8e5bd51 | 2,182 | py | Python | chainercv/datasets/transform_dataset.py | iory/chainercv | ecb1953f78c526dfd38308d68a4094c9f4df3a8d | [
"MIT"
] | 1 | 2017-09-04T22:03:03.000Z | 2017-09-04T22:03:03.000Z | chainercv/datasets/transform_dataset.py | iory/chainercv | ecb1953f78c526dfd38308d68a4094c9f4df3a8d | [
"MIT"
] | null | null | null | chainercv/datasets/transform_dataset.py | iory/chainercv | ecb1953f78c526dfd38308d68a4094c9f4df3a8d | [
"MIT"
] | 2 | 2019-12-16T02:20:26.000Z | 2022-01-17T02:00:49.000Z | import warnings
class TransformDataset(object):
"""Dataset that indexes data of a base dataset and transforms it.
This dataset wraps a base dataset by modifying the behavior of the base
dataset's :meth:`__getitem__`. Arrays returned by :meth:`__getitem__` of
the base dataset with an integer index are transformed by the given
function :obj:`transform`.
The function :obj:`transform` takes, as an argument, :obj:`in_data`, which
is output of the base dataset's :meth:`__getitem__`, and returns
transformed arrays as output. Please see the following example.
>>> from chainer.datasets import get_mnist
>>> from chainercv.datasets import TransformDataset
>>> dataset, _ = get_mnist()
>>> def transform(in_data):
>>> img, label = in_data
>>> img -= 0.5 # scale to [-0.5, -0.5]
>>> return img, label
>>> dataset = TransformDataset(dataset, transform)
.. note::
The index used to access data is either an integer or a slice. If it
is a slice, the base dataset is assumed to return a list of outputs
each corresponding to the output of the integer indexing.
.. note::
This class is deprecated. Please use
:class:`chainer.datasets.TransformDataset` instead.
Args:
dataset: Underlying dataset. The index of this dataset corresponds
to the index of the base dataset.
transform (callable): A function that is called to transform values
returned by the underlying dataset's :meth:`__getitem__`.
"""
def __init__(self, dataset, transform):
warnings.warn(
'chainercv.datasets.TransformDataset is deprecated. '
'Please use chainer.datasets.TransformDataset instead.',
DeprecationWarning)
self._dataset = dataset
self._transform = transform
def __getitem__(self, index):
in_data = self._dataset[index]
if isinstance(index, slice):
return [self._transform(in_data_elem) for in_data_elem in in_data]
else:
return self._transform(in_data)
def __len__(self):
return len(self._dataset)
| 35.193548 | 78 | 0.662695 |
e1de5c5ac2ced5208c1dca9804e8dbf2d58f8565 | 22,911 | py | Python | test/functional/test_framework/mininode.py | gingfinger/divi99 | 3b0602b41bf35fb1e30c12b1bf06ef1da58935eb | [
"MIT"
] | null | null | null | test/functional/test_framework/mininode.py | gingfinger/divi99 | 3b0602b41bf35fb1e30c12b1bf06ef1da58935eb | [
"MIT"
] | null | null | null | test/functional/test_framework/mininode.py | gingfinger/divi99 | 3b0602b41bf35fb1e30c12b1bf06ef1da58935eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Divi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Divi P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MIN_VERSION_SUPPORTED,
msg_addr,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cmpctblock,
msg_feefilter,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_notfound,
msg_ping,
msg_pong,
msg_reject,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
# Dispatch table mapping the NUL-stripped 12-byte P2P command name (as bytes
# on the wire) to the message class used to deserialize that payload.
MESSAGEMAP = {
    b"addr": msg_addr,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cmpctblock": msg_cmpctblock,
    b"feefilter": msg_feefilter,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,
    b"getblocktxn": msg_getblocktxn,
    b"getdata": msg_getdata,
    b"getheaders": msg_getheaders,
    b"headers": msg_headers,
    b"inv": msg_inv,
    b"mempool": msg_mempool,
    b"notfound": msg_notfound,
    b"ping": msg_ping,
    b"pong": msg_pong,
    b"reject": msg_reject,
    b"sendcmpct": msg_sendcmpct,
    b"sendheaders": msg_sendheaders,
    b"tx": msg_tx,
    b"verack": msg_verack,
    b"version": msg_version,
}
# Per-network "magic" start bytes used to frame every P2P message.
MAGIC_BYTES = {
    "mainnet": b"\xf9\xbe\xb4\xd9",  # mainnet
    "testnet3": b"\x0b\x11\x09\x07",  # testnet3
    "regtest": b"\xfa\xbf\xb5\xda",  # regtest
}
class P2PConnection(asyncio.Protocol):
    """A low-level connection object to a node's P2P interface.
    This class is responsible for:
    - opening and closing the TCP connection to the node
    - reading bytes from and writing bytes to the socket
    - deserializing and serializing the P2P message header
    - logging messages as they are sent and received
    This class contains no logic for handing the P2P message payloads. It must be
    sub-classed and the on_message() callback overridden."""
    def __init__(self):
        # The underlying transport of the connection.
        # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
        self._transport = None
    @property
    def is_connected(self):
        # True while an asyncio transport exists for this connection.
        return self._transport is not None
    def peer_connect(self, dstaddr, dstport, net="regtest"):
        """Prepare a connection to dstaddr:dstport and return a callable that
        schedules the actual connect on the network event loop."""
        assert not self.is_connected
        self.dstaddr = dstaddr
        self.dstport = dstport
        # The initial message to send after the connection was made:
        self.on_connection_send_msg = None
        self.recvbuf = b""
        self.network = net
        logger.debug('Connecting to Divi Node: %s:%d' % (self.dstaddr, self.dstport))
        loop = NetworkThread.network_event_loop
        conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
        conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
        return conn_gen
    def peer_disconnect(self):
        """Abort the transport from the network thread (no-op if already closed)."""
        # Connection could have already been closed by other end.
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
    # Connection and disconnection methods
    def connection_made(self, transport):
        """asyncio callback when a connection is opened."""
        assert not self._transport
        logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = transport
        if self.on_connection_send_msg:
            self.send_message(self.on_connection_send_msg)
            self.on_connection_send_msg = None  # Never used again
        self.on_open()
    def connection_lost(self, exc):
        """asyncio callback when a connection is closed."""
        if exc:
            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
        else:
            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = None
        self.recvbuf = b""
        self.on_close()
    # Socket read methods
    def data_received(self, t):
        """asyncio callback when data is read from the socket."""
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()
    def _on_data(self):
        """Try to read P2P messages from the recv buffer.
        This method reads data from the buffer in a loop. It deserializes,
        parses and verifies the P2P header, then passes the P2P payload to
        the on_message callback for processing.

        Wire format per message: magic(4) | command(12, NUL-padded) |
        payload length(4, little-endian) | checksum(4) | payload."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                th = sha256(msg)
                h = sha256(th)
                # Checksum is the first 4 bytes of double-SHA256 of the payload.
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command not in MESSAGEMAP:
                    raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[command]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            logger.exception('Error reading message:', repr(e))
            raise
    def on_message(self, message):
        """Callback for processing a P2P payload. Must be overridden by derived class."""
        raise NotImplementedError
    # Socket write methods
    def send_message(self, message):
        """Send a P2P message over the socket.
        This method takes a P2P payload, builds the P2P header and adds
        the message to the send buffer to be sent over the socket."""
        tmsg = self.build_message(message)
        self._log_message("send", message)
        return self.send_raw_message(tmsg)
    def send_raw_message(self, raw_message_bytes):
        """Queue pre-serialized bytes for writing on the network event loop."""
        if not self.is_connected:
            raise IOError('Not connected')
        def maybe_write():
            if not self._transport:
                return
            # Python <3.4.4 does not have is_closing, so we have to check for
            # its existence explicitly as long as Divi Core supports all
            # Python 3.4 versions.
            if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
                return
            self._transport.write(raw_message_bytes)
        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
    # Class utility methods
    def build_message(self, message):
        """Build a serialized P2P message"""
        command = message.command
        data = message.serialize()
        tmsg = MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        # Checksum: first 4 bytes of double-SHA256 of the payload.
        tmsg += h[:4]
        tmsg += data
        return tmsg
    def _log_message(self, direction, msg):
        """Logs a message being sent or received over the connection."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)
class P2PInterface(P2PConnection):
    """A high-level P2P interface class for communicating with a Divi node.
    This class provides high-level callbacks for processing P2P message
    payloads, as well as convenience methods for interacting with the
    node over P2P.
    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour."""
    def __init__(self):
        super().__init__()
        # Track number of messages of each type received and the most recent
        # message of each type
        self.message_count = defaultdict(int)
        self.last_message = {}
        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1
        # The network services received from the peer
        self.nServices = 0
    def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
        """Like P2PConnection.peer_connect, but optionally queue an initial
        version message advertising *services*."""
        create_conn = super().peer_connect(*args, **kwargs)
        if send_version:
            # Send a version msg
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.on_connection_send_msg = vt  # Will be sent soon after connection_made
        return create_conn
    # Message receiving methods
    def on_message(self, message):
        """Receive message and dispatch message to appropriate callback.
        We keep a count of how many of each message type has been received
        and the most recent message of each type."""
        with mininode_lock:
            try:
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(message)
            except:
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise
    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.
    def on_open(self):
        pass
    def on_close(self):
        pass
    # Default handlers simply record the message (done in on_message above).
    def on_addr(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_notfound(self, message): pass
    def on_pong(self, message): pass
    def on_reject(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass
    def on_inv(self, message):
        # Request every announced object (type 0 entries carry no data).
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)
    def on_ping(self, message):
        # Echo the nonce back so the node sees us as responsive.
        self.send_message(msg_pong(message.nonce))
    def on_verack(self, message):
        pass
    def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
        self.send_message(msg_verack())
        self.nServices = message.nServices
    # Connection helper methods
    def wait_for_disconnect(self, timeout=60):
        """Block until the connection to the node has been dropped."""
        test_function = lambda: not self.is_connected
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    # Message receiving helper methods
    def wait_for_block(self, blockhash, timeout=60):
        """Block until a block message carrying *blockhash* has been received."""
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_header(self, blockhash, timeout=60):
        """Block until a headers message whose first header is *blockhash* arrives."""
        def test_function():
            last_headers = self.last_message.get('headers')
            if not last_headers:
                return False
            return last_headers.headers[0].rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_getdata(self, timeout=60):
        """Waits for a getdata message.
        Receiving any getdata message will satisfy the predicate. the last_message["getdata"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block/tx has been requested."""
        test_function = lambda: self.last_message.get("getdata")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_getheaders(self, timeout=60):
        """Waits for a getheaders message.
        Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block header has been requested."""
        test_function = lambda: self.last_message.get("getheaders")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
        test_function = lambda: self.last_message.get("inv") and \
                                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                                self.last_message["inv"].inv[0].hash == expected_inv[0].hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_verack(self, timeout=60):
        """Block until at least one verack has been received from the node."""
        test_function = lambda: self.message_count["verack"]
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    # Message sending helper functions
    def send_and_ping(self, message, timeout=60):
        """Send *message*, then ping/pong to ensure the node has processed it."""
        self.send_message(message)
        self.sync_with_ping(timeout=timeout)
    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        """Send a ping with a fresh nonce and wait for the matching pong."""
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
# Reentrant so a holder (e.g. an on_* callback) may safely re-acquire it.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
    """Thread running the asyncio event loop that services all P2P connections."""
    # Class-level singleton: the one asyncio event loop shared by every
    # P2PConnection. Created by the first (and only) NetworkThread instance.
    network_event_loop = None
    def __init__(self):
        super().__init__(name="NetworkThread")
        # There is only one event loop and no more than one thread must be created
        assert not self.network_event_loop
        NetworkThread.network_event_loop = asyncio.new_event_loop()
    def run(self):
        """Start the network thread."""
        self.network_event_loop.run_forever()
    def close(self, timeout=10):
        """Close the connections and network event loop."""
        # Stop must be scheduled onto the loop from this (foreign) thread.
        self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
        wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
        self.network_event_loop.close()
        self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
with mininode_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
    """Send txs to test node and test whether they're accepted to the mempool.

    - add all txs to our tx_store
    - send tx messages for all txs
    - if success is True/False: assert that the txs are/are not accepted to the mempool
    - if expect_disconnect is True: Skip the sync with ping
    - if reject_reason is set: assert that the correct reject message is logged."""
    # Register the transactions under the lock so the message-handler thread
    # can answer any getdata requests for them.
    with mininode_lock:
        for tx in txs:
            self.tx_store[tx.sha256] = tx
    # assert_debug_log expects a list; an unset reason means "expect nothing specific".
    reject_reason = [reject_reason] if reject_reason else []
    with node.assert_debug_log(expected_msgs=reject_reason):
        for tx in txs:
            self.send_message(msg_tx(tx))
        if expect_disconnect:
            self.wait_for_disconnect()
        else:
            # Round-trip a ping to ensure the node has processed our messages.
            self.sync_with_ping()
        raw_mempool = node.getrawmempool()
        if success:
            # Check that all txs are now in the mempool
            for tx in txs:
                assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
        else:
            # Check that none of the txs are now in the mempool
            for tx in txs:
                assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
| 39.365979 | 182 | 0.645061 |
e8705f9161d7b301844cf1bb70a4fde9c0037a02 | 6,038 | py | Python | cosypose/lib3d/rotations.py | ompugao/cosypose | 4e471c16f19d5ee632668cd52eaa57b562f287d6 | [
"MIT"
] | 202 | 2020-08-19T19:28:03.000Z | 2022-03-29T07:10:47.000Z | cosypose/lib3d/rotations.py | ompugao/cosypose | 4e471c16f19d5ee632668cd52eaa57b562f287d6 | [
"MIT"
] | 66 | 2020-08-24T09:28:05.000Z | 2022-03-31T07:11:06.000Z | cosypose/lib3d/rotations.py | ompugao/cosypose | 4e471c16f19d5ee632668cd52eaa57b562f287d6 | [
"MIT"
] | 66 | 2020-08-19T19:28:05.000Z | 2022-03-18T20:47:55.000Z | import torch
import numpy as np
import transforms3d
def compute_rotation_matrix_from_ortho6d(poses):
"""
Code from https://github.com/papagina/RotationContinuity
On the Continuity of Rotation Representations in Neural Networks
Zhou et al. CVPR19
https://zhouyisjtu.github.io/project_rotation/rotation.html
"""
assert poses.shape[-1] == 6
x_raw = poses[..., 0:3]
y_raw = poses[..., 3:6]
x = x_raw / torch.norm(x_raw, p=2, dim=-1, keepdim=True)
z = torch.cross(x, y_raw, dim=-1)
z = z / torch.norm(z, p=2, dim=-1, keepdim=True)
y = torch.cross(z, x, dim=-1)
matrix = torch.stack((x, y, z), -1)
return matrix
def euler2quat(xyz, axes='sxyz'):
    """Convert Euler angles to a quaternion in xyzw order.

    transforms3d produces wxyz (scalar first); this helper moves the scalar
    part to the end so the result matches the xyzw convention used elsewhere
    in this module.

    :param xyz: three Euler angles (radians), interpreted per ``axes``.
    :param axes: transforms3d axis-convention string (default 'sxyz').
    :return: numpy array quaternion [x, y, z, w].
    """
    w, x, y, z = transforms3d.euler.euler2quat(*xyz, axes=axes)
    return np.array([x, y, z, w])
def angle_axis_to_rotation_matrix(angle_axis):
    """Convert 3d vector of axis-angle rotation to 4x4 rotation matrix

    Uses the Rodrigues formula when the rotation angle is above a small
    threshold, and a first-order Taylor expansion near zero (where dividing
    by the angle would be numerically unstable).

    Args:
        angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.
    Returns:
        Tensor: tensor of 4x4 rotation matrices.
    Shape:
        - Input: :math:`(N, 3)`
        - Output: :math:`(N, 4, 4)`
    Example:
        >>> input = torch.rand(1, 3)  # Nx3
        >>> output = tgm.angle_axis_to_rotation_matrix(input)  # Nx4x4
    """
    def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
        # Rodrigues formula written out element-wise.
        # We want to be careful to only evaluate the square root if the
        # norm of the angle_axis vector is greater than zero. Otherwise
        # we get a division by zero.
        k_one = 1.0
        theta = torch.sqrt(theta2)
        # Unit rotation axis; eps guards the division for tiny angles.
        wxyz = angle_axis / (theta + eps)
        wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
        cos_theta = torch.cos(theta)
        sin_theta = torch.sin(theta)
        r00 = cos_theta + wx * wx * (k_one - cos_theta)
        r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
        r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
        r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
        r11 = cos_theta + wy * wy * (k_one - cos_theta)
        r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
        r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
        r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
        r22 = cos_theta + wz * wz * (k_one - cos_theta)
        rotation_matrix = torch.cat(
            [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    def _compute_rotation_matrix_taylor(angle_axis):
        # First-order approximation R ~= I + [w]_x for angles near zero.
        rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
        k_one = torch.ones_like(rx)
        rotation_matrix = torch.cat(
            [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    # stolen from ceres/rotation.h
    _angle_axis = torch.unsqueeze(angle_axis, dim=1)
    # theta2 is the squared rotation angle (squared norm of the vector).
    theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
    theta2 = torch.squeeze(theta2, dim=1)

    # compute rotation matrices
    rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
    rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)

    # create mask to handle both cases: both forms are computed for every
    # element and blended with 0/1 masks (branchless selection).
    eps = 1e-6
    mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
    mask_pos = (mask).type_as(theta2)
    mask_neg = (mask == False).type_as(theta2)  # noqa

    # create output pose matrix (homogeneous 4x4, identity bottom row/column)
    batch_size = angle_axis.shape[0]
    rotation_matrix = torch.eye(4).to(angle_axis.device).type_as(angle_axis)
    rotation_matrix = rotation_matrix.view(1, 4, 4).repeat(batch_size, 1, 1)
    # fill output matrix with masked values
    rotation_matrix[..., :3, :3] = \
        mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
    return rotation_matrix  # Nx4x4
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
    """Convert quaternion vector to angle axis of rotation.

    The quaternion is interpreted with the scalar part FIRST (wxyz): index 0
    is used as cos(theta/2) below.

    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        quaternion (torch.Tensor): tensor with quaternions.
    Return:
        torch.Tensor: tensor with angle axis of rotation.
    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`
    Example:
        >>> quaternion = torch.rand(2, 4)  # Nx4
        >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # unpack input and compute conversion
    q1: torch.Tensor = quaternion[..., 1]
    q2: torch.Tensor = quaternion[..., 2]
    q3: torch.Tensor = quaternion[..., 3]
    sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3

    sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
    cos_theta: torch.Tensor = quaternion[..., 0]
    # atan2 with flipped signs keeps the recovered angle in a canonical range
    # when the scalar part is negative (q and -q encode the same rotation).
    two_theta: torch.Tensor = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))

    # NOTE: torch.where evaluates both branches; k_pos may divide by zero when
    # sin_theta == 0, but k_neg (the small-angle limit 2.0) is selected there.
    k_pos: torch.Tensor = two_theta / sin_theta
    k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
    k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)

    angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
    angle_axis[..., 0] += q1 * k
    angle_axis[..., 1] += q2 * k
    angle_axis[..., 2] += q3 * k
    return angle_axis
def quat2mat(quat):
    """Convert xyzw quaternions to 4x4 homogeneous rotation matrices.

    Reorders the input to wxyz (scalar first), then converts quaternion ->
    angle-axis -> rotation matrix.

    :param quat: tensor of quaternions in xyzw order, last dimension 4.
    :return: tensor of 4x4 rotation matrices.
    """
    scalar_first = quat.clone()
    scalar_first[..., 0] = quat[..., -1]
    scalar_first[..., 1:] = quat[..., :-1]
    angle_axis = quaternion_to_angle_axis(scalar_first)
    return angle_axis_to_rotation_matrix(angle_axis)
def compute_rotation_matrix_from_quaternions(quats):
    """Normalise xyzw quaternions and return their 3x3 rotation matrices."""
    assert quats.shape[-1] == 4
    unit_quats = quats / torch.norm(quats, p=2, dim=-1, keepdim=True)
    return quat2mat(unit_quats)[:, :3, :3]
| 35.309942 | 77 | 0.626035 |
1125547cc47e6ff6d4c54335b18799b003c9b221 | 9,785 | py | Python | scibotpark/pybullet/robot.py | Shiduo-zh/pybulletSim | a51c71adc328d2071d7faf53e4bc5cd695f03ab2 | [
"MIT"
] | null | null | null | scibotpark/pybullet/robot.py | Shiduo-zh/pybulletSim | a51c71adc328d2071d7faf53e4bc5cd695f03ab2 | [
"MIT"
] | null | null | null | scibotpark/pybullet/robot.py | Shiduo-zh/pybulletSim | a51c71adc328d2071d7faf53e4bc5cd695f03ab2 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pybullet
import pybullet_data as pb_data
from pybullet_utils import bullet_client
class PybulletRobot:
    """Base wrapper for a robot simulated through pybullet.

    Subclasses must implement ``load_robot_model`` (setting ``self._body_id``)
    and may override ``set_default_joint_states`` / ``reset_joint_states``.
    """

    def __init__(self,
            default_base_transform=None,  # 7-vector: 3-translation + 4-orientation quaternion
            pb_control_mode=pybullet.POSITION_CONTROL,
            pb_control_kwargs=None,
            pb_client=None,
        ):
        """
        Args:
            default_base_transform: base pose applied on reset (3 translation +
                4 orientation values).
            pb_control_mode: a pybullet control-mode constant, or its attribute
                name as a string (e.g. "POSITION_CONTROL").
            pb_control_kwargs: extra keyword arguments forwarded to the pybullet
                setJointMotorControl* calls (e.g. forces, positionGains).
                Defaults to an empty dict.
            pb_client: bullet client to use; a DIRECT-mode client is created
                when None.
        """
        self.default_base_transform = np.array([0,-10,0,0,1,0,1]) if default_base_transform is None else default_base_transform
        # BUGFIX: previously a mutable default argument (dict()) shared across
        # instances; each instance now gets its own dict.
        self.pb_control_kwargs = dict() if pb_control_kwargs is None else pb_control_kwargs
        try:
            # Accept either the pybullet constant or its attribute name.
            self.pb_control_mode = getattr(pybullet, pb_control_mode) if isinstance(pb_control_mode, str) else pb_control_mode
        except AttributeError:
            self.pb_control_mode = pb_control_mode
        self.pb_client = bullet_client.BulletClient(connection_mode=pybullet.DIRECT) if pb_client is None else pb_client
        self.build_robot_model()

    def load_robot_model(self):
        """Load the robot into the simulation; must set ``self._body_id``."""
        self._body_id = -1
        raise NotImplementedError("load_robot_model is not implemented")

    def set_default_joint_states(self):
        """Hook for subclasses to record default joint states."""
        pass

    def reset_joint_states(self):
        """Hook for subclasses to restore joints to their default states."""
        pass

    def build_robot_model(self):
        """ initialization sequence to build and set the robot model """
        self.load_robot_model() # must implement in subclass
        self.set_valid_joint_ids(getattr(self, "valid_joint_types", None))
        self.set_default_joint_states()
        self.reset_joint_states()

    def reset(self, base_transform=None):
        """ reset the robot model to initial state
        Args:
            base_transform: a np array of base translation 3-translation + 4 orientation
        """
        if base_transform is None:
            base_transform = self.default_base_transform
        self.pb_client.resetBasePositionAndOrientation(
            self.body_id,
            base_transform[:3],
            base_transform[3:]
        )
        self.pb_client.resetBaseVelocity(self.body_id, [0, 0, 0], [0, 0, 0])
        self.reset_joint_states()

    def set_valid_joint_ids(self,
            valid_joint_types=None,  # a list of valid joint types, check pybullet docs
        ):
        """Compute which joints commands apply to.

        New attributes:
            valid_joint_ids: a list of valid joint ids
        """
        if valid_joint_types is None:
            self.valid_joint_ids = list(range(self.pb_client.getNumJoints(self._body_id)))
        else:
            self.valid_joint_ids = [
                joint_id for joint_id in range(self.pb_client.getNumJoints(self._body_id)) \
                if self.pb_client.getJointInfo(self.body_id, joint_id)[2] in valid_joint_types
            ]

    def get_joint_limits(self,
            modal="position",  # "position", "velocity", "torque"
        ):
        """ get joint limits of current robot (under joint validity configuration)
        Returns:
            limits: a np array of joint limits for the actual robot command, shape (2, n_valid_joints)
        """
        limits = []
        for joint_id in self.valid_joint_ids:
            joint_info = self.pb_client.getJointInfo(self.body_id, joint_id)
            joint_type = joint_info[2]
            # pybullet marks an unlimited prismatic/revolute joint with
            # lower-limit 0 and upper-limit -1; position control then has no
            # meaningful range.
            if (joint_type == pybullet.JOINT_PRISMATIC or joint_type == pybullet.JOINT_REVOLUTE)\
                    and joint_info[8] == 0. and joint_info[9] == -1.:
                assert modal != "position", "position control for joint {} is not supported".format(joint_info[0])
            if modal == "position":
                limits.append(np.array([joint_info[8], joint_info[9]]))  # jointLowerLimit / jointUpperLimit
            elif modal == "velocity":
                limits.append(np.array([-joint_info[11], joint_info[11]]))  # jointMaxVelocity
            elif modal == "torque":
                # BUGFIX: jointMaxForce is getJointInfo index 10; index 12 is
                # the link name (a string), which cannot be negated.
                limits.append(np.array([-joint_info[10], joint_info[10]]))
            else:
                raise NotImplementedError("modal {} is not implemented".format(modal))
        limits = np.stack(limits, axis=-1)  # (2, n_valid_joints)
        return limits

    def get_cmd_limits(self):
        """ return the command limits for the current robot (under joint validity configuration)
        Returns:
            cmd_limits: a np array of joint limits for the actual robot command, shape (2, n_valid_joints)
        """
        if self.pb_control_mode == pybullet.POSITION_CONTROL:
            return self.get_joint_limits(modal="position")
        elif self.pb_control_mode == pybullet.VELOCITY_CONTROL:
            return self.get_joint_limits(modal="velocity")
        elif self.pb_control_mode == pybullet.TORQUE_CONTROL:
            return self.get_joint_limits(modal="torque")
        elif self.pb_control_mode == pybullet.STABLE_PD_CONTROL:
            return self.get_joint_limits(modal="position")
        else:
            raise ValueError("pb_control_mode {} is not implemented".format(self.pb_control_mode))

    def get_joint_states(self,
            modal="position",  # "position", "velocity", "torque"
        ):
        """ get joint state of current robot (under joint validity configuration)
        Returns:
            joint_states: a np array of joint position/velocity/torque
        """
        joint_states = self.pb_client.getJointStates(self.body_id, self.valid_joint_ids)
        if modal == "position":
            return np.array([joint_state[0] for joint_state in joint_states])
        elif modal == "velocity":
            return np.array([joint_state[1] for joint_state in joint_states])
        elif modal == "torque":
            # index 3 is appliedJointMotorTorque
            return np.array([joint_state[3] for joint_state in joint_states])
        else:
            raise NotImplementedError("modal {} is not implemented".format(modal))

    def send_joints_cmd(self, cmd):
        """Send one command per valid joint using the configured control mode.

        NOTE: This method only sends commands to valid joints.
        """
        assert len(cmd) == len(self.valid_joint_ids), "cmd length {} is not equal to valid joint ids length {}".format(len(cmd), len(self.valid_joint_ids))
        # Map the command onto the keyword argument the control mode expects.
        if self.pb_control_mode == pybullet.POSITION_CONTROL:
            control_kwargs = dict(targetPositions=cmd)
        elif self.pb_control_mode == pybullet.VELOCITY_CONTROL:
            control_kwargs = dict(targetVelocities=cmd)
        elif self.pb_control_mode == pybullet.TORQUE_CONTROL:
            control_kwargs = dict(forces=cmd)
        elif self.pb_control_mode == pybullet.STABLE_PD_CONTROL:
            # The multi-dof API expects a list per joint.
            control_kwargs = dict(targetPositions=[[c] for c in cmd])
        else:
            control_kwargs = dict()
        # send command to valid joints
        if self.pb_control_mode in [pybullet.POSITION_CONTROL, pybullet.VELOCITY_CONTROL, pybullet.TORQUE_CONTROL]:
            # BUGFIX: the extra arguments were previously read from a never-set
            # attribute "pb_control_arguments" (the constructor stores them as
            # pb_control_kwargs), so user-supplied gains/forces were silently
            # dropped.
            self.pb_client.setJointMotorControlArray(
                self.body_id,
                self.valid_joint_ids,
                controlMode=self.pb_control_mode,
                **control_kwargs,
                **self.pb_control_kwargs,
            )
        elif self.pb_control_mode in [pybullet.STABLE_PD_CONTROL]:
            # The multi-dof API wants per-joint lists; wrap a per-call COPY so
            # self.pb_control_kwargs is never mutated (the previous code
            # wrapped the stored lists in place, double-wrapping on reuse).
            multi_dof_kwargs = dict(self.pb_control_kwargs)
            for key in ("forces", "positionGains", "velocityGains", "maxVelocities"):
                if multi_dof_kwargs.get(key) is not None:
                    multi_dof_kwargs[key] = [[v] for v in multi_dof_kwargs[key]]
            self.pb_client.setJointMotorControlMultiDofArray(
                self.body_id,
                self.valid_joint_ids,
                controlMode=self.pb_control_mode,
                **control_kwargs,
                **multi_dof_kwargs,
            )

    # must include the following property
    @property
    def body_id(self):
        return self._body_id
class DeltaPositionControlMixin:
    """Mixin adding a delta-position control mode on top of a PybulletRobot-like base.

    When constructed with pb_control_mode="DELTA_POSITION_CONTROL", commands are
    interpreted as position offsets applied to the current joint states, and the
    underlying base class is driven in POSITION_CONTROL. Any other control mode
    is passed through unchanged (the mixin then becomes a no-op).
    """
    def __init__(self,
            *args,
            delta_control_limit= 1.,  # symmetric per-joint bound on a single delta command
            pb_control_mode= "DELTA_POSITION_CONTROL",
            **kwargs
        ):
        if pb_control_mode == "DELTA_POSITION_CONTROL":
            # Drive the base class with absolute position control; the
            # presence of delta_control_limit marks delta mode for the
            # hasattr checks below.
            super().__init__(
                *args,
                pb_control_mode= "POSITION_CONTROL",
                **kwargs,
            )
            self.delta_control_limit = delta_control_limit
        else:
            super().__init__(*args, pb_control_mode= pb_control_mode, **kwargs)

    def get_cmd_limits(self):
        """ return the command limits for the current robot (under joint validity configuration)
        Returns:
            cmd_limits: a np array of joint limits for the actual robot command, shape (2, n_valid_joints)
        """
        if hasattr(self, "delta_control_limit"):
            # Delta mode: limits are a constant symmetric band, not joint limits.
            num_valid_joints = len(self.valid_joint_ids)
            limits = np.ones((num_valid_joints,), dtype= np.float32) * self.delta_control_limit
            return np.stack([-limits, limits], axis= 0)
        else:
            return super().get_cmd_limits()

    def send_joints_cmd(self, cmd):
        """Send a command; in delta mode, cmd is added to the current joint positions."""
        if hasattr(self, "delta_control_limit"):
            current_joint_states = self.get_joint_states()
            current_joint_states += cmd
            super().send_joints_cmd(current_joint_states)
        else:
            super().send_joints_cmd(cmd)
| 44.885321 | 155 | 0.625345 |
de70d1e313e569a87cedbca4639e77f789094176 | 25,115 | py | Python | api/core/utility.py | michael-pryor/GeoTweetSearch | cb6d0a7732a0584022f3720e3f696fb709dd45b5 | [
"Apache-2.0"
] | 1 | 2016-04-08T08:40:34.000Z | 2016-04-08T08:40:34.000Z | api/core/utility.py | watfordxp/GeoTweetSearch | cb6d0a7732a0584022f3720e3f696fb709dd45b5 | [
"Apache-2.0"
] | null | null | null | api/core/utility.py | watfordxp/GeoTweetSearch | cb6d0a7732a0584022f3720e3f696fb709dd45b5 | [
"Apache-2.0"
] | 2 | 2015-08-28T17:08:26.000Z | 2016-12-30T21:59:46.000Z | import hashlib
from math import sqrt, ceil, log
from collections import Hashable, OrderedDict, namedtuple, MutableMapping
import copy
import logging
import os
import string
import struct
from threading import RLock, Lock
import unittest
import urllib
import uuid
import time
import sys
__author__ = 'Michael Pryor'
logger = logging.getLogger(__name__)
def getModulePath(pyName):
    """Return the directory portion of a module file path."""
    directory = os.path.dirname(pyName)
    return directory
def join(delim, theList):
    """Join the non-None items of theList with delim, coercing items via '%s'."""
    parts = ['%s' % item for item in theList if item is not None]
    return delim.join(parts)
def joinStringsToLengthPretty(strings, maxLength):
    """Join strings with ', ' up to roughly maxLength characters.

    When a word no longer fits, the output is truncated and suffixed with
    '..' or '.. (N more)' where N is the number of words that were dropped.
    """
    result = ''
    count = 0  # number of words fully included so far
    for string in strings:
        remainingLength = maxLength - len(result)
        truncatedWord = string[:remainingLength]
        if len(truncatedWord) < len(string):
            # This word does not fit in full.
            if len(result) > 0:
                # Drop the trailing ', ' separator before adding the suffix.
                result = result[:-2]
            else:
                # First word and already too long: keep its visible prefix.
                if remainingLength > 0:
                    result += truncatedWord
                    count += 1
            more = (len(strings) - count)
            if more > 0:
                result += '.. (%d more)' % more
            else:
                result += '..'
            return result
        else:
            if remainingLength > 0:
                result += truncatedWord + ', '
                count += 1
    # Everything fitted; strip the trailing ', '.
    return result[:-2]
def joinStringsGrammarPretty(strings):
    """Join strings as natural English, e.g. 'a, b and c'.

    None and empty entries are skipped; items are coerced with unicode()
    (this module targets Python 2). Returns '' when nothing remains.
    """
    items = [unicode(s) for s in strings if s is not None and len(s) > 0]
    if len(items) == 0:
        return ''
    if len(items) == 1:
        return items[0]
    return ', '.join(items[:-1]) + ' and ' + items[-1]
def getMidPoint(coord1, coord2):
    """Return the midpoint of two 2D coordinates."""
    (x1, y1), (x2, y2) = coord1, coord2
    return (x1 + x2) / 2, (y1 + y2) / 2

def getMidPointBox(north1, north2, south1, south2):
    """Return the centre of the box described by four corner coordinates."""
    northMid = getMidPoint(north1, north2)
    southMid = getMidPoint(south1, south2)
    return getMidPoint(northMid, southMid)

def getDistance(coord1, coord2):
    """Return the Euclidean distance between two 2D coordinates."""
    (x1, y1), (x2, y2) = coord1, coord2
    return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def joinLists(list1, list2):
    """Concatenate two lists; a None input counts as empty, both None gives None."""
    if list1 is None and list2 is None:
        return None
    left = [] if list1 is None else list1
    right = [] if list2 is None else list2
    return left + right

def joinListOfLists(listOfLists):
    """Flatten one level of nesting into a single new list."""
    return [element for inner in listOfLists for element in inner]
def prepareLowerAlpha(text):
    """Normalise text to lowercase ASCII letters and single spaces.

    Non-letter characters are dropped; runs of separators collapse to one
    space; any trailing space is stripped. Returns None for None input.
    (Python 2: coerces via unicode().)
    """
    if text is None:
        return None
    text = unicode(text).lower()
    result = ''
    lastWasSpace = False  # True while the last emitted character is a pending space
    for x in text:
        if x in string.ascii_lowercase:
            result += x
            if lastWasSpace:
                lastWasSpace = False
        else:
            # Emit at most one space per run of non-letter characters.
            if not lastWasSpace and x == ' ':
                result += ' '
                lastWasSpace = True
    if lastWasSpace:
        # Drop a trailing space left by the final word boundary.
        result = result[:-1]
    return result
def extractWords(text):
    """Split text into a list of whitespace-separated words."""
    return text.split()

def urlEncodeText(text):
    """Percent-encode text for use in a URL; None passes through.

    (Python 2: uses urllib.quote.)
    """
    if text is None:
        return None
    return urllib.quote(text)
def criticalSection(lock, function):
    """Run function() while holding lock, returning its result.

    The lock is always released, even if function raises.
    """
    lock.acquire()
    try:
        return function()
    finally:
        lock.release()
class AtomicReference(object):
    """A reference whose reads, writes and get-and-set are explicitly atomic.

    Makes atomicity explicit and centralised: if reference assignment ever
    stopped being atomic in a later interpreter, only this class would need
    changing. More importantly, getAndSet is atomic, which the interpreter
    would not otherwise guarantee.
    """

    def __init__(self, item):
        self._item = item
        self._lock = RLock()

    @property
    def item(self):
        with self._lock:
            return self._item

    @item.setter
    def item(self, item):
        with self._lock:
            self._item = item

    def getAndSet(self, item):
        """Atomically replace the stored item, returning the previous one."""
        with self._lock:
            previous = self._item
            self._item = item
            return previous
""" A neat bit of lambda to reverse and return a list
This works because None is evaluated to False. """
reverse_list = lambda x: x.reverse() or x
lower_item = lambda x: x.lower() or x
def getUniqueId():
return uuid.uuid4()
class HashableImpl(Hashable):
    """ Basic hash layout for classes which have no convenient unique ID.

    Each instance draws a random id at construction; hashing, equality and
    string conversion are all defined in terms of that id.
    """

    def __init__(self):
        self.id = hash(getUniqueId())

    def __hash__(self):
        return self.id

    def __eq__(self, other):
        return self.id == other.id

    def __ne__(self, other):
        return self.id != other.id

    def __str__(self):
        return str(self.id)
def getAddedItems(oldSet, newSet):
    """Return the members of newSet that are absent from oldSet."""
    return newSet.difference(oldSet)

def getRemovedItems(oldSet, newSet):
    """Return the members of oldSet that are absent from newSet."""
    return oldSet.difference(newSet)

def isDifference(oldSet, newSet):
    """Return True if the two sets differ in any element."""
    symmetric = oldSet.symmetric_difference(newSet)
    return len(symmetric) > 0
def packArguments(*args, **kwargs):
    """Pack positional and keyword arguments into a single namedtuple.

    Positional arguments become fields arg0, arg1, ...; keyword arguments
    keep their own names. (Python 2: uses dict.iteritems().)
    """
    fieldNames = []
    fieldValues = dict()
    for index, value in enumerate(args):
        fieldName = 'arg%d' % index
        fieldNames.append(fieldName)
        fieldValues[fieldName] = value
    for key, value in kwargs.iteritems():
        fieldNames.append(key)
        fieldValues[key] = value
    packed = namedtuple('packed_arguments', fieldNames)
    return packed(**fieldValues)
def callAllCombinations(list, maxSize, function):
    """Call function with every contiguous slice of list up to maxSize long.

    For each start index, slices are produced longest-first.
    """
    length = len(list)
    for start in range(length):
        for end in range(length, start, -1):
            if end - start <= maxSize:
                function(list[start:end])
def getEpochMs():
    """Current time in milliseconds since the epoch."""
    return time.time() * 1000

def convertEpochMsToGmTime(epochMs):
    """Convert epoch milliseconds to a UTC time.struct_time."""
    return time.gmtime(epochMs / 1000)

def convertGmTimeToString(g):
    """Format a time.struct_time as 'DD/MM/YYYY HH:MM:SS'."""
    assert isinstance(g, time.struct_time)
    fields = (g.tm_mday, g.tm_mon, g.tm_year, g.tm_hour, g.tm_min, g.tm_sec)
    return '%02d/%02d/%04d %02d:%02d:%02d' % fields

def getDateTime():
    """Current UTC time formatted as 'DD/MM/YYYY HH:MM:SS'."""
    return convertGmTimeToString(convertEpochMsToGmTime(getEpochMs()))
def splitList(theList, maxSize):
    """Split theList into consecutive chunks of at most maxSize items."""
    chunks = []
    start = 0
    while True:
        piece = theList[start:start + maxSize]
        if len(piece) < 1:
            break
        chunks.append(piece)
        start += maxSize
    return chunks
def getPercentage(val, total):
    """Return val as a percentage of total, capped at 100.

    A None val counts as 0; a None or zero total yields 0.0.
    """
    if total is None or total == 0:
        # No meaningful total: report zero percent.
        total, val = 1, 0
    if val is None:
        val = 0
    val = min(val, total)  # cap at 100%
    return float(val) / float(total) * 100
class DummyIterable(object):
    """An iterable that never yields: any attempt to iterate it blocks forever."""

    def __init__(self):
        super(DummyIterable, self).__init__()

    def __iter__(self):
        # Never returns; sleeps in a loop so a consumer waits indefinitely.
        while True:
            time.sleep(100)
def prune(function, items):
    """Recursively prune a nested structure of dicts/lists in place.

    @param function A function when given an item from the structure returns true if item should be removed,
                    false if not. If an exception is thrown from the function false is assumed.
    @param items structure of items. Iterable objects will be iterated through recursively.
    @return For a non-iterable leaf: the (exception-guarded) result of function(item).
            For a container: True when the container has become empty after pruning
            (so the parent should remove it), otherwise False.
    """
    # Single item
    try:
        iter(items)
    except TypeError:
        try:
            return function(items)
        except Exception:
            return False

    # Iterate a shallow copy so removals below do not disturb iteration.
    copyItems = copy.copy(items)

    # Dict iteration.
    itera = None
    try:
        itera = copyItems.iteritems()
    except TypeError:
        pass
    except AttributeError:
        pass

    if itera:
        for itemKey, itemValue in itera:
            if prune(function, itemValue):
                items.pop(itemKey)
        # Remove empty containers.
        if len(items) == 0:
            return True
        else:
            return False

    # List iteration.
    itera = None
    try:
        itera = iter(copyItems)
    except TypeError:
        pass

    if itera:
        for item in itera:
            if prune(function, item):
                items.remove(item)
        if len(items) == 0:
            return True
        else:
            return False

    return False
def doUrlEncode(string):
    """Percent-encode a string after UTF-8 encoding it.

    (Python 2: uses urllib.quote_plus.)
    """
    encoded = string.encode('utf-8')
    return urllib.quote_plus(encoded)

def hashStringToInteger32(string):
    """Hash a string to an unsigned 32-bit integer.

    Uses the first four bytes of the MD5 digest of the URL-encoded string,
    interpreted as a little-endian unsigned long.
    """
    digest = hashlib.md5()
    digest.update(doUrlEncode(string))
    firstFourBytes = digest.digest()[:4]
    return struct.unpack("<L", firstFourBytes)[0]
class Timer(HashableImpl):
    """A millisecond-resolution periodic timer.

    ``ticked()`` reports (and consumes) whether at least ``frequency`` ms have
    elapsed since the last tick; ``waitForTick()`` sleeps until the next tick
    is due. When tickOnFirstCall is True the very first query ticks
    immediately. Operations are serialised on an internal re-entrant lock.
    """

    def __init__(self, frequencyMs=None, tickOnFirstCall=True):
        super(Timer, self).__init__()
        if frequencyMs is None:
            frequencyMs = 0
        self.frequency = frequencyMs
        self._lock = RLock()
        self.tick_on_first_call = tickOnFirstCall
        self.has_ticked = False
        self.initialized_timer = getEpochMs()
        self.resetTimer()

    def resetTimer(self):
        """Restart the current tick interval from now."""
        with self._lock:
            self.timer = getEpochMs()

    @property
    def time_since_last_tick(self):
        """Milliseconds elapsed since the last tick (or reset)."""
        with self._lock:
            return getEpochMs() - self.timer

    def tick_missed(self):
        """Return how many whole tick periods have elapsed, consuming them.

        NOTE(review): divides by self.frequency, so a timer constructed with
        the default frequency of 0 would raise ZeroDivisionError here.
        """
        with self._lock:
            count = 0
            # round down.
            timeSinceLastTick = self.time_since_last_tick
            count += int(timeSinceLastTick / self.frequency)
            if count > 0:
                self.timer += timeSinceLastTick
            if self.ticked():
                count += 1
            return count

    @property
    def time_since_constructed(self):
        """Milliseconds elapsed since this timer was created."""
        with self._lock:
            return getEpochMs() - self.initialized_timer

    def ticked(self):
        """Return True (restarting the interval) when a tick is due."""
        with self._lock:
            newTimer = getEpochMs()
            if not self.has_ticked and self.tick_on_first_call:
                self.has_ticked = True
                ticked = True
            else:
                ticked = newTimer - self.timer > self.frequency
            if ticked:
                self.timer = newTimer
            return ticked

    def waitForTick(self):
        """Sleep until the next tick is due, reset the timer, return True.

        NOTE(review): sleeps while holding the internal lock, so other threads
        using this timer block for the duration; '/ 1000' floors under
        Python 2 integer arithmetic, slightly under-sleeping.
        """
        with self._lock:
            if not self.has_ticked and self.tick_on_first_call:
                timeToWaitSeconds = 0
                self.has_ticked = True
            else:
                timeToWaitMs = self.frequency - (getEpochMs() - self.timer)
                timeToWaitSeconds = timeToWaitMs / 1000
            if timeToWaitSeconds > 0:
                time.sleep(timeToWaitSeconds)
            self.resetTimer()
            return True

    @classmethod
    def rate_limited(cls, numTicks, timePeriod, tickOnFirstCall=True):
        """Build a timer that ticks numTicks times per timePeriod milliseconds."""
        frequency = timePeriod / numTicks
        logger.debug('Rate limited timer created, num ticks: %d, time period: %d, frequency %d' % (numTicks, timePeriod, frequency))
        return cls(frequency, tickOnFirstCall)
class EventTimer(Timer):
    """Detects bursts: more than maxEventCount events with gaps < withinTimeMs."""

    def __init__(self, maxEventCount, withinTimeMs):
        super(EventTimer,self).__init__(withinTimeMs,False)
        self.max_event_count = maxEventCount
        self.time_frame = withinTimeMs
        self.event_count = 0
        # True when the most recent onEvent restarted the count (gap too long).
        self.triggered_reset = False

    def onEvent(self):
        """ @return True if max_event_count number of events
        happen within withinTimeMs of each other, where the
        gap since each previous event is < withinTimeMs. """
        if self.ticked():
            # The gap since the previous event exceeded the window: start over.
            self.event_count = 0
            self.triggered_reset = True
        else:
            self.triggered_reset = False
        self.event_count += 1
        # Restart the window from this event.
        self.resetTimer()
        return self.event_count > self.max_event_count

    def resetEventCount(self):
        """Forget all events counted so far."""
        self.event_count = 0
class EventFrequencyCounter(object):
    """Estimates how many events occurred over a sliding time period.

    Events are accumulated into buckets of updateFrequency ms each; up to
    timePeriod / updateFrequency buckets are retained, and the per-period
    count is scaled up when fewer buckets than needed are available yet.
    """
    # avoids us accidently creating a memory leak.
    MAX_MULTIPLIER = 200

    def __init__(self, updateFrequency, timePeriod):
        super(EventFrequencyCounter, self).__init__()
        self.timer = Timer(updateFrequency, False)
        self.count = 0                      # events in the current (open) bucket
        self._last_count_cache = list()     # closed buckets, oldest first
        self.time_period = timePeriod
        # Maximum number of buckets to retain.
        self.multiplier = float(timePeriod) / float(updateFrequency)
        if self.multiplier > EventFrequencyCounter.MAX_MULTIPLIER:
            logger.error('Number of updates per time period is too high, either increase the limit or change parameters')
            assert False
        self.new_data = False               # True when a bucket closed since last read
        self._lock = RLock()

    def _checkForTick(self):
        ticks = self.timer.tick_missed()
        with self._lock:
            # Fully-missed intervals had no events: record empty buckets.
            while ticks > 1:
                ticks -= 1
                self._last_count_cache.append(0)
                if len(self._last_count_cache) > self.multiplier:
                    self._last_count_cache.pop(0)
            if ticks > 0:
                # Close the current bucket.
                self._last_count_cache.append(self.count)
                if len(self._last_count_cache) > self.multiplier:
                    self._last_count_cache.pop(0)
                self.count = 0
                self.new_data = True

    def onEvent(self):
        """Record one event."""
        with self._lock:
            self.count += 1
            self._checkForTick()

    @property
    def time_period_count(self):
        """Estimated number of events over the configured time period."""
        with self._lock:
            self._checkForTick()
            requiredCacheSize = float(self.time_period) / float(self.timer.frequency)
            total = 0
            numCacheItems = 0
            for item in self._last_count_cache:
                total += item
                numCacheItems += 1
                if numCacheItems == requiredCacheSize:
                    break
            if numCacheItems > 0:
                # Scale up when fewer buckets than a full period are available.
                missingMultiplier = float(requiredCacheSize) / float(numCacheItems)
                total *= missingMultiplier
            else:
                total = 0
            return total

    def time_period_count_updated(self, returnLast=False, includeUpdateFlag=False, castToInteger=False):
        """Return the per-period count only when new data arrived since last call.

        When no new data: returns the cached previous value if returnLast,
        otherwise None. With includeUpdateFlag, returns (result, updated).
        """
        with self._lock:
            if self.new_data:
                result = self.time_period_count
                self.last_time_period_count = result
                self.new_data = False
                if castToInteger:
                    result = int(result)
                updated = True
            else:
                if returnLast:
                    try:
                        if castToInteger:
                            result = int(self.last_time_period_count)
                        else:
                            result = self.last_time_period_count
                    except AttributeError:
                        # No value has ever been computed yet.
                        result = None
                else:
                    result = None
                updated = False
            if includeUpdateFlag:
                return result, updated
            else:
                return result
def parseInteger(value, minimum=None, maximum=None, default=None):
    """Parse a string into an int clamped to [minimum, maximum].

    An empty string yields default. The minimum clamp takes precedence:
    when the value is below minimum, maximum is not consulted.
    """
    if not len(value):
        return default
    parsed = int(value)
    if minimum is not None and parsed < minimum:
        return minimum
    if maximum is not None and parsed > maximum:
        return maximum
    return parsed
def parseString(theString, acceptableStrings=None, ignoreCase=True, default=None):
    """Validate theString against an optional whitelist.

    Non-strings yield None (and a warning). Empty strings yield default.
    With no whitelist the string is returned unchanged; otherwise the
    (case-normalised, when ignoreCase) string is returned on a match, and
    default on no match. (Python 2: uses basestring.)
    """
    if not isinstance(theString, basestring):
        logger.warn('parseString received bad type of: %s' % type(theString))
        return None
    if theString is None or len(theString) == 0:
        return default
    if acceptableStrings is None or len(acceptableStrings) == 0:
        return theString
    candidate = theString.lower() if ignoreCase else theString
    for allowed in acceptableStrings:
        comparable = allowed.lower() if ignoreCase else allowed
        if candidate == comparable:
            return candidate
    return default

def parseBoolean(theString, default=None):
    """Parse 'true'/'false'/'1'/'0' (case-insensitive); default when unrecognised."""
    parsed = parseString(theString, ['true', 'false', '1', '0'], ignoreCase=True)
    if parsed is None:
        return default
    return parsed in ('true', '1')
class OrderedDictEx(MutableMapping):
    """A thread-safe ordered mapping with an optional bounded size.

    Entries are ordered by the time their key was most recently written, so
    updating a key repositions it like a fresh insert. When max_size is
    exceeded, the oldest (fifo=True) or newest (fifo=False) entry is evicted.
    With readImpactsOrder, reading a key also refreshes its position.
    """

    def __init__(self, fifo=True, maxSize=None, readImpactsOrder=False):
        super(OrderedDictEx, self).__init__()
        self._dic = OrderedDict()
        self.fifo = fifo
        self.max_size = maxSize
        self.read_impacts_order = readImpactsOrder
        self._recursing = False
        self._lock = RLock()
        self.recurse_count = 0

    def __setitem__(self, key, value):
        with self._lock:
            # Re-inserting an existing key moves it to the newest position.
            alreadyPresent = key in self._dic
            if alreadyPresent:
                del self._dic[key]
            self._dic[key] = value
            # Replacing a key cannot grow the dict, so skip the size check then.
            if self.max_size is not None and not alreadyPresent:
                while len(self._dic) > self.max_size:
                    self.removeOrderedItem()

    def __getitem__(self, key):
        with self._lock:
            value = self._dic[key]
            if self.read_impacts_order:
                self.__setitem__(key, value)
            return value

    def __delitem__(self, key):
        del self._dic[key]

    def __iter__(self):
        return iter(self._dic)

    def __len__(self):
        return len(self._dic)

    def __str__(self):
        return str(self._dic)

    def __unicode__(self):
        return unicode(self._dic)

    def __repr__(self):
        return repr(self._dic)

    def removeOrderedItem(self):
        """Remove and return the next (key, value) pair in eviction order."""
        return self._dic.popitem(not self.fifo)
def upperPowerTwo(value):
    """Round value up to the nearest power of two (0 maps to 1)."""
    if value == 0:
        return 1
    exponent = ceil(log(value, 2))
    return int(pow(2, exponent))
def searchDictionary(searchName, searchSourceDict, maxResults=None, caseInsensitive=None):
    """Return the values whose key contains searchName as a substring.

    caseInsensitive defaults to True; maxResults < 1 yields an empty list.
    (Python 2: uses dict.iteritems().)
    """
    matches = list()
    if caseInsensitive is None:
        caseInsensitive = True
    if caseInsensitive:
        searchName = searchName.lower()
    if maxResults is not None and maxResults < 1:
        return matches
    found = 0
    for key, value in searchSourceDict.iteritems():
        haystack = key.lower() if caseInsensitive else key
        if searchName in haystack:
            matches.append(value)
            found += 1
            if maxResults is not None and found >= maxResults:
                break
    return matches
def redirectOutputToLogger():
    """ Python prints stack traces to console but not to logger, so we
    monkey patch stderr and force it to write to logger. """
    class writer(object):
        # Buffers partial writes and emits a logger.error per completed line.
        def __init__(self):
            self.data = list()
        def write(self, string):
            if string.endswith('\n'):
                # Complete line: drop the newline and flush to the logger.
                self.data.append(string[:-1])
                self.flush()
            else:
                self.data.append(string)
        def close(self):
            self.flush()
        def flush(self):
            # NOTE(review): logs even when the buffer is empty, producing an
            # empty error record on a bare flush.
            logger.error(''.join(self.data))
            self.data = list()
    # Replace the process-wide stderr so tracebacks reach the logger.
    sys.stderr = writer()
class testUtility(unittest.TestCase):
    """Unit tests for the helpers in this module.

    NOTE(review): several tests look like interactive debugging aids rather
    than real tests — testPackArguments ends in an unconditional
    ``assert False`` and reads a non-existent ``hii`` attribute,
    testCallAllCombinations asserts nothing, and testTimer busy-loops for
    tens of seconds of wall-clock time.
    """
    def testJoin(self):
        result = join('_', ['hello'])
        logger.info(result)
        assert(len(result) == 5)
        result = join('_', ['hello', 'world'])
        logger.info(result)
        assert(len(result) == 11)
        result = join('_', ['hello', 'big', 'wide', 'world'])
        logger.info(result)
        assert(len(result) == 20)
    def testAtomicReference(self):
        ref = AtomicReference('hello')
        logger.info('Current item: %s' % ref.item)
        assert ref.item == 'hello'
        previous_item = ref.getAndSet('world')
        logger.info('Previous item: %s' % previous_item)
        logger.info('Current item: %s' % ref.item)
        assert previous_item == 'hello'
        assert ref.item == 'world'
        previous_item = ref.getAndSet('new york')
        logger.info('Previous item: %s' % previous_item)
        logger.info('Current item: %s' % ref.item)
        assert previous_item == 'world'
        assert ref.item == 'new york'
    def testGetUniqueId(self):
        uid = getUniqueId()
        logger.info(uid)
        logger.info(hash(uid))
        assert uid is not None
        uid2 = getUniqueId()
        logger.info(uid2)
        logger.info(hash(uid2))
        assert uid2 is not None
        assert uid != uid2
        assert hash(uid) != hash(uid2)
    def testSetComparison(self):
        l = ['hello', 'world', 'whats', 'up']
        oldSet = set(l)
        newSet = set(l)
        assert not isDifference(oldSet, newSet)
        addedItems = getAddedItems(oldSet, newSet)
        assert len(addedItems) == 0
        removedItems = getRemovedItems(oldSet, newSet)
        assert len(removedItems) == 0
        newSet.remove('up')
        addedItems = getAddedItems(oldSet, newSet)
        assert len(addedItems) == 0
        removedItems = getRemovedItems(oldSet, newSet)
        assert len(removedItems) == 1
        assert 'up' in removedItems
        assert isDifference(oldSet, newSet)
        oldSet.remove('world')
        addedItems = getAddedItems(oldSet, newSet)
        assert len(addedItems) == 1
        assert 'world' in addedItems
        removedItems = getRemovedItems(oldSet, newSet)
        assert len(removedItems) == 1
        assert 'up' in removedItems
        assert isDifference(oldSet, newSet)
    def testPrepareLowerAlpha(self):
        str = 'Hello'
        result = prepareLowerAlpha(str)
        assert result == 'hello'
        str = 'H e l l o 1 2 3 '
        result = prepareLowerAlpha(str)
        assert result == 'h e l l o'
        str = 'London, UK'
        result = prepareLowerAlpha(str)
        assert result == 'london uk'
        result = extractWords(result)
        print result
    def testPackArguments(self):
        # NOTE(review): broken/debug test — '.hii' does not exist and the
        # trailing assert False fails unconditionally.
        result = packArguments('hello',hi='yoyoo')[1]
        print result.hii
        assert False
    def testCallAllCombinations(self):
        # NOTE(review): prints the combinations but asserts nothing.
        query = 'I spend half my time in London and half my time in Tel Aviv, Israel'
        query = prepareLowerAlpha(query)
        queries = extractWords(query)
        def func(query):
            query = ' '.join(query)
            print query
        callAllCombinations(queries,4,func)
    def testSplitList(self):
        l = [1,2,3,4,5,6,7,8,9,10]
        l = splitList(l,3)
        assert len(l) == 4
        assert len(l[0]) == 3
        assert len(l[1]) == 3
        assert len(l[2]) == 3
        assert len(l[3]) == 1
    def testTimer(self):
        # NOTE(review): wall-clock driven; runs for many seconds.
        loopTimer = Timer(10000,False)
        timer = Timer(2000,True)
        while not loopTimer.ticked():
            if timer.waitForTick():
                print 'ticked'
            print 'waiting'
        loopTimer = Timer(5000,False)
        timer = Timer.rate_limited(18,30000,True)
        while not loopTimer.ticked():
            time.sleep(0.01)
            if timer.ticked():
                print 'ticked rate limited'
    def testDistance(self):
        p1 = 5,5
        p2 = 20,25
        result1 = getDistance(p1,p2)
        result2 = getDistance(p2,p1)
        assert result1 == 25.0
        assert result1 == result2
    def testOrderedDictEx(self):
        dic = OrderedDictEx(fifo = True, maxSize = 5)
        dic[1] = 1
        dic[2] = 2
        dic[3] = 300
        assert dic[1] == 1
        assert dic[2] == 2
        assert dic[3] == 300
        assert dic.removeOrderedItem() == (1,1)
        assert len(dic) == 2
        dic[3] = 300
        dic[4] = 4000
        dic[5] = 50
        dic[6] = 70
        dic[7] = 9000
        assert dic.get(1) is None
        assert dic.get(2) is None
        assert dic[3] == 300
        assert dic[4] == 4000
        assert dic[5] == 50
        assert dic[6] == 70
        assert dic[7] == 9000
        assert len(dic) == 5
if __name__ == '__main__':
    # Manual smoke test for EventFrequencyCounter: run for one minute,
    # alternating every 10 seconds between a 100 ms and a 200 ms event
    # trigger, and print the counter's per-period results as they update.
    eventTimer = EventFrequencyCounter(50,100)
    runTimer = Timer(60000, False)
    triggerEventTimer = Timer(100,True)
    switchToOtherTimer = Timer(10000, False)
    otherTriggerEventTimer = Timer(200, False)
    useOtherTimer = False
    while not runTimer.ticked():
        if useOtherTimer is False:
            # Fast trigger: fire an event roughly every 100 ms.
            if triggerEventTimer.ticked():
                eventTimer.onEvent()
            r = eventTimer.time_period_count_updated
            if r is not None:
                print 'Event timer result: %s' % r
        else:
            # Slow trigger: fire an event roughly every 200 ms.
            if otherTriggerEventTimer.ticked():
                eventTimer.onEvent()
            r = eventTimer.time_period_count_updated
            if r is not None:
                print 'Event timer result: %s' % r
        if switchToOtherTimer.ticked():
            print 'Switching timer'
            useOtherTimer = not useOtherTimer
        time.sleep(0.01)
| 25.732582 | 132 | 0.575154 |
4beaadb3622924ea8b1fcc80081ffa679bbfcff2 | 1,155 | py | Python | setup.py | castlabs/pyjip | 947f615d591a940438316e6d21291f730bfcda66 | [
"BSD-3-Clause"
] | null | null | null | setup.py | castlabs/pyjip | 947f615d591a940438316e6d21291f730bfcda66 | [
"BSD-3-Clause"
] | 2 | 2021-03-09T10:35:46.000Z | 2021-03-09T11:02:26.000Z | setup.py | castlabs/pyjip | 947f615d591a940438316e6d21291f730bfcda66 | [
"BSD-3-Clause"
] | null | null | null | try:
from setuptools import setup, Extension
except:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
# Package metadata consumed by the setup() call below.
name = 'pyjip'
version = '0.5'
description = 'JIP pipeline library'
author_email = "thasso.griebel@gmail.com"
url = ""
# Sub-packages shipped with the distribution.
packages = ['jip', 'jip.cli', 'jip.vendor', 'jip.scripts', 'jip.dispatcher']
# Long description for the package index: prefer the README contents, but
# fall back to an empty string so installation from a tree without
# Readme.rst (e.g. a minimal sdist) still works.
try:
    with open('Readme.rst') as rf:
        readme = rf.read()
except IOError:
    # Was a bare `except:`, which would also have swallowed SystemExit and
    # KeyboardInterrupt; only I/O failures are expected here.
    readme = ''
# C extension implementing the job dispatcher (Python binding + dispatcher).
dispatcher_ext = Extension('jip.dispatcher',
                           ['jip/dispatcher/jip_binding.c',
                            'jip/dispatcher/jip_dispatcher.c'])

setup(
    name=name,
    version=version,
    description=description,
    author_email=author_email,
    url=url,
    license="BSD",
    long_description=readme,
    packages=packages,
    # Ship the bundled *.jip tool scripts alongside the code.
    package_data={
        'jip.scripts': ['*.jip']
    },
    install_requires=["sqlalchemy>=0.8.2",
                      "jinja2>=2.7",
                      "argparse"
                      ],
    ext_modules=[dispatcher_ext],
    # Expose the `jip` command-line entry point.
    entry_points={
        "console_scripts": [
            'jip = jip.cli.jip_main:main'
        ]
    }
)
| 24.574468 | 76 | 0.580087 |
ee7c02465494882ca371df7e6fd343b672bedae7 | 1,835 | py | Python | tests/integration/python_modular/distribution.py | srgnuclear/shogun | 33c04f77a642416376521b0cd1eed29b3256ac13 | [
"Ruby",
"MIT"
] | 1 | 2015-11-05T18:31:14.000Z | 2015-11-05T18:31:14.000Z | tests/integration/python_modular/distribution.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | tests/integration/python_modular/distribution.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | """
Test Distribution
"""
from numpy import inf, nan
from modshogun import *
import util
def _evaluate (indata):
    """Train the distribution named in the fixture and compare its outputs
    against the reference values stored there.

    *indata* maps 'distribution_'-prefixed keys (name, features, expected
    likelihood/derivatives/best-path values, accuracy threshold) to fixture
    data. Returns util.check_accuracy(...), i.e. whether every deviation
    stays within the fixture's accuracy threshold.
    """
    prefix = 'distribution_'
    feats = util.get_features(indata, prefix)

    if indata[prefix + 'name'] == 'HMM':
        distribution = HMM(feats['train'], indata[prefix + 'N'],
            indata[prefix + 'M'], indata[prefix + 'pseudo'])
        distribution.train()
        distribution.baum_welch_viterbi_train(BW_NORMAL)
    else:
        # The fixture name is trusted test data; eval() maps it to the
        # distribution class pulled in via `from modshogun import *`.
        dfun = eval(indata[prefix + 'name'])
        distribution = dfun(feats['train'])
        distribution.train()

    likelihood = distribution.get_log_likelihood_sample()
    num_examples = feats['train'].get_num_vectors()
    num_param = distribution.get_num_model_parameters()

    derivatives = 0
    for i in range(num_param):
        for j in range(num_examples):
            val = distribution.get_log_derivative(i, j)
            # Only sum finite entries of the (sparse) derivative matrix.
            # BUGFIX: the previous check `val != nan` is always True, since
            # NaN compares unequal to everything including itself, so NaN
            # values were never filtered. `val == val` is False exactly for
            # NaN and excludes it as intended.
            if val != -inf and val == val:
                derivatives += val

    derivatives = abs(derivatives - indata[prefix + 'derivatives'])
    likelihood = abs(likelihood - indata[prefix + 'likelihood'])

    if indata[prefix + 'name'] == 'HMM':
        # HMMs additionally check Viterbi best paths and per-state values.
        best_path = 0
        best_path_state = 0
        for i in range(indata[prefix + 'num_examples']):
            best_path += distribution.best_path(i)
            for j in range(indata[prefix + 'N']):
                best_path_state += distribution.get_best_path_state(i, j)

        best_path = abs(best_path - indata[prefix + 'best_path'])
        best_path_state = abs(best_path_state - indata[prefix + 'best_path_state'])

        return util.check_accuracy(indata[prefix + 'accuracy'],
            derivatives=derivatives, likelihood=likelihood,
            best_path=best_path, best_path_state=best_path_state)
    else:
        return util.check_accuracy(indata[prefix + 'accuracy'],
            derivatives=derivatives, likelihood=likelihood)
########################################################################
# public
########################################################################
def test (indata):
    """Public entry point for the integration-test driver; delegates to
    _evaluate and returns its accuracy-check result."""
    return _evaluate(indata)
| 29.126984 | 72 | 0.695913 |
0757dbfce312a805a07ddff7db02316a283b8aa5 | 78,112 | py | Python | scicite/resources/lexicons.py | se4en/scicite | 67ed3114f0d0d971a501562e4eaa8d13115a04cc | [
"Apache-2.0"
] | 89 | 2019-04-05T03:13:54.000Z | 2022-03-23T05:21:41.000Z | scicite/resources/lexicons.py | se4en/scicite | 67ed3114f0d0d971a501562e4eaa8d13115a04cc | [
"Apache-2.0"
] | 15 | 2019-04-07T03:01:31.000Z | 2021-09-22T09:09:07.000Z | scicite/resources/lexicons.py | se4en/scicite | 67ed3114f0d0d971a501562e4eaa8d13115a04cc | [
"Apache-2.0"
] | 8 | 2019-04-10T02:05:28.000Z | 2021-08-31T08:33:05.000Z | # Based on (Jurgens et al 2018)
# pylint: disable=line-too-long
# These include manual lexicons defined by Teufel 2006 and Jurgens 2018
# that help identifying the citation intents.
# They also include linguistic patterns and rules that are leveraged as features for the classifier
# From: Jurgens et al. (2018). Measuring the Evolution of a Scientific Field through Citation Frames.
# Transactions of the Association for Computational Linguistics, 6, 391-406.
ALL_LEXICONS = \
{
"AFFECT": ["afford","believe","decide","feel","hope","imagine","regard","trust","think"],
"ARGUMENTATION": ["agree","accept","advocate","argue","claim","conclude","comment","defend","embrace","hypothesize","imply","insist","posit","postulate","reason","recommend","speculate","stipulate","suspect"],
"AWARE": ["be unaware","be familiar with","be aware","be not aware","know of"],
"BETTER_SOLUTION": ["boost","enhance","defeat","improve","go beyond","perform better","outperform","outweigh","surpass"],
"CHANGE": ["adapt","adjust","augment","combine","change","decrease","elaborate","expand","expand on","extend","derive","incorporate","increase","manipulate","modify","optimize","optimise","refine","render","replace","revise","substitute","tailor","upgrade"],
"COMPARISON": ["compare","compete","evaluate","test"],
"DENOTATION": ["be","denote","represent"],
"CONTINUE": ["adopt","base","be base on","base on","derive from","originate in","borrow","build on","follow","following","originate from","originate in","start from","proceed from"],
"CONTRAST": ["be different from","be distinct from","conflict","contrast","clash","differ from","distinguish","differentiate","disagree","disagreeing","dissent","oppose"],
"FUTURE_INTEREST": ["be interest in","plan on","plan to","expect to","intend to","hope to"],
"HEDGING_MODALS": ["could","might","may","should"],
"INCREASE": ["increase","grow","intensify","build up","explode"],
"INTEREST": ["aim","ask","address","attempt","be concern","be interest","be motivat","concern","concern","concern","consider","concentrate on","explore","focus","intend to","like to","look at how","pursue","seek","study","try","target","want","wish","wonder"],
"NEED": ["be dependent on","be reliant on","depend on","lack","need","necessitate","require","rely on"],
"PRESENTATION": ["describe","discuss","give","introduce","note","notice","point out","present","propose","put forward","recapitulate","remark","report","say","show","sketch","state","suggest","talk about"],
"PROBLEM": ["abound","aggravate","arise","be cursed","be incapable of","be force to","be limite to","be problematic","be restrict to","be trouble","be unable to","contradict","damage","degrade","degenerate","fail","fall prey","fall short","force","force","hinder","impair","impede","inhibit","misclassify","misjudge","mistake","misuse","neglect","obscure","overestimate","over-estimate","overfit","over-fit","overgeneralize","over-generalize","overgeneralise","over-generalise","overgenerate","over-generate","overlook","pose","plague","preclude","prevent","remain","resort to","restrain","run into","settle for","spoil","suffer from","threaten","thwart","underestimate","under-estimate","undergenerate","under-generate","violate","waste","worsen"],
"RESEARCH": ["apply","analyze","analyse","build","calculate","categorize","categorise","characterize","characterise","choose","check","classify","collect","compose","compute","conduct","confirm","construct","count","define","delineate","design","detect","determine","equate","estimate","examine","expect","formalize","formalise","formulate","gather","identify","implement","indicate","inspect","integrate","interpret","investigate","isolate","maximize","maximise","measure","minimize","minimise","observe","predict","realize","realise","reconfirm","revalidate","simulate","select","specify","test","verify","work on"],
"SEE": ["see","view","treat","consider"],
"SIMILAR": ["bear comparison","be analogous to","be alike","be related to","be closely relate to","be reminiscent of","be the same as","be similar to","be in a similar vein to","have much in common with","have a lot in common with","pattern with","resemble"],
"SOLUTION": ["accomplish","account for","achieve","apply to","answer","alleviate","allow for","allow","allow","avoid","benefit","capture","clarify","circumvent","contribute","cope with","cover","cure","deal with","demonstrate","develop","devise","discover","elucidate","escape","explain","fix","gain","go a long way","guarantee","handle","help","implement","justify","lend itself","make progress","manage","mend","mitigate","model","obtain","offer","overcome","perform","preserve","prove","provide","realize","realise","rectify","refrain from","remedy","resolve","reveal","scale up","sidestep","solve","succeed","tackle","take care of","take into account","treat","warrant","work well","yield"],
"TEXTSTRUCTURE": ["begin by","illustrate","conclude by","organize","organise","outline","return to","review","start by","structure","summarize","summarise","turn to"],
"USE": ["apply","employ","use","make use","utilize","implement","resort to"],
"NEGATION": ["no","not","nor","non","neither","none","never","aren't","can't","cannot","hadn't","hasn't","haven't","isn't","didn't","don't","doesn't","n't","wasn't","weren't","nothing","nobody","less","least","little","scant","scarcely","rarely","hardly","few","rare","unlikely"],
"3RD_PERSON_PRONOUN_(NOM)": ["they","he","she","theirs","hers","his"],
"OTHERS_NOM": ["they","he","she","theirs","hers","his"],
"3RD_PERSON)PRONOUN_(ACC)": ["her","him","them"],
"OTHERS_ACC": ["her","him","them"],
"3RD_POSS_PRONOUN": ["their","his","her"],
"OTHERS_POSS": ["their","his","her","they"],
"3RD_PERSON_REFLEXIVE": ["themselves","himself","herself"],
"1ST_PERSON_PRONOUN_(NOM)": ["we","i","ours","mine"],
"SELF_NOM": ["we","i","ours","mine"],
"REFERENTIAL": ["this","that","those","these"],
"REFLEXIVE": ["itself ourselves","myself","themselves","himself","herself"],
"QUESTION": ["?","how","why","whether","wonder"],
"GIVEN": ["noted","mentioned","addressed","illustrated","described","discussed","given","outlined","presented","proposed","reported","shown","taken"],
"PROFESSIONALS": ["collegues","community","computer scientists","computational linguists","discourse analysts","expert","investigators","linguists","logicians","philosophers","psycholinguists","psychologists","researchers","scholars","semanticists","scientists"],
"DISCIPLINE": ["computerscience","computer linguistics","computational linguistics","discourse analysis","logics","linguistics","psychology","psycholinguistics","philosophy","semantics","lexical semantics","several disciplines","various disciplines"],
"TEXT_NOUN": ["paragraph","section","subsection","chapter"],
"SIMILAR_ADJ": ["similar","comparable","analogous","kindred"],
"COMPARISON_NOUN": ["accuracy","baseline","comparison","competition","evaluation","inferiority","measure","measurement","performance","precision","optimum","recall","superiority"],
"CONTRAST_NOUN": ["contrast","conflict","clash","clashes","difference","point of departure"],
"AIM_NOUN": ["aim","direction","goal","intention","objective","purpose","task","theme","topic"],
"ARGUMENTATION_NOUN": ["assumption","belief","hypothesis","hypotheses","claim","conclusion","confirmation","opinion","recommendation","stipulation","view"],
"PROBLEM_NOUN": ["Achilles heel","caveat","challenge","complication","contradiction","damage","danger","deadlock","defect","detriment","difficulty","dilemma","disadvantage","disregard","doubt","downside","drawback","error","failure","fault","foil","flaw","handicap","hindrance","hurdle","ill","inflexibility","impediment","imperfection","intractability","inefficiency","inadequacy","inability","lapse","limitation","malheur","mishap","mischance","mistake","obstacle","oversight","pitfall","problem","shortcoming","threat","trouble","vulnerability","absence","dearth","deprivation","lack","loss","fraught","proliferation","spate"],
"QUESTION_NOUN": ["question","conundrum","enigma","paradox","phenomena","phenomenon","puzzle","riddle"],
"SOLUTION_NOUN": ["answer","accomplishment","achievement","advantage","benefit","breakthrough","contribution","explanation","idea","improvement","innovation","insight","justification","proposal","proof","remedy","solution","success","triumph","verification","victory"],
"RESEARCH_NOUN": ["evidence","experiment","finding","progress","observation","outcome","result"],
"RESULT_NOUN": ["evidence","experiment","finding","progress","observation","outcome","result"],
"METRIC_NOUN": ["bleu","F-score","F1-score","F score","F1 score","precision","recall","accuracy","correlation"],
"CHANGE_NOUN": ["adaptation","enhancement","extension","generalization","development","modification","refinement","version","variant","variation"],
"PRESENTATION_NOUN": ["article","draft","manuscript","paper","project","report","study"],
"WORK_NOUN": ["account","algorithm","analysis","analyses","approach","approaches","application","architecture","characterization","characterisation","component","design","extension","formalism","formalization","formalisation","framework","implementation","investigation","machinery","method","methodology","model","module","moduls","process","procedure","program","prototype","research","researches","strategy","system","technique","theory","tool","treatment","work"],
"TRADITION_NOUN": ["acceptance","community","convention","disciples","disciplines","folklore","literature","mainstream","school","tradition","textbook"],
"GOOD_ADJ": ["adequate","advantageous","appealing","appropriate","attractive","automatic","beneficial","capable","cheerful","clean","clear","compact","compelling","competitive","comprehensive","consistent","convenient","convincing","constructive","correct","desirable","distinctive","efficient","effective","elegant","encouraging","exact","faultless","favourable","feasible","flawless","good","helpful","impeccable","innovative","insightful","intensive","meaningful","neat","perfect","plausible","positive","polynomial","powerful","practical","preferable","precise","principled","promising","pure","realistic","reasonable","reliable","right","robust","satisfactory","simple","sound","successful","sufficient","systematic","tractable","usable","useful","valid","unlimited","well worked out","well","enough","well-motivated"],
"BEFORE_ADJ": ["earlier","initial","past","previous","prior"],
"CONTRAST_ADJ": ["different","distinguishing","contrary","competing","rival"],
"CONTRAST_ADV": ["differently","distinguishingly","contrarily","otherwise","other than","contrastingly","imcompatibly","on the other hand"],
"TRADITION_ADJ": ["better known","better-known","cited","classic","common","conventional","current","customary","established","existing","extant","available","favourite","fashionable","general","obvious","long-standing","mainstream","modern","naive","orthodox","popular","prevailing","prevalent","published","quoted","seminal","standard","textbook","traditional","trivial","typical","well-established","well-known","widelyassumed","unanimous","usual"],
"MANY": ["a number of","a body of","a substantial number of","a substantial body of","most","many","several","various"],
"HELP_NOUN": ["help","aid","assistance","support"],
"GRAPHIC_NOUN": ["table","tab","figure","fig","example"],
"COMPARISON_ADJ": ["evaluative","superior","inferior","optimal","better","best","worse","worst","greater","larger","faster","weaker","stronger"],
"PROBLEM_ADJ": ["demanding","difficult","hard","non-trivial","nontrivial"],
"RESEARCH_ADJ": ["empirical","experimental","exploratory","ongoing","quantitative","qualitative","preliminary","statistical","underway"],
"AWARE_ADJ": ["unnoticed","understood","unexplored"],
"NEED_ADJ": ["necessary","indispensable","requisite"],
"NEW_ADJ": ["new","novel","state-of-the-art","state of the art","leading-edge","leading edge","enhanced"],
"HEDGE_ADJ": ["possible","potential","conceivable","viable"],
"MAIN_ADJ": ["main","key","basic","central","crucial","critical","essential","eventual","fundamental","great","important","key","largest","main","major","overall","primary","principle","serious","substantial","ultimate"],
"CURRENT_ADV": ["currently","presently","at present"],
"TEMPORAL_ADV": ["finally","briefly","next"],
"STARSEM_NEGATION": ["contrary","without","n't","none","nor","nothing","nowhere","refused","nobody","means","never","neither","absence","except","rather","no","for","fail","not","neglected","less","prevent"],
"DOWNTONERS": ["almost","barely","hardly","merely","mildly","nearly","only","partially","partly","practically","scarcely","slightly","somewhat"],
"AMPLIFIERS": ["absolutely","altogether","completely","enormously","entirely","extremely","fully","greatly","highly","intensely","strongly","thoroughly","totally","utterly","very"],
"PUBLIC_VERBS": ["acknowledge","admit","agree","assert","claim","complain","declare","deny","explain","hint","insist","mention","proclaim","promise","protest","remark","reply","report","say","suggest","swear","write"],
"PRIVATE_VERBS": ["anticipate","assume","believe","conclude","decide","demonstrate","determine","discover","doubt","estimate","fear","feel","find","forget","guess","hear","hope","imagine","imply","indicate","infer","know","learn","mean","notice","prove","realize","recognize","remember","reveal","see","show","suppose","think","understand"],
"SUASIVE_VERBS": ["agree","arrange","ask","beg","command","decide","demand","grant","insist","instruct","ordain","pledge","pronounce","propose","recommend","request","stipulate","suggest","urge"]
}
# Action (verb-phrase) cue lexicon keyed by category. A superset of the verb
# categories in ALL_LEXICONS, adding INSPIRATION, AGREE, FUTURE_MODALS and
# SHOULD. Patterns are lowercase, partly lemmatised ("be base on"), and are
# matched against citation contexts as features. Kept verbatim as published.
ALL_ACTION_LEXICONS = {
    "AFFECT": ["afford", "believe", "decide", "feel", "hope", "imagine", "regard", "trust", "think"],
    "ARGUMENTATION": ["agree", "accept", "advocate", "argue", "claim", "conclude", "comment", "defend", "embrace", "hypothesize", "imply", "insist", "posit", "postulate", "reason", "recommend", "speculate", "stipulate", "suspect"],
    "AWARE": ["be unaware", "be familiar with", "be aware", "be not aware", "know of"],
    "BETTER_SOLUTION": ["boost", "enhance", "defeat", "improve", "go beyond", "perform better", "outperform", "outweigh", "surpass"],
    "CHANGE": ["adapt", "adjust", "augment", "combine", "change", "decrease", "elaborate", "expand", "expand on", "extend", "derive", "incorporate", "increase", "manipulate", "modify", "optimize", "optimise", "refine", "render", "replace", "revise", "substitute", "tailor", "upgrade"],
    "COMPARISON": ["compare", "compete", "evaluate", "test"],
    "DENOTATION": ["be", "denote", "represent"],
    "INSPIRATION": ["inspire", "motivate"],
    "AGREE": ["agree with", "side with"],
    "CONTINUE": ["adopt", "base", "be base on", 'base on', "derive from", "originate in", "borrow", "build on", "follow", "following", "originate from", "originate in", 'start from', 'proceed from'],
    "CONTRAST": ["be different from", "be distinct from", "conflict", "contrast", "clash", "differ from", "distinguish", "differentiate", "disagree", "disagreeing", "dissent", "oppose"],
    "FUTURE_INTEREST": ["be interest in", "plan on", "plan to", "expect to", "intend to", "hope to"],
    "HEDGING_MODALS": ["could", "might", "may", "should"],
    "FUTURE_MODALS": ["will", "going to"],
    "SHOULD": ["should"],
    "INCREASE": ["increase", "grow", "intensify", "build up", "explode"],
    "INTEREST": ["aim", "ask", "address", "attempt", "be concern", "be interest", "be motivat", "concern", "concern", "concern", "consider", "concentrate on", "explore", "focus", "intend to", "like to", "look at how", "pursue", "seek", "study", "try", "target", "want", "wish", "wonder"],
    "NEED": ["be dependent on", "be reliant on", "depend on", "lack", "need", "necessitate", "require", "rely on"],
    "PRESENTATION": ["describe", "discuss", "give", "introduce", "note", "notice", "point out", "present", "propose", "put forward", "recapitulate", "remark", "report", "say", "show", "sketch", "state", "suggest", "talk about"],
    "PROBLEM": ["abound", "aggravate", "arise", "be cursed", "be incapable of", "be force to", "be limite to", "be problematic", "be restrict to", "be trouble", "be unable to", "contradict", "damage", "degrade", "degenerate", "fail", "fall prey", "fall short", "force", "force", "hinder", "impair", "impede", "inhibit", "misclassify", "misjudge", "mistake", "misuse", "neglect", "obscure", "overestimate", "over-estimate", "overfit", "over-fit", "overgeneralize", "over-generalize", "overgeneralise", "over-generalise", "overgenerate", "over-generate", "overlook", "pose", "plague", "preclude", "prevent", "remain", "resort to", "restrain", "run into", "settle for", "spoil", "suffer from", "threaten", "thwart", "underestimate", "under-estimate", "undergenerate", "under-generate", "violate", "waste", "worsen"],
    "RESEARCH": ["apply", "analyze", "analyse", "build", "calculate", "categorize", "categorise", "characterize", "characterise", "choose", "check", "classify", "collect", "compose", "compute", "conduct", "confirm", "construct", "count", "define", "delineate", "design", "detect", "determine", "equate", "estimate", "examine", "expect", "formalize", "formalise", "formulate", "gather", "identify", "implement", "indicate", "inspect", "integrate", "interpret", "investigate", "isolate", "maximize", "maximise", "measure", "minimize", "minimise", "observe", "predict", "realize", "realise", "reconfirm", "revalidate", "simulate", "select", "specify", "test", "verify", "work on"],
    "SEE": ["see", "view", "treat", "consider"],
    "SIMILAR": ["bear comparison", "be analogous to", "be alike", "be related to", "be closely relate to", "be reminiscent of", "be the same as", "be similar to", "be in a similar vein to", "have much in common with", "have a lot in common with", "pattern with", "resemble"],
    "SOLUTION": ["accomplish", "account for", "achieve", "apply to", "answer", "alleviate", "allow for", "allow", "allow", "avoid", "benefit", "capture", "clarify", "circumvent", "contribute", "cope with", "cover", "cure", "deal with", "demonstrate", "develop", "devise", "discover", "elucidate", "escape", "explain", "fix", "gain", "go a long way", "guarantee", "handle", "help", "implement", "justify", "lend itself", "make progress", "manage", "mend", "mitigate", "model", "obtain", "offer", "overcome", "perform", "preserve", "prove", "provide", "realize", "realise", "rectify", "refrain from", "remedy", "resolve", "reveal", "scale up", "sidestep", "solve", "succeed", "tackle", "take care of", "take into account", "treat", "warrant", "work well", "yield"],
    "TEXTSTRUCTURE": ["begin by", "illustrate", "conclude by", "organize", "organise", "outline", "return to", "review", "start by", "structure", "summarize", "summarise", "turn to"],
    "USE": ["apply", "employ", "use", "make use", "utilize", "implement", 'resort to']
}
ALL_CONCEPT_LEXICONS = {
"NEGATION": ["no", "not", "nor", "non", "neither", "none", "never", "aren't", "can't", "cannot", "hadn't", "hasn't", "haven't", "isn't", "didn't", "don't", "doesn't", "n't", "wasn't", "weren't", "nothing", "nobody", "less", "least", "little", "scant", "scarcely", "rarely", "hardly", "few", "rare", "unlikely"],
"3RD_PERSON_PRONOUN_(NOM)": ["they", "he", "she", "theirs", "hers", "his"],
"OTHERS_NOM": ["they", "he", "she", "theirs", "hers", "his"],
"3RD_PERSON)PRONOUN_(ACC)": ["her", "him", "them"],
"OTHERS_ACC": ["her", "him", "them"],
"3RD_POSS_PRONOUN": ["their", "his", "her"],
# "OTHERS_POSS": ["their", "his", "her"],
"OTHERS_POSS": ["their", "his", "her", "they"],
"3RD_PERSON_REFLEXIVE": ["themselves", "himself", "herself"],
"1ST_PERSON_PRONOUN_(NOM)": ["we", "i", "ours", "mine"],
"SELF_NOM": ["we", "i", "ours", "mine"],
"1ST_PERSON_PRONOUN_(ACC)": ["us", "me"],
"SELF_ACC": ["us", "me"],
"1ST_POSS_PRONOUN": ["my", "our"],
"SELF_POSS": ["my", "our"],
"1ST_PERSON_REFLEXIVE ": ["ourselves", "myself"],
"REFERENTIAL": ["this", "that", "those", "these"],
"REFLEXIVE": ["itself ourselves", "myself", "themselves", "himself", "herself"],
"QUESTION": ["?", "how", "why", "whether", "wonder"],
"GIVEN": ["noted", "mentioned", "addressed", "illustrated", "described", "discussed", "given", "outlined", "presented", "proposed", "reported", "shown", "taken"],
"PROFESSIONALS": ["collegues", "community", "computer scientists", "computational linguists", "discourse analysts", "expert", "investigators", "linguists", "logicians", "philosophers", "psycholinguists", "psychologists", "researchers", "scholars", "semanticists", "scientists"],
"DISCIPLINE": ["computerscience", "computer linguistics", "computational linguistics", "discourse analysis", "logics", "linguistics", "psychology", "psycholinguistics", "philosophy", "semantics", "lexical semantics", "several disciplines", "various disciplines"],
"TEXT_NOUN": ["paragraph", "section", "subsection", "chapter"],
"SIMILAR_NOUN": ["analogy", "similarity"],
"SIMILAR_ADJ": ["similar", "comparable", "analogous", "kindred"],
"COMPARISON_NOUN": ["accuracy", "baseline", "comparison", "competition", "evaluation", "inferiority", "measure", "measurement", "performance", "precision", "optimum", "recall", "superiority"],
"CONTRAST_NOUN": ["contrast", "conflict", "clash", "clashes", "difference", "point of departure"],
"AIM_NOUN": ["aim", "direction", "goal", "intention", "objective", "purpose", "task", "theme", "topic"],
"ARGUMENTATION_NOUN": ["assumption", "belief", "hypothesis", "hypotheses", "claim", "conclusion", "confirmation", "opinion", "recommendation", "stipulation", "view"],
"PROBLEM_NOUN": ["Achilles heel", "caveat", "challenge", "complication", "contradiction", "damage", "danger", "deadlock", "defect", "detriment", "difficulty", "dilemma", "disadvantage", "disregard", "doubt", "downside", "drawback", "error", "failure", "fault", "foil", "flaw", "handicap", "hindrance", "hurdle", "ill", "inflexibility", "impediment", "imperfection", "intractability", "inefficiency", "inadequacy", "inability", "lapse", "limitation", "malheur", "mishap", "mischance", "mistake", "obstacle", "oversight", "pitfall", "problem", "shortcoming", "threat", "trouble", "vulnerability", "absence", "dearth", "deprivation", "lack", "loss", "fraught", "proliferation", "spate"],
"QUESTION_NOUN": ["question", "conundrum", "enigma", "paradox", "phenomena", "phenomenon", "puzzle", "riddle"],
"SOLUTION_NOUN": ["answer", "accomplishment", "achievement", "advantage", "benefit", "breakthrough", "contribution", "explanation", "idea", "improvement", "innovation", "insight", "justification", "proposal", "proof", "remedy", "solution", "success", "triumph", "verification", "victory"],
"INTEREST_NOUN": ["attention", "quest"],
# Not sure if this one is used
"RESEARCH_NOUN": ["evidence", "experiment", "finding", "progress", "observation", "outcome", "result"],
"RESULT_NOUN": ["evidence", "experiment", "finding", "progress", "observation", "outcome", "result"],
"METRIC_NOUN": ["bleu", "F-score", "F1-score", "F score", "F1 score", "precision", "recall", "accuracy", "correlation"],
"CHANGE_NOUN": ["adaptation", "enhancement", "extension", "generalization", "development", "modification", "refinement", "version", "variant", "variation"],
"PRESENTATION_NOUN": ["article", "draft", "manuscript", "paper", "project", "report", "study"],
"NEED_NOUN": ["necessity", "motivation"],
"WORK_NOUN": ["account", "algorithm", "analysis", "analyses", "approach", "approaches", "application", "architecture", "characterization", "characterisation", "component", "design", "extension", "formalism", "formalization", "formalisation", "framework", "implementation", "investigation", "machinery", "method", "methodology", "model", "module", "moduls", "process", "procedure", "program", "prototype", "research", "researches", "strategy", "system", "technique", "theory", "tool", "treatment", "work"],
"TRADITION_NOUN": ["acceptance", "community", "convention", "disciples", "disciplines", "folklore", "literature", "mainstream", "school", "tradition", "textbook"],
"CHANGE_ADJ": ["alternate", "alternative"],
"GOOD_ADJ": ["adequate", "advantageous", "appealing", "appropriate", "attractive", "automatic", "beneficial", "capable", "cheerful", "clean", "clear", "compact", "compelling", "competitive", "comprehensive", "consistent", "convenient", "convincing", "constructive", "correct", "desirable", "distinctive", "efficient", "effective", "elegant", "encouraging", "exact", "faultless", "favourable", "feasible", "flawless", "good", "helpful", "impeccable", "innovative", "insightful", "intensive", "meaningful", "neat", "perfect", "plausible", "positive", "polynomial", "powerful", "practical", "preferable", "precise", "principled", "promising", "pure", "realistic", "reasonable", "reliable", "right", "robust", "satisfactory", "simple", "sound", "successful", "sufficient", "systematic", "tractable", "usable", "useful", "valid", "unlimited", "well worked out", "well", "enough", "well-motivated"],
"BAD_ADJ": ["absent", "ad-hoc", "adhoc", "ad hoc", "annoying", "ambiguous", "arbitrary", "awkward", "bad", "brittle", "brute-force", "brute force", "careless", "confounding", "contradictory", "defect", "defunct", "disturbing", "elusive", "erraneous", "expensive", "exponential", "false", "fallacious", "frustrating", "haphazard", "ill-defined", "imperfect", "impossible", "impractical", "imprecise", "inaccurate", "inadequate", "inappropriate", "incomplete", "incomprehensible", "inconclusive", "incorrect", "inelegant", "inefficient", "inexact", "infeasible", "infelicitous", "inflexible", "implausible", "inpracticable", "improper", "insufficient", "intractable", "invalid", "irrelevant", "labour-intensive", "laborintensive", "labour intensive", "labor intensive", "laborious", "limited-coverage", "limited coverage", "limited", "limiting", "meaningless", "modest", "misguided", "misleading", "nonexistent", "NP-hard", "NP-complete", "NP hard", "NP complete", "questionable", "pathological", "poor", "prone", "protracted", "restricted", "scarce", "simplistic", "suspect", "time-consuming", "time consuming", "toy", "unacceptable", "unaccounted for", "unaccounted-for", "unaccounted", "unattractive", "unavailable", "unavoidable", "unclear", "uncomfortable", "unexplained", "undecidable", "undesirable", "unfortunate", "uninnovative", "uninterpretable", "unjustified", "unmotivated", "unnatural", "unnecessary", "unorthodox", "unpleasant", "unpractical", "unprincipled", "unreliable", "unsatisfactory", "unsound", "unsuccessful", "unsuited", "unsystematic", "untractable", "unwanted", "unwelcome", "useless", "vulnerable", "weak", "wrong", "too", "overly", "only"],
"BEFORE_ADJ": ["earlier", "initial", "past", "previous", "prior"],
"CONTRAST_ADJ": ["different", "distinguishing", "contrary", "competing", "rival"],
"CONTRAST_ADV": ["differently", "distinguishingly", "contrarily", "otherwise", "other than", "contrastingly", "imcompatibly", "on the other hand", ],
"TRADITION_ADJ": ["better known", "better-known", "cited", "classic", "common", "conventional", "current", "customary", "established", "existing", "extant", "available", "favourite", "fashionable", "general", "obvious", "long-standing", "mainstream", "modern", "naive", "orthodox", "popular", "prevailing", "prevalent", "published", "quoted", "seminal", "standard", "textbook", "traditional", "trivial", "typical", "well-established", "well-known", "widelyassumed", "unanimous", "usual"],
"MANY": ["a number of", "a body of", "a substantial number of", "a substantial body of", "most", "many", "several", "various"],
"HELP_NOUN": ['help', 'aid', 'assistance', 'support'],
"SENSE_NOUN": ['sense', 'spirit', ],
"GRAPHIC_NOUN": ['table', 'tab', 'figure', 'fig', 'example'],
"COMPARISON_ADJ": ["evaluative", "superior", "inferior", "optimal", "better", "best", "worse", "worst", "greater", "larger", "faster", "weaker", "stronger"],
"PROBLEM_ADJ": ["demanding", "difficult", "hard", "non-trivial", "nontrivial"],
"RESEARCH_ADJ": ["empirical", "experimental", "exploratory", "ongoing", "quantitative", "qualitative", "preliminary", "statistical", "underway"],
"AWARE_ADJ": ["unnoticed", "understood", "unexplored"],
"NEED_ADJ": ["necessary", "indispensable", "requisite"],
"NEW_ADJ": ["new", "novel", "state-of-the-art", "state of the art", "leading-edge", "leading edge", "enhanced"],
"FUTURE_ADJ": ["further", "future"],
"HEDGE_ADJ": ["possible", "potential", "conceivable", "viable"],
"MAIN_ADJ": ["main", "key", "basic", "central", "crucial", "critical", "essential", "eventual", "fundamental", "great", "important", "key", "largest", "main", "major", "overall", "primary", "principle", "serious", "substantial", "ultimate"],
"CURRENT_ADV": ["currently", "presently", "at present"],
"TEMPORAL_ADV": ["finally", "briefly", "next"],
# "SPECULATION": [],
# "CONTRARY": [],
# "SUBJECTIVITY": [],
"STARSEM_NEGATION": ["contrary", "without", "n't", "none", "nor", "nothing", "nowhere", "refused", "nobody", "means", "never", "neither", "absence", "except", "rather", "no", "for", "fail", "not", "neglected", "less", "prevent",
],
'DOWNTONERS': ['almost', 'barely', 'hardly', 'merely', 'mildly', 'nearly', 'only', 'partially', 'partly', 'practically', 'scarcely', 'slightly', 'somewhat', ],
'AMPLIFIERS': ['absolutely', 'altogether', 'completely', 'enormously', 'entirely', 'extremely', 'fully', 'greatly', 'highly', 'intensely', 'strongly', 'thoroughly', 'totally', 'utterly', 'very', ],
'PUBLIC_VERBS': ['acknowledge', 'admit', 'agree', 'assert', 'claim', 'complain', 'declare', 'deny', 'explain', 'hint', 'insist', 'mention', 'proclaim', 'promise', 'protest', 'remark', 'reply', 'report', 'say', 'suggest', 'swear', 'write', ],
'PRIVATE_VERBS': ['anticipate', 'assume', 'believe', 'conclude', 'decide', 'demonstrate', 'determine', 'discover', 'doubt', 'estimate', 'fear', 'feel', 'find', 'forget', 'guess', 'hear', 'hope', 'imagine', 'imply', 'indicate', 'infer', 'know', 'learn', 'mean', 'notice', 'prove', 'realize', 'recognize', 'remember', 'reveal', 'see', 'show', 'suppose', 'think', 'understand', ],
'SUASIVE_VERBS': ['agree', 'arrange', 'ask', 'beg', 'command', 'decide', 'demand', 'grant', 'insist', 'instruct', 'ordain', 'pledge', 'pronounce', 'propose', 'recommend', 'request', 'stipulate', 'suggest', 'urge', ]
}
# Formulaic meta-discourse patterns (Argumentative-Zoning style lexicon).
# Each pattern is a space-separated token sequence:
#   * "@NAME" tokens appear to be lookups into the concept lexicons defined
#     above (e.g. @TRADITION_ADJ, @MANY, @WORK_NOUN) -- TODO confirm with
#     the pattern matcher that consumes this table.
#   * "#XX" tokens (#JJ, #NN, #VV, #RB, #CD, #DD) look like POS-tag
#     wildcards -- confirm against the matcher.
#   * "CITATION", "SELFCITATION", "SELFCITE" and "CREF" are document-markup
#     placeholders inserted by an earlier processing stage.
# NOTE(review): deliberate misspellings such as "elswhere" are preserved --
# they appear alongside the correct spellings, presumably to match typos in
# source texts.  The suspicious token "elswhere_NOM" is also preserved
# verbatim -- it looks like a paste error, verify against the matcher.
# Fixes applied: two missing list commas that silently concatenated adjacent
# pattern strings; the typo "CITATATION" -> "CITATION"; exact duplicate
# entries within a list removed (membership semantics unchanged).
FORMULAIC_PATTERNS = {
    "GENERAL_FORMULAIC": [
        "in @TRADITION_ADJ #JJ @WORK_NOUN", "in @TRADITION_ADJ used @WORK_NOUN",
        "in @TRADITION_ADJ @WORK_NOUN", "in @MANY #JJ @WORK_NOUN",
        "in @MANY @WORK_NOUN", "in @BEFORE_ADJ #JJ @WORK_NOUN",
        "in @BEFORE_ADJ @WORK_NOUN", "in other #JJ @WORK_NOUN",
        # "in other @WORK_NOUN",
        "in such @WORK_NOUN",
    ],
    "THEM_FORMULAIC": [
        "according to CITATION", "like CITATION", "such as CITATION",
        "CITATION style", "a la CITATION", "CITATION - style",
    ],
    "US_PREVIOUS_FORMULAIC": [
        "@SELF_NOM have previously",
        # BUG FIX: a missing comma used to concatenate the next two patterns
        # into the single (unmatchable) string
        # "@SELF_NOM have earlier@SELF_NOM have elsewhere".
        "@SELF_NOM have earlier",
        "@SELF_NOM have elsewhere",
        "@SELF_NOM elsewhere", "@SELF_NOM previously", "@SELF_NOM earlier",
        "elsewhere @SELF_NOM", "elswhere @SELF_NOM",
        "elsewhere , @SELF_NOM", "elswhere , @SELF_NOM",
        "presented elswhere", "presented elsewhere",
        "@SELF_NOM have @ARGUMENTATION elsewhere",
        "@SELF_NOM have @SOLUTION elsewhere",
        "@SELF_NOM have argue elsewhere",
        "@SELF_NOM have show elswhere_NOM",
        "@SELF_NOM have argue elswhere_NOM",
        "@SELF_NOM will show elsewhere", "@SELF_NOM will show elswhere",
        "@SELF_NOM will argue elsewhere", "@SELF_NOM will argue elswhere",
        "elsewhere SELFCITE", "elswhere SELFCITE",
        "in a @BEFORE_ADJ @PRESENTATION_NOUN",
        "in an earlier @PRESENTATION_NOUN",
        "another @PRESENTATION_NOUN",
    ],
    "TEXTSTRUCTURE_FORMULAIC": [
        "then @SELF_NOM describe", "then , @SELF_NOM describe",
        "next @SELF_NOM describe", "next , @SELF_NOM describe",
        "finally @SELF_NOM describe", "finally , @SELF_NOM describe",
        "then @SELF_NOM present", "then , @SELF_NOM present",
        "next @SELF_NOM present", "next , @SELF_NOM present",
        "finally @SELF_NOM present", "finally , @SELF_NOM present",
        "briefly describe", "briefly introduce", "briefly present",
        "briefly discuss",
    ],
    "HERE_FORMULAIC": [
        "in this @PRESENTATION_NOUN", "the present @PRESENTATION_NOUN",
        "@SELF_NOM here", "here @SELF_NOM", "here , @SELF_NOM",
        "@GIVEN here", "@SELF_NOM now", "now @SELF_NOM", "now , @SELF_NOM",
        "@GIVEN now", "herein",
    ],
    "METHOD_FORMULAIC": [
        "a new @WORK_NOUN", "a novel @WORK_NOUN",
        "a @WORK_NOUN of", "an @WORK_NOUN of",
        "a #JJ @WORK_NOUN of", "an #JJ @WORK_NOUN of",
        "a #NN @WORK_NOUN of", "an #NN @WORK_NOUN of",
        "a #JJ #NN @WORK_NOUN of", "an #JJ #NN @WORK_NOUN of",
        "a @WORK_NOUN for", "an @WORK_NOUN for",
        "a #JJ @WORK_NOUN for", "an #JJ @WORK_NOUN for",
        "a #NN @WORK_NOUN for", "an #NN @WORK_NOUN for",
        "a #JJ #NN @WORK_NOUN for", "an #JJ #NN @WORK_NOUN for",
        "@WORK_NOUN design to #VV",  # (was listed twice; duplicate removed)
        "@WORK_NOUN intend for",
        "@WORK_NOUN for #VV", "@WORK_NOUN for the #NN",
        "@WORK_NOUN to the #NN", "@WORK_NOUN to #NN", "@WORK_NOUN to #VV",
        "@WORK_NOUN for #JJ #VV",
        # BUG FIX: this entry used to end in a comma *inside* the string and
        # lacked the separating comma, concatenating it with the next entry.
        "@WORK_NOUN for the #JJ #NN",
        "@WORK_NOUN to the #JJ #NN",
        "@WORK_NOUN to #JJ #VV",
        "the problem of #RB #VV", "the problem of #VV",
        "the problem of how to",
    ],
    "CONTINUE_FORMULAIC": [
        "follow CITATION",
        "follow the @WORK_NOUN of CITATION",
        "follow the @WORK_NOUN give in CITATION",
        "follow the @WORK_NOUN present in CITATION",
        "follow the @WORK_NOUN propose in CITATION",
        "follow the @WORK_NOUN discuss in CITATION",
        "base on CITATION",
        "@CONTINUE CITATION", "@CONTINUE the @WORK_NOUN",
        "@CONTINUE a @WORK_NOUN", "@CONTINUE an @WORK_NOUN",
        "@CONTINUE @OTHERS_POSS @WORK_NOUN",
        "@CONTINUE @SELF_POSS @WORK_NOUN",
        "@AGREE CITATION", "@AGREE the @WORK_NOUN",
        "@AGREE a @WORK_NOUN", "@AGREE an @WORK_NOUN",
        "@AGREE @OTHERS_POSS @WORK_NOUN", "@AGREE @SELF_POSS @WORK_NOUN",
        "base on the @WORK_NOUN of CITATION",
        "base on the @WORK_NOUN give in CITATION",
        "base on the @WORK_NOUN present in CITATION",
        "base on the @WORK_NOUN propose in CITATION",
        "base on the @WORK_NOUN discuss in CITATION",
        "adopt CITATION",
        "start point for @REFERENTIAL @WORK_NOUN",
        "start point for @SELF_POSS @WORK_NOUN",
        "as a start point", "as start point",
        "use CITATION",
        "base @SELF_POSS",
        "support @SELF_POSS", "support @OTHERS_POSS",
        "lend support to @SELF_POSS", "lend support to @OTHERS_POSS",
        # new
        "@CONTINUE the @WORK_NOUN of", "@AGREE the @WORK_NOUN of",
    ],
    "DISCOURSE_CONTRAST_FORMULAIC": [
        "however",
        # "nevertheless", "nonetheless", "yet", "although",
        "unfortunately",
        "whereas",
    ],
    "GRAPHIC_FORMULAIC": ["@GRAPHIC_NOUN #CD"],
    "CONTRAST2_FORMULAIC": [
        "this @WORK_NOUN @CONTRAST", "@SELF_POSS @WORK_NOUN @CONTRAST",
        "this @PRESENTATION_NOUN @CONTRAST",
        "@SELF_POSS @PRESENTATION_NOUN @CONTRAST",
        "compare to @OTHERS_POSS @WORK_NOUN",
        "compare to @OTHERS_POSS @PRESENTATION_NOUN",
        "@OTHERS_POSS @WORK_NOUN @CONTRAST",
        "that @WORK_NOUN @CONTRAST", "that @PRESENTATION_NOUN @CONTRAST",
        "@OTHERS_POSS @PRESENTATION_NOUN @CONTRAST",
    ],
    "COMPARISON_FORMULAIC": [
        "in @COMPARISON with", "in @COMPARISON to",
        "@GIVEN #NN @SIMILAR", "@SELF_POSS #NN @SIMILAR",
        "@SELF_POSS @PRESENTATION @SIMILAR",
        "a @SELF_POSS @PRESENTATION @SIMILAR",
        "a @SIMILAR_ADJ @WORK_NOUN is",
        "be closely relate to", "be @SIMILAR_ADJ to",
        "along the line of CITATION",
    ],
    # "CITATATION" corrected to "CITATION" throughout this list.
    "CONTRAST_FORMULAIC": [
        "against CITATION", "against @SELF_ACC", "against @SELF_POSS",
        "against @OTHERS_ACC", "against @OTHERS_POSS",
        "against @BEFORE_ADJ @WORK_NOUN", "against @MANY @WORK_NOUN",
        "against @TRADITION_ADJ @WORK_NOUN",
        "than CITATION", "than @SELF_ACC", "than @SELF_POSS",
        "than @OTHERS_ACC", "than @OTHERS_POSS",
        "than @TRADITION_ADJ @WORK_NOUN", "than @BEFORE_ADJ @WORK_NOUN",
        "than @MANY @WORK_NOUN",
        "point of departure from @SELF_POSS",
        "points of departure from @OTHERS_POSS",
        "advantage over @OTHERS_ACC", "advantage over @TRADITION_ADJ",
        "advantage over @MANY @WORK_NOUN",
        "advantage over @BEFORE_ADJ @WORK_NOUN",
        "advantage over @OTHERS_POSS", "advantage over CITATION",
        "advantage to @OTHERS_ACC", "advantage to @OTHERS_POSS",
        "advantage to CITATION", "advantage to @TRADITION_ADJ",
        "advantage to @MANY @WORK_NOUN",
        "advantage to @BEFORE_ADJ @WORK_NOUN",
        "benefit over @OTHERS_ACC", "benefit over @OTHERS_POSS",
        "benefit over CITATION", "benefit over @TRADITION_ADJ",
        "benefit over @MANY @WORK_NOUN",
        "benefit over @BEFORE_ADJ @WORK_NOUN",
        "difference to CITATION", "difference to @TRADITION_ADJ",
        "difference to @MANY @WORK_NOUN",
        "difference to @BEFORE_ADJ @WORK_NOUN",
        "difference to @OTHERS_ACC", "difference to @OTHERS_POSS",
        "difference to @SELF_ACC", "difference to @SELF_POSS",
        "difference between CITATION", "difference between @TRADITION_ADJ",
        "difference between @MANY @WORK_NOUN",
        "difference between @BEFORE_ADJ @WORK_NOUN",
        "difference between @OTHERS_ACC", "difference between @OTHERS_POSS",
        "difference between @SELF_ACC", "difference between @SELF_POSS",
        "contrast with CITATION", "contrast with @TRADITION_ADJ",
        "contrast with @MANY @WORK_NOUN",
        "contrast with @BEFORE_ADJ @WORK_NOUN",
        "contrast with @OTHERS_ACC", "contrast with @OTHERS_POSS",
        "contrast with @SELF_ACC", "contrast with @SELF_POSS",
        "unlike @SELF_ACC", "unlike @SELF_POSS", "unlike CITATION",
        "unlike @TRADITION_ADJ", "unlike @BEFORE_ADJ @WORK_NOUN",
        "unlike @MANY @WORK_NOUN", "unlike @OTHERS_ACC",
        "unlike @OTHERS_POSS",
        "in contrast to @SELF_ACC", "in contrast to @SELF_POSS",
        "in contrast to CITATION", "in contrast to @TRADITION_ADJ",
        "in contrast to @MANY @WORK_NOUN",
        "in contrast to @BEFORE_ADJ @WORK_NOUN",
        "in contrast to @OTHERS_ACC", "in contrast to @OTHERS_POSS",
        "as oppose to @SELF_ACC", "as oppose to @SELF_POSS",
        "as oppose to CITATION", "as oppose to @TRADITION_ADJ",
        "as oppose to @MANY @WORK_NOUN",
        "as oppose to @BEFORE_ADJ @WORK_NOUN",
        "as oppose to @OTHERS_ACC", "as oppose to @OTHERS_POSS",
        "contrary to @SELF_ACC", "contrary to @SELF_POSS",
        "contrary to CITATION", "contrary to @TRADITION_ADJ",
        "contrary to @MANY @WORK_NOUN",
        "contrary to @BEFORE_ADJ @WORK_NOUN",
        "contrary to @OTHERS_ACC", "contrary to @OTHERS_POSS",
        "whereas @SELF_ACC", "whereas @SELF_POSS", "whereas CITATION",
        "whereas @TRADITION_ADJ", "whereas @BEFORE_ADJ @WORK_NOUN",
        "whereas @MANY @WORK_NOUN", "whereas @OTHERS_ACC",
        "whereas @OTHERS_POSS",
        "compare to @SELF_ACC", "compare to @SELF_POSS",
        "compare to CITATION",
        # "compare to @TRADITION_ADJ",
        "compare to @BEFORE_ADJ @WORK_NOUN", "compare to @MANY @WORK_NOUN",
        "compare to @OTHERS_ACC", "compare to @OTHERS_POSS",
        "in comparison to @SELF_ACC", "in comparison to @SELF_POSS",
        "in comparison to CITATION", "in comparison to @TRADITION_ADJ",
        "in comparison to @MANY @WORK_NOUN",
        "in comparison to @BEFORE_ADJ @WORK_NOUN",
        "in comparison to @OTHERS_ACC", "in comparison to @OTHERS_POSS",
        "while @SELF_NOM", "while @SELF_POSS", "while CITATION",
        "while @TRADITION_ADJ", "while @BEFORE_ADJ @WORK_NOUN",
        "while @MANY @WORK_NOUN", "while @OTHERS_NOM",
        "while @OTHERS_POSS",
        "this @WORK_NOUN @COMPARISON", "@SELF_POSS @WORK_NOUN @COMPARISON",
        "this @PRESENTATION_NOUN @COMPARISON",
        "@SELF_POSS @PRESENTATION_NOUN @COMPARISON",
        "compare to @OTHERS_POSS @WORK_NOUN",
        "compare to @OTHERS_POSS @PRESENTATION_NOUN",
        "@OTHERS_POSS @WORK_NOUN @COMPARISON",
        "that @WORK_NOUN @COMPARISON",
        "that @PRESENTATION_NOUN @COMPARISON",
        "@OTHERS_POSS @PRESENTATION_NOUN @COMPARISON",
    ],
    "ALIGN_FORMULAIC": ["in the @SENSE_NOUN of CITATION"],
    "AFFECT_FORMULAIC": ["hopefully", "thankfully", "fortunately",
                         "unfortunately"],
    "GOOD_FORMULAIC": ["@GOOD_ADJ"],
    # "BAD_FORMULAIC": ["@BAD_ADJ"],
    "TRADITION_FORMULAIC": ["@TRADITION_ADJ"],
    "IN_ORDER_TO_FORMULAIC": ["in order to"],
    "DETAIL_FORMULAIC": [
        "@SELF_NOM have also", "@SELF_NOM also",
        "this @PRESENTATION_NOUN also", "this @PRESENTATION_NOUN has also",
    ],
    "NO_TEXTSTRUCTURE_FORMULAIC": [
        "( @TEXT_NOUN CREF )",
        "as explain in @TEXT_NOUN CREF",
        "as explain in the @BEFORE_ADJ @TEXT_NOUN",
        "as @GIVEN early in this @TEXT_NOUN",
        "as @GIVEN below",
        "as @GIVEN in @TEXT_NOUN CREF",
        "as @GIVEN in the @BEFORE_ADJ @TEXT_NOUN",
        "as @GIVEN in the next @TEXT_NOUN",
        "#NN @GIVEN in @TEXT_NOUN CREF",
        "#NN @GIVEN in the @BEFORE_ADJ @TEXT_NOUN",
        "#NN @GIVEN in the next @TEXT_NOUN",
        "#NN @GIVEN below",
        "cf. @TEXT_NOUN CREF", "cf. @TEXT_NOUN below",
        "cf. the @TEXT_NOUN below", "cf. the @BEFORE_ADJ @TEXT_NOUN",
        "cf. @TEXT_NOUN above", "cf. the @TEXT_NOUN above",
        # "cfXXX" presumably matches a tokenizer normalisation of "cf."
        "cfXXX @TEXT_NOUN CREF", "cfXXX @TEXT_NOUN below",
        "cfXXX the @TEXT_NOUN below", "cfXXX the @BEFORE_ADJ @TEXT_NOUN",
        "cfXXX @TEXT_NOUN above", "cfXXX the @TEXT_NOUN above",
        "e. g. , @TEXT_NOUN CREF", "e. g , @TEXT_NOUN CREF",
        "e. g. @TEXT_NOUN CREF", "e. g @TEXT_NOUN CREF",
        "e.g., @TEXT_NOUN CREF", "e.g. @TEXT_NOUN CREF",
        "compare @TEXT_NOUN CREF", "compare @TEXT_NOUN below",
        "compare the @TEXT_NOUN below",
        "compare the @BEFORE_ADJ @TEXT_NOUN",
        "compare @TEXT_NOUN above", "compare the @TEXT_NOUN above",
        "see @TEXT_NOUN CREF", "see the @BEFORE_ADJ @TEXT_NOUN",
        "recall from the @BEFORE_ADJ @TEXT_NOUN",
        "recall from the @TEXT_NOUN above",
        "recall from @TEXT_NOUN CREF",
        "@SELF_NOM shall see below", "@SELF_NOM will see below",
        "@SELF_NOM shall see in the next @TEXT_NOUN",
        "@SELF_NOM will see in the next @TEXT_NOUN",
        "@SELF_NOM shall see in @TEXT_NOUN CREF",
        "@SELF_NOM will see in @TEXT_NOUN CREF",
        "example in @TEXT_NOUN CREF",  # (duplicate entry removed)
        "example CREF in @TEXT_NOUN CREF",
        "example CREF and CREF in @TEXT_NOUN CREF",
    ],
    "USE_FORMULAIC": [
        "@SELF_NOM @USE",
        # "@WORK_NOUN @USE",
        "@SELF_NOM @RESEARCH",
        # "be @USE to",
        # "can be #VV use",  # can be /solved/ using
        # "@SELF_POSS @WORK_NOUN be @CONTINUE",
        # "@SELF_POSS #JJ @WORK_NOUN be @CONTINUE",
        "@SOLUTION with the @HELP_NOUN of",
        "@SOLUTION with the @WORK_NOUN of",
    ],
    "FUTURE_WORK_FORMULAIC": [
        "@FUTURE_ADJ @WORK_NOUN", "@FUTURE_ADJ @AIM_NOUN",
        "@FUTURE_ADJ @CHANGE_NOUN",
        "a @HEDGE_ADJ @AIM_NOUN", "one @HEDGE_ADJ @AIM_NOUN",
        "#NN be also @HEDGE_ADJ",
        "in the future",
        "@SELF_NOM @FUTURE_INTEREST",
    ],
    "HEDGING_FORMULAIC": [
        "@HEDGING_MODALS be @RESEARCH",
        "@HEDGING_MODALS be @CHANGE",
        "@HEDGING_MODALS be @SOLUTION",
    ],
    "PRESENT_WORK_FORMULAIC": [
        "@SELF_NOM be @CURRENT_ADV @RESEARCH",
        "@SELF_NOM be @RESEARCH @CURRENT_ADV",
    ],
    "EXTENDING_WORK_FORMULAIC": [
        "@CHANGE the @WORK_NOUN", "@CHANGE this @WORK_NOUN",
        "@SELF_POSS @WORK_NOUN be @CHANGE",
        "@SELF_POSS #JJ @WORK_NOUN be @CHANGE",
        "@SELF_POSS @WORK_NOUN @CHANGE",
        "@SELF_POSS #JJ @WORK_NOUN @CHANGE",
        "@CHANGE the #JJ @WORK_NOUN",
        "@SELF_NOM @CHANGE",
    ],
    "EXTENDING_WORK2_FORMULAIC": [
        "@SELF_NOM @CHANGE #DD @WORK_NOUN",
        "@SELF_POSS @WORK_NOUN @CHANGE",
        "@CHANGE from CITATION",
        "@CHANGE from #NN of CITATION",
        "@SELF_POSS @CHANGE_NOUN of CITATION",
        "@SELF_POSS @WORK_NOUN @CONTINUE",
        "@SELF_POSS @WORK_NOUN be #DD @CHANGE_NOUN",
        "@SELF_POSS @WORK_NOUN be #VV #DD @CHANGE_NOUN",
        "#NN be #DD @CHANGE_NOUN of",
        "#NN be #DD #JJ @CHANGE_NOUN of",
        "#DD #NN @DENOTATION #DD @CHANGE_NOUN of",
        "@TEXT_NOUN @CONTINUE CITATION",
        "#NN @CONTINUE #NN of CITATION",
        "be @SEE as an @CHANGE_NOUN",
        "@CHANGE #DD #NN of CITATION",
    ],
    "USEFUL_FORMULAIC": ["have shown @GOOD_ADJ for"],
    "MOTIVATING_FORMULAIC": [
        "as @PRESENTATION in CITATION", "as @PRESENTATION by CITATION",
        "this be a #JJ convention", "this be a #RB #JJ convention",
        "@CONTINUE the #NN result", "@CONTINUE the #JJ result",
        "@AGREE the #NN result", "@AGREE the #JJ result",
        # "@INSPIRATION by the #NN result",
        # "@INSPIRATION by the #JJ result",
        "@INSPIRATION by",
        "CITATION have @PRESENTATION that",  # (duplicate entry removed)
        "have remain a @PROBLEM_NOUN",
        "their importance have @INCREASE",
        "#NN be @MAIN_ADJ in", "#NN be @MAIN_ADJ for",
        "it be @MAIN_ADJ not to",
        "from CITATION , @SELF_NOM",
        "@CONTINUE CITATION, @SELF_NOM",
        "@AGREE CITATION, @SELF_NOM",
        "@RESEARCH in @DISCIPLINE @PRESENTATION",
        "@RESEARCH in #NN @PRESENTATION",
        "@RESEARCH in #NN #NN @PRESENTATION",
        "@RESEARCH in #JJ #NN @PRESENTATION",
        "negative @RESULT_NOUN for", "negative @RESULT_NOUN that",
        # "have be @PRESENTATION",  # SUPER NOISY :(
        "it be well document", "it have be well document",
        "#NN need to @USE",
        "CITATION have @RESEARCH it",
        "CITATION @PRESENTATION that",
        "CITATION #RB @PRESENTATION that",
        "prove to be @GOOD_ADJ in",
        "@PRESENTATION to be @GOOD_ADJ in",
        "prove to be @GOOD_ADJ for",
        "@PRESENTATION to be @GOOD_ADJ for",
    ],
    "PRIOR_WORK_FORMULAIC": [
        "@BEFORE_ADJ @PRESENTATION @SELF_NOM",
        "@BEFORE_ADJ @PRESENTATION , @SELF_NOM",
        "a @BEFORE_ADJ @PRESENTATION @SELF_NOM",
        "a @BEFORE_ADJ @PRESENTATION , @SELF_NOM",
        "@SELF_POSS @BEFORE_ADJ @PRESENTATION @SELF_NOM",
        "@SELF_POSS @BEFORE_ADJ @PRESENTATION , @SELF_NOM",
        "@SELF_POSS @BEFORE_ADJ @PRESENTATION CITATION",
        "@SELF_POSS @BEFORE_ADJ @PRESENTATION SELFCITATION",
        "@BEFORE_ADJ @PRESENTATION CITATION @SELF_NOM",
        "@BEFORE_ADJ @PRESENTATION CITATION , @SELF_NOM",
        "a @BEFORE_ADJ @PRESENTATION CITATION @SELF_NOM",
        "a @BEFORE_ADJ @PRESENTATION CITATION , @SELF_NOM",
        "first @PRESENTATION in CITATION",
        "@PRESENTATION #RR in CITATION",
        "@PRESENTATION #JJ in CITATION",
        "@BEFORE_ADJ @CHANGE_NOUN of @SELF_POSS @WORK_NOUN",
        "@CHANGE on @BEFORE_ADJ @PRESENTATION @PRESENTATION in SELFCITATION",
        "@CHANGE @BEFORE_ADJ @PRESENTATION @PRESENTATION in SELFCITATION",
        "@CHANGE @BEFORE_ADJ @PRESENTATION @PRESENTATION SELFCITATION",
        "@CHANGE on @SELF_POSS @BEFORE_ADJ @PRESENTATION @PRESENTATION in SELFCITATION",
        "@CHANGE @SELF_POSS @BEFORE_ADJ @PRESENTATION @PRESENTATION in SELFCITATION",
        "@CHANGE @SELF_POSS @BEFORE_ADJ @PRESENTATION @PRESENTATION SELFCITATION",
        "in @SELF_POSS @BEFORE_ADJ @PRESENTATION CITATION",
    ],
}
# NOTE(review): a first AGENT_PATTERNS definition used to live here, but it
# was immediately overwritten by the equivalent assignment that follows
# (nothing runs between the two assignments), so it was dead code and has
# been removed.  The live definition is the one directly below.
# Agent patterns: who is "acting" in a sentence (the authors, cited work,
# the text itself, ...).  Same pattern token conventions as
# FORMULAIC_PATTERNS: "@NAME" = concept-lexicon lookup, "#XX" = apparent
# POS-tag wildcard, "CITATION"/"SELFCITATION"/"CREF" = markup placeholders.
# Fixes applied: three entries were missing the "@" lexicon marker
# ('SELF_POSS @RESULT_NOUN', 'WORK_NOUN @GIVEN below', and twice
# 'in this PRESENTATION_NOUN'); as written they could only ever match the
# literal words, which is inconsistent with every sibling entry.
AGENT_PATTERNS = {
    'US_AGENT': [
        '@SELF_NOM',
        '@SELF_POSS #JJ @WORK_NOUN', '@SELF_POSS #JJ @PRESENTATION_NOUN',
        '@SELF_POSS #JJ @ARGUMENTATION_NOUN', '@SELF_POSS #JJ @SOLUTION_NOUN',
        '@SELF_POSS #JJ @RESULT_NOUN',
        '@SELF_POSS @WORK_NOUN', '@SELF_POSS @PRESENTATION_NOUN',
        '@SELF_POSS @ARGUMENTATION_NOUN', '@SELF_POSS @SOLUTION_NOUN',
        '@SELF_POSS @RESULT_NOUN',  # fixed: was 'SELF_POSS ...'
        '@WORK_NOUN @GIVEN here',
        '@WORK_NOUN @GIVEN below',  # fixed: was 'WORK_NOUN ...'
        '@WORK_NOUN @GIVEN in this @PRESENTATION_NOUN',
        '@WORK_NOUN @GIVEN in @SELF_POSS @PRESENTATION_NOUN',
        'the @SOLUTION_NOUN @GIVEN here',
        'the @SOLUTION_NOUN @GIVEN in this @PRESENTATION_NOUN',
        'the first author', 'the second author', 'the third author',
        'one of the authors', 'one of us'],
    'REF_US_AGENT': [
        'this @PRESENTATION_NOUN',
        'the present @PRESENTATION_NOUN', 'the current @PRESENTATION_NOUN',
        'the present #JJ @PRESENTATION_NOUN',
        'the current #JJ @PRESENTATION_NOUN',
        'the @WORK_NOUN @GIVEN'],
    'OUR_AIM_AGENT': [
        '@SELF_POSS @AIM_NOUN',
        'the point of this @PRESENTATION_NOUN',
        'the @AIM_NOUN of this @PRESENTATION_NOUN',
        'the @AIM_NOUN of the @GIVEN @WORK_NOUN',
        'the @AIM_NOUN of @SELF_POSS @WORK_NOUN',
        'the @AIM_NOUN of @SELF_POSS @PRESENTATION_NOUN',
        'the most @MAIN_ADJ feature of @SELF_POSS @WORK_NOUN',
        'contribution of this @PRESENTATION_NOUN',
        'contribution of the @GIVEN @WORK_NOUN',
        'contribution of @SELF_POSS @WORK_NOUN',
        'the question @GIVEN in this @PRESENTATION_NOUN',  # fixed missing @
        'the question @GIVEN here',
        '@SELF_POSS @MAIN_ADJ @AIM_NOUN',
        '@SELF_POSS @AIM_NOUN in this @PRESENTATION_NOUN',
        '@SELF_POSS @AIM_NOUN here',
        'the #JJ point of this @PRESENTATION_NOUN',
        'the #JJ purpose of this @PRESENTATION_NOUN',
        'the #JJ @AIM_NOUN of this @PRESENTATION_NOUN',
        'the #JJ @AIM_NOUN of the @GIVEN @WORK_NOUN',
        'the #JJ @AIM_NOUN of @SELF_POSS @WORK_NOUN',
        'the #JJ @AIM_NOUN of @SELF_POSS @PRESENTATION_NOUN',
        'the #JJ question @GIVEN in this @PRESENTATION_NOUN',  # fixed
        'the #JJ question @GIVEN here'],
    'AIM_REF_AGENT': [
        'its @AIM_NOUN', 'its #JJ @AIM_NOUN',
        '@REFERENTIAL #JJ @AIM_NOUN',
        'contribution of this @WORK_NOUN',
        'the most important feature of this @WORK_NOUN',
        'feature of this @WORK_NOUN',
        'the @AIM_NOUN', 'the #JJ @AIM_NOUN'],
    'US_PREVIOUS_AGENT': [
        'SELFCITATION',
        'this @BEFORE_ADJ @PRESENTATION_NOUN',
        '@SELF_POSS @BEFORE_ADJ @PRESENTATION_NOUN',
        '@SELF_POSS @BEFORE_ADJ @WORK_NOUN',
        'in CITATION , @SELF_NOM', 'in CITATION @SELF_NOM',
        'the @WORK_NOUN @GIVEN in SELFCITATION',
        'in @BEFORE_ADJ @PRESENTATION CITATION @SELF_NOM',
        'in @BEFORE_ADJ @PRESENTATION CITATION , @SELF_NOM',
        'in a @BEFORE_ADJ @PRESENTATION CITATION @SELF_NOM',
        'in a @BEFORE_ADJ @PRESENTATION CITATION , @SELF_NOM'],
    'REF_AGENT': [
        '@REFERENTIAL #JJ @WORK_NOUN',
        # '@REFERENTIAL @WORK_NOUN',
        'this sort of @WORK_NOUN', 'this kind of @WORK_NOUN',
        'this type of @WORK_NOUN',
        'the current #JJ @WORK_NOUN', 'the current @WORK_NOUN',
        'the @WORK_NOUN', 'the @PRESENTATION_NOUN',
        'the author', 'the authors'],
    'THEM_PRONOUN_AGENT': ['@OTHERS_NOM'],
    'THEM_ACTIVE_AGENT': ['CITATION @PRESENTATION'],
    'THEM_AGENT': [
        'CITATION',
        "CITATION 's #NN", "CITATION 's @PRESENTATION_NOUN",
        "CITATION 's @WORK_NOUN", "CITATION 's @ARGUMENTATION_NOUN",
        "CITATION 's #JJ @PRESENTATION_NOUN", "CITATION 's #JJ @WORK_NOUN",
        "CITATION 's #JJ @ARGUMENTATION_NOUN",
        'the CITATION @WORK_NOUN',
        'the @WORK_NOUN @GIVEN in CITATION',
        'the @WORK_NOUN of CITATION',
        '@OTHERS_POSS @PRESENTATION_NOUN', '@OTHERS_POSS @WORK_NOUN',
        '@OTHERS_POSS @RESULT_NOUN', '@OTHERS_POSS @ARGUMENTATION_NOUN',
        '@OTHERS_POSS @SOLUTION_NOUN',
        '@OTHERS_POSS #JJ @PRESENTATION_NOUN', '@OTHERS_POSS #JJ @WORK_NOUN',
        '@OTHERS_POSS #JJ @RESULT_NOUN',
        '@OTHERS_POSS #JJ @ARGUMENTATION_NOUN',
        '@OTHERS_POSS #JJ @SOLUTION_NOUN'],
    'GAP_AGENT': [
        'none of these @WORK_NOUN', 'none of those @WORK_NOUN',
        'no @WORK_NOUN', 'no #JJ @WORK_NOUN',
        'none of these @PRESENTATION_NOUN',
        'none of those @PRESENTATION_NOUN',
        'no @PRESENTATION_NOUN', 'no #JJ @PRESENTATION_NOUN'],
    'GENERAL_AGENT': [
        '@TRADITION_ADJ #JJ @WORK_NOUN', '@TRADITION_ADJ use @WORK_NOUN',
        '@TRADITION_ADJ @WORK_NOUN',
        '@MANY #JJ @WORK_NOUN', '@MANY @WORK_NOUN',
        '@BEFORE_ADJ #JJ @WORK_NOUN', '@BEFORE_ADJ @WORK_NOUN',
        '@BEFORE_ADJ #JJ @PRESENTATION_NOUN',
        '@BEFORE_ADJ @PRESENTATION_NOUN',
        'other #JJ @WORK_NOUN', 'other @WORK_NOUN', 'such @WORK_NOUN',
        'these #JJ @PRESENTATION_NOUN', 'these @PRESENTATION_NOUN',
        'those #JJ @PRESENTATION_NOUN', 'those @PRESENTATION_NOUN',
        '@REFERENTIAL authors', '@MANY author',
        'researcher in @DISCIPLINE', '@PROFESSIONALS'],
    'PROBLEM_AGENT': [
        '@REFERENTIAL #JJ @PROBLEM_NOUN', '@REFERENTIAL @PROBLEM_NOUN',
        'the @PROBLEM_NOUN'],
    'SOLUTION_AGENT': [
        '@REFERENTIAL #JJ @SOLUTION_NOUN', '@REFERENTIAL @SOLUTION_NOUN',
        'the @SOLUTION_NOUN', 'the #JJ @SOLUTION_NOUN'],
    'TEXTSTRUCTURE_AGENT': [
        '@TEXT_NOUN CREF', '@TEXT_NOUN CREF and CREF',
        'this @TEXT_NOUN', 'next @TEXT_NOUN', 'next #CD @TEXT_NOUN',
        'concluding @TEXT_NOUN', '@BEFORE_ADJ @TEXT_NOUN',
        '@TEXT_NOUN above', '@TEXT_NOUN below',
        'following @TEXT_NOUN', 'remaining @TEXT_NOUN',
        'subsequent @TEXT_NOUN',
        'following #CD @TEXT_NOUN', 'remaining #CD @TEXT_NOUN',
        'subsequent #CD @TEXT_NOUN',
        '@TEXT_NOUN that follow',
        'rest of this @PRESENTATION_NOUN',
        'remainder of this @PRESENTATION_NOUN',
        'in @TEXT_NOUN CREF , @SELF_NOM',
        'in this @TEXT_NOUN , @SELF_NOM',
        'in the next @TEXT_NOUN , @SELF_NOM',
        'in @BEFORE_ADJ @TEXT_NOUN , @SELF_NOM',
        'in the @BEFORE_ADJ @TEXT_NOUN , @SELF_NOM',
        'in the @TEXT_NOUN above , @SELF_NOM',
        'in the @TEXT_NOUN below , @SELF_NOM',
        'in the following @TEXT_NOUN , @SELF_NOM',
        'in the remaining @TEXT_NOUN , @SELF_NOM',
        'in the subsequent @TEXT_NOUN , @SELF_NOM',
        'in the @TEXT_NOUN that follow , @SELF_NOM',
        'in the rest of this @PRESENTATION_NOUN , @SELF_NOM',
        'in the remainder of this @PRESENTATION_NOUN , @SELF_NOM',
        'below , @SELF_NOM',
        'the @AIM_NOUN of this @TEXT_NOUN'],
}
| 70.81777 | 1,671 | 0.510242 |
c07874eef434f5a16ceaec2eea260b2b990ae8c8 | 9,418 | py | Python | chaco/scales/scales_test_case.py | janvonrickenbach/Chaco_wxPhoenix_py3 | 21a10cfd81100f28e3fbc273357ac45642519f33 | [
"BSD-3-Clause"
] | null | null | null | chaco/scales/scales_test_case.py | janvonrickenbach/Chaco_wxPhoenix_py3 | 21a10cfd81100f28e3fbc273357ac45642519f33 | [
"BSD-3-Clause"
] | null | null | null | chaco/scales/scales_test_case.py | janvonrickenbach/Chaco_wxPhoenix_py3 | 21a10cfd81100f28e3fbc273357ac45642519f33 | [
"BSD-3-Clause"
] | null | null | null | from traits.testing.unittest_tools import unittest
from numpy import array
from .formatters import BasicFormatter, OffsetFormatter
from .scales import Pow10Scale, FixedScale, LogScale, DefaultScale, ScaleSystem, frange
class TicksTestCase(unittest.TestCase):
""" Base class for scale and scale system unit tests """
def assert_empty(self, arg):
self.assertEqual(len(arg), 0)
def check_ticks(self, ticks1, ticks2):
self.assertEqual(len(ticks1), len(ticks2))
for t1, t2 in zip(ticks1, ticks2):
self.assertAlmostEqual(t1, t2, 6)
def check_labels(self, labels1, labels2):
self.assertEqual(len(labels1), len(labels2))
for t1, t2, in zip(labels1, labels2):
self.assertEqual(t1, t2)
class ScalesTestCase(TicksTestCase):
    """Tests for the individual tick-generation scale classes."""

    def test_pow10(self):
        scale = Pow10Scale()
        cases = [((5, 15, 8), frange(5, 15, 1.0)),
                 ((5, 105, 8), frange(10, 100, 10.0))]
        for args, expected in cases:
            self.check_ticks(scale.ticks(*args), expected)

    def test_log_scale_subdecade(self):
        # Cases where the log of the interval spans less than one decade.
        scale = LogScale()
        cases = [
            ((1.0, 2.0),
             (1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0)),
            ((0.9, 2.1), (1.0, 1.25, 1.5, 1.75, 2.0)),
            ((1.1, 9.9), (2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)),
        ]
        for (low, high), expected in cases:
            self.check_ticks(scale.ticks(low, high), array(expected))

    def test_log_scale_interval1(self):
        # 1 < log_interval < desired_ticks, and interval=1 is the case that
        # generates the ticks.
        scale = LogScale()
        cases = [
            ((1.0, 10.1),
             (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)),
            ((9.3, 99.9),
             (10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0)),
            ((9.9, 100.0),
             (10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0)),
        ]
        for (low, high), expected in cases:
            self.check_ticks(scale.ticks(low, high), array(expected))

    def test_log_scale(self):
        scale = LogScale()
        decade_ticks = (10.0, 20.0, 40.0, 60.0, 80.0, 100.0, 200.0, 400.0,
                        600.0, 800.0, 1000.0)
        cases = [
            ((0.1, 10.0),
             (0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 2.0, 4.0, 6.0, 8.0, 10.0)),
            ((10.0, 1000.0), decade_ticks),
            ((9.9, 1000.0), decade_ticks),
            ((5.0, 4300), (5, 10, 50, 100, 500, 1000)),
            # log_interval greater than 8 (the default desired_ticks).
            ((1e-3, 1e6),
             (1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6)),
        ]
        for (low, high), expected in cases:
            self.check_ticks(scale.ticks(low, high), array(expected))
class ScaleSystemTestCase(TicksTestCase):
    """Tests for ScaleSystem's selection among multiple candidate scales."""
    def test_defaults(self):
        # With no scales supplied, the built-in default scale is used.
        ticker = ScaleSystem()
        ticks = ticker.ticks(5, 30, 10)
        self.check_ticks(ticks, frange(5.0, 30.0, 2.5))
    def test_fixed_scales(self):
        # With default_scale=None the system must pick among the fixed
        # resolutions only, choosing the one closest to the requested count.
        scales = [
            FixedScale(resolution=1.0), FixedScale(resolution=10.0),
            FixedScale(resolution=100.0)
        ]
        ticker = ScaleSystem(default_scale=None, *scales)
        self.check_ticks(ticker.ticks(5, 35, 3), (10.0, 20.0, 30.0))
        self.check_ticks(ticker.ticks(5, 35, 20), frange(5.0, 35.0, 1.0))
        self.check_ticks(
            ticker.ticks(5, 614, 10), (100, 200, 300, 400, 500, 600))
    def test_revert_to_default(self):
        # When no fixed scale fits the interval well, the system falls back
        # to its default scale (here: 0.1 spacing over [2, 3]).
        scales = [
            FixedScale(resolution=1.0), FixedScale(resolution=10.0),
            FixedScale(resolution=100.0)
        ]
        ticker = ScaleSystem(*scales)
        ticks = ticker.ticks(2.0, 3.0, 10)
        self.check_ticks(ticks, frange(2.0, 3.0, 0.1))
    def test_translation(self):
        # Placeholder: translation behavior is not covered yet.
        pass
class BasicFormatterTestCase(TicksTestCase):
    """Tests for BasicFormatter label generation and width estimation."""
    def test_format(self):
        fmt = BasicFormatter()
        # test with a fixed scale
        scale = FixedScale(resolution=1.0)
        start, end = 12.0, 18.0
        numlabels = 8
        ticks = scale.ticks(start, end, numlabels)
        labels = fmt.format(ticks, numlabels, None)
        # desired = [str(float(x)) for x in range(12, 19)]
        ## This test fails when desired is created with str(float(x)).
        ## The format function returns "12",...,"18", not "12.0",...,"18.0".
        desired = ["12", "13", "14", "15", "16", "17", "18"]
        self.check_labels(labels, desired)
    def test_format_small_numbers(self):
        fmt = BasicFormatter()
        numlabels = 8
        # test with small numbers
        scale = FixedScale(resolution=1e-4)
        start, end = 5e-5, 8.5e-4
        ticks = scale.ticks(start, end, numlabels)
        labels = fmt.format(ticks, numlabels, None)
        # Small values are expected in mantissa + "e-4" exponent form.
        desired = [str(float(i)) + "e-4" for i in range(1, 9)]
        self.check_labels(labels, desired)
    def test2_nice_sci(self):
        # The table of numerical values and their proper representation
        # given a certain number of mantissa digits
        vals = [
            (3.14159e10, (2, "3e10"), (3, '3.1e10'), (5, '3.141e10')),
            (123456789, (3, '1.2e8'), (5, '1.234e8')),
            (-123456, (2, "-1e5"), (3, "-1e5"), (4, "-1.2e5")),
            (123, (2, "1e2"), (3, "1.2e2"), (4, "1.23e2")),
            (1.234, (2, "1"), (3, "1.2"), (4, "1.23")),
        ]
        fmt = BasicFormatter()
        # NOTE(review): mismatches are only printed, not asserted, so this
        # "test" never fails -- confirm whether that is intentional.
        for lst in vals:
            val = lst[0]
            for mdigits, desired in lst[1:]:
                s = fmt._nice_sci(val, mdigits)
                if s != desired:
                    print("Mismatch for", val, "; desired:", desired,
                          "actual:", s)
    def test_estimate_default_scale(self):
        fmt = BasicFormatter()
        scale = DefaultScale()
        # Test using numlabels
        test_intervals = (
            (12., 18., 8),
            (-4., 16., 10),
            (5e-5, 8.5e-4, 8),
            (3e8, 6e8, 8), )
        for start, end, numlabels in test_intervals:
            estimate = fmt.estimate_width(
                start, end, numlabels, ticker=scale)[1]
            ticks = scale.ticks(start, end, numlabels)
            labels = fmt.format(ticks, numlabels, None)
            actual = sum(map(len, labels))
            # The width estimate must be within 50% of the real label width.
            err = abs(estimate - actual) / actual
            self.assertLess(err, 0.5)
        return
    def test_width_based_default_scale(self):
        # Exercises character-budget-driven labelling; output is only printed.
        scale = ScaleSystem()
        test_intervals = (
            (1, 100, 80),
            (1, 100, 40),
            (1, 100, 20), )
        print()
        for start, end, width in test_intervals:
            labels = scale.labels(start, end, char_width=width)
            print("(%d,%d)" % (start, end), " avail:", width, end=' ')
            print(" used:", sum([len(x[1]) for x in labels]))
        return
    def test_scale_system(self):
        # Same as above but with an explicit ladder of fixed resolutions.
        scale = ScaleSystem(
            FixedScale(resolution=1.0),
            FixedScale(resolution=2.5),
            FixedScale(resolution=5.0),
            FixedScale(resolution=10.0),
            FixedScale(resolution=20.0),
            FixedScale(resolution=100.0))
        test_intervals = (
            (1, 100, 200),
            (1, 100, 80),
            (1, 100, 40),
            (1, 100, 20),
            (1, 100, 5),
            (1, 10, 100),
            (1, 10, 50),
            (1, 10, 20), )
        print()
        for start, end, width in test_intervals:
            labels = scale.labels(start, end, char_width=width)
            print("(%d,%d)" % (start, end), " avail:", width, end=' ')
            print(" used:", sum([len(x[1]) for x in labels]), end=' ')
            print(list(zip(*labels))[1])
        return
class OffsetFormatterTestCase(TicksTestCase):
    """Smoke test for OffsetFormatter; results are printed, not asserted."""
    def test_format(self):
        # Ranges with a large common offset relative to their span, where
        # offset-based labelling is most useful.
        test_ranges = [(12003, 12015, 1.0), (1.2003, 1.2015, 1e-4),
                       (-1.2015, -1.2003, 1e-4)]
        for start, end, resol in test_ranges:
            fmt = OffsetFormatter()
            fmt.use_offset = True
            fmt.offset_format = "decimal"
            fmt.end_label_format = "sci"
            scale = FixedScale(resolution=resol)
            numlabels = 12
            ticks = scale.ticks(start, end, numlabels)
            print("range:", start, end)
            labels = fmt.format(ticks, numlabels, None)
            print("Labels:", labels, "\n")
            print("estimated width:",
                  fmt.estimate_width(start, end, numlabels))
            print("actual width:", sum(map(len, labels)))
if __name__ == "__main__":
    # Legacy test runner: the nose project is unmaintained; kept as-is for
    # compatibility with the original harness.
    import nose
    nose.run()
| 37.373016 | 88 | 0.510406 |
67595ceff07d293eab94d86319b781fed66c9856 | 1,354 | py | Python | autoio-interfaces/elstruct/reader/_psi4/__init__.py | lpratalimaffei/autoio | 57be6e4882af1841153c19e7353e2531e64ce47f | [
"Apache-2.0"
] | null | null | null | autoio-interfaces/elstruct/reader/_psi4/__init__.py | lpratalimaffei/autoio | 57be6e4882af1841153c19e7353e2531e64ce47f | [
"Apache-2.0"
] | 1 | 2022-02-15T19:35:14.000Z | 2022-02-15T19:35:14.000Z | autoio-interfaces/elstruct/reader/_psi4/__init__.py | lpratalimaffei/autoio | 57be6e4882af1841153c19e7353e2531e64ce47f | [
"Apache-2.0"
] | 13 | 2020-06-24T05:21:11.000Z | 2021-05-05T19:58:30.000Z | """ psi4 output reading module """
from elstruct.reader._psi4.energ import energy
from elstruct.reader._psi4.surface import gradient
from elstruct.reader._psi4.surface import hessian
from elstruct.reader._psi4.surface import harmonic_frequencies
from elstruct.reader._psi4.surface import irc_points
from elstruct.reader._psi4.surface import irc_path
from elstruct.reader._psi4.molecule import opt_geometry
from elstruct.reader._psi4.molecule import opt_zmatrix
from elstruct.reader._psi4.molecule import inp_zmatrix
from elstruct.reader._psi4.prop import dipole_moment
from elstruct.reader._psi4.prop import polarizability
from elstruct.reader._psi4.status import has_normal_exit_message
from elstruct.reader._psi4.status import error_list
from elstruct.reader._psi4.status import has_error_message
from elstruct.reader._psi4.status import check_convergence_messages
from elstruct.reader._psi4.version import program_name
from elstruct.reader._psi4.version import program_version
# Public API of this reader package: one entry per reader function imported
# above, controlling what ``from elstruct.reader._psi4 import *`` exposes.
__all__ = [
    'energy',
    'gradient',
    'hessian',
    'harmonic_frequencies',
    'irc_points',
    'irc_path',
    'opt_geometry',
    'opt_zmatrix',
    'inp_zmatrix',
    'dipole_moment',
    'polarizability',
    'has_normal_exit_message',
    'error_list',
    'has_error_message',
    'check_convergence_messages',
    'program_name',
    'program_version'
]
| 33.85 | 67 | 0.796898 |
8d9ed684ac8ecf9feba491a315963d05508d9dab | 215 | py | Python | nose2_kflag/tests/scenario/doctests/pymodule.py | stefanholek/nose2-kflag | 8236cc43d0f06afabdb401b111af45d5d8fd9a49 | [
"BSD-2-Clause"
] | 1 | 2020-06-14T13:54:15.000Z | 2020-06-14T13:54:15.000Z | nose2_kflag/tests/scenario/doctests/pymodule.py | stefanholek/nose2-kflag | 8236cc43d0f06afabdb401b111af45d5d8fd9a49 | [
"BSD-2-Clause"
] | 1 | 2021-02-02T05:04:05.000Z | 2021-02-02T05:04:05.000Z | nose2_kflag/tests/scenario/doctests/pymodule.py | stefanholek/nose2-kflag | 8236cc43d0f06afabdb401b111af45d5d8fd9a49 | [
"BSD-2-Clause"
] | null | null | null | """
>>> print('foo')
foo
"""
def func_foo():
    # Doctest fixture for the nose2-kflag test scenario: the expected-output
    # lines inside the docstring ARE the test and must not be edited.
    """
    >>> print('foo')
    foo
    """
def func_bar():
    # Doctest fixture (see func_foo); the docstring is the test itself.
    """
    >>> print('bar')
    bar
    """
def func_baz():
    # Doctest fixture (see func_foo); the docstring is the test itself.
    """
    >>> print('baz')
    baz
    """
| 9.347826 | 20 | 0.35814 |
136c950caceff332f1066bad1046124a2fcfc2c7 | 1,863 | py | Python | main.py | joaocg/pandas-sqlalchemy-tutorial | 5382ef0159251e7f204c348d843af61bfbfe64b6 | [
"MIT"
] | null | null | null | main.py | joaocg/pandas-sqlalchemy-tutorial | 5382ef0159251e7f204c348d843af61bfbfe64b6 | [
"MIT"
] | null | null | null | main.py | joaocg/pandas-sqlalchemy-tutorial | 5382ef0159251e7f204c348d843af61bfbfe64b6 | [
"MIT"
] | null | null | null | """Main script."""
from os import environ
from sqlalchemy import create_engine
from sqlalchemy.types import Integer, Text, String, DateTime
import pandas as pd
# Database connection setup.
# NOTE(review): credentials are hard-coded in the URL; prefer reading the
# whole connection string from the environment, as db_schema does below.
db_URI = 'mysql+pymysql://usuariosfiec:Pll%V!o4J.L3@localhost/cienciadedados'
db_schema = environ.get('SQLALCHEMY_DB_SCHEMA')
# The original chained assignment (``db_URI = engine = '...'``) briefly bound
# the raw URI string to ``engine`` before it was overwritten here; the
# redundant alias is dropped.
engine = create_engine(db_URI)
def prepare_data(csv_path='data/teste.csv'):
    """Load a CSV into a DataFrame and normalize its column names.

    Column names are lower-cased and spaces become underscores so they are
    usable as SQL column identifiers.

    :param csv_path: CSV file to read; defaults to the original hard-coded
        location, so existing callers are unaffected.
    :return: the cleaned ``pandas.DataFrame``.
    """
    jobs_DF = pd.read_csv(csv_path)
    # Iterating a DataFrame yields its column labels.
    jobs_DF.columns = [column.replace(' ', '_').lower() for column in jobs_DF]
    return jobs_DF
def upload_dataframe_to_sql(jobs_DF):
    """Append *jobs_DF* to the ``nyc_jobs`` table with explicit SQL dtypes.

    Uses the module-level ``engine`` and ``db_schema``; rows are written in
    chunks of 500 and appended to any existing table.
    """
    jobs_DF.to_sql("nyc_jobs",
                   engine,
                   if_exists='append',
                   schema=db_schema,
                   index=False,
                   chunksize=500,
                   # Explicit column -> SQLAlchemy type mapping; keys must
                   # match the normalized column names from prepare_data().
                   dtype={"job_id": Integer,
                          "agency": Text,
                          "business_title": Text,
                          "job_category": Text,
                          "salary_range_from": Integer,
                          "salary_range_to": Integer,
                          "salary_frequency": String(50),
                          "work_location": Text,
                          "division/work_unit": Text,
                          "job_description": Text,
                          "posting_date": DateTime,
                          "posting_updated": DateTime})
def get_dataframe_from_sql(table_name):
    """Create a DataFrame from the given SQL table.

    The two posting timestamp columns are parsed into datetimes on read.
    """
    sql_DF = pd.read_sql_table(table_name,
                               con=engine,
                               parse_dates=['posting_date', 'posting_updated'])
    return sql_DF
# Script-style execution: these statements also run on import.
# NOTE(review): consider guarding with ``if __name__ == '__main__':`` so the
# functions above can be imported without touching the database.
jobs_DF = prepare_data()
# DataFrame.info() prints its report to stdout and returns None, so this
# line additionally prints "None".
print(jobs_DF.info())
upload_dataframe_to_sql(jobs_DF)
get_dataframe_from_sql('nyc_jobs')
| 33.872727 | 86 | 0.56146 |
ed3c1cba7ebcbaead9d6f69eeb8405e81d0aed66 | 466 | py | Python | deepmanagerhelper/__init__.py | matteo-ronchetti/deepmanager-helper | b11f3f3ff5099e1276fd948d83bb6a045fe2e9d3 | [
"MIT"
] | null | null | null | deepmanagerhelper/__init__.py | matteo-ronchetti/deepmanager-helper | b11f3f3ff5099e1276fd948d83bb6a045fe2e9d3 | [
"MIT"
] | null | null | null | deepmanagerhelper/__init__.py | matteo-ronchetti/deepmanager-helper | b11f3f3ff5099e1276fd948d83bb6a045fe2e9d3 | [
"MIT"
] | null | null | null | import os
import json
class DeepManagerHelper:
    """Helpers for DeepManager jobs: conventional I/O paths and tagged logging."""

    @staticmethod
    def input(path):
        """Return *path* joined under the job's ``input`` directory."""
        return os.path.join("input", path)

    @staticmethod
    def output(path):
        """Return *path* joined under the job's ``output`` directory."""
        return os.path.join("output", path)

    @staticmethod
    def _to_json(obj):
        """Serialize *obj* to JSON, returning ``"{}"`` if it cannot be.

        Only serialization failures are swallowed (``TypeError`` for
        unserializable objects, ``ValueError`` e.g. for circular references);
        the original bare ``except`` also caught unrelated exceptions such as
        ``KeyboardInterrupt``.
        """
        try:
            return json.dumps(obj)
        except (TypeError, ValueError):
            return "{}"

    @staticmethod
    def log(**kwargs):
        """Print an ``@deepmanager``-tagged JSON line to stdout, flushed."""
        print("@deepmanager", DeepManagerHelper._to_json(kwargs), flush=True)
| 19.416667 | 77 | 0.598712 |
ac76f1891cd2e5ad95eb47c834bf37f487cae34e | 7,741 | py | Python | userbot/modules/system_stats.py | itsdzl/Man-Userbot | c247d04d3ee7cd2be334febfd286491ffa07b7be | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2021-12-23T18:17:44.000Z | 2021-12-23T18:17:44.000Z | userbot/modules/system_stats.py | itsdzl/Man-Userbot | c247d04d3ee7cd2be334febfd286491ffa07b7be | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/system_stats.py | itsdzl/Man-Userbot | c247d04d3ee7cd2be334febfd286491ffa07b7be | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for System Stats commands """
import asyncio
import platform
import sys
import time
from asyncio import create_subprocess_exec as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from datetime import datetime
from os import remove
from platform import python_version
from shutil import which
import psutil
from pytgcalls import __version__ as pytgcalls
from telethon import __version__, version
from userbot import ALIVE_EMOJI, ALIVE_LOGO, ALIVE_TEKS_CUSTOM, BOT_VER, CHANNEL
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, GROUP, StartTime, bot
from userbot.utils import bash, edit_or_reply, man_cmd
from .ping import get_readable_time
# carbonnow is optional; Carbon stays None when it is not installed.
# NOTE(review): the sysinfo handler below calls Carbon(...) unconditionally,
# so it will raise TypeError when carbonnow is absent -- confirm intended.
try:
    from carbonnow import Carbon
except ImportError:
    Carbon = None
# Module-level aliases used by the alive/help handlers below.
modules = CMD_HELP
emoji = ALIVE_EMOJI
alive_text = ALIVE_TEKS_CUSTOM
@man_cmd(
    pattern="sysinfo$",
)
async def _(e):
    """.sysinfo: reply with ``neofetch`` output rendered as a code image."""
    xxnx = await edit_or_reply(e, "`Processing...`")
    # Run neofetch and strip ANSI escape sequences, appending to neo.txt.
    x, y = await bash("neofetch|sed 's/\x1B\\[[0-9;\\?]*[a-zA-Z]//g' >> neo.txt")
    with open("neo.txt", "r") as neo:
        p = (neo.read()).replace("\n\n", "")
    # Render the text through the carbonara image API (requires carbonnow;
    # Carbon may be None -- see the guarded import above).
    ok = Carbon(base_url="https://carbonara.vercel.app/api/cook", code=p)
    haa = await ok.memorize("neofetch")
    await e.reply(file=haa)
    await xxnx.delete()
    remove("neo.txt")
@man_cmd(pattern=r"spc")
async def psu(event):
    """.spc: reply with a full system report (OS, CPU, RAM, bandwidth)."""
    uname = platform.uname()
    softw = "**Informasi Sistem**\n"
    softw += f"`Sistem : {uname.system}`\n"
    softw += f"`Rilis : {uname.release}`\n"
    softw += f"`Versi : {uname.version}`\n"
    softw += f"`Mesin : {uname.machine}`\n"
    # Boot Time
    boot_time_timestamp = psutil.boot_time()
    bt = datetime.fromtimestamp(boot_time_timestamp)
    softw += f"`Waktu Hidup: {bt.day}/{bt.month}/{bt.year} {bt.hour}:{bt.minute}:{bt.second}`\n"
    # CPU Cores
    cpuu = "**Informasi CPU**\n"
    cpuu += "`Physical cores : " + str(psutil.cpu_count(logical=False)) + "`\n"
    cpuu += "`Total cores : " + str(psutil.cpu_count(logical=True)) + "`\n"
    # CPU frequencies
    cpufreq = psutil.cpu_freq()
    cpuu += f"`Max Frequency : {cpufreq.max:.2f}Mhz`\n"
    cpuu += f"`Min Frequency : {cpufreq.min:.2f}Mhz`\n"
    cpuu += f"`Current Frequency: {cpufreq.current:.2f}Mhz`\n\n"
    # CPU usage
    cpuu += "**CPU Usage Per Core**\n"
    for i, percentage in enumerate(psutil.cpu_percent(percpu=True)):
        cpuu += f"`Core {i} : {percentage}%`\n"
    cpuu += "**Total CPU Usage**\n"
    cpuu += f"`Semua Core: {psutil.cpu_percent()}%`\n"
    # RAM Usage
    svmem = psutil.virtual_memory()
    memm = "**Memori Digunakan**\n"
    memm += f"`Total : {get_size(svmem.total)}`\n"
    memm += f"`Available : {get_size(svmem.available)}`\n"
    memm += f"`Used : {get_size(svmem.used)}`\n"
    memm += f"`Percentage: {svmem.percent}%`\n"
    # Bandwidth Usage
    bw = "**Bandwith Digunakan**\n"
    bw += f"`Unggah : {get_size(psutil.net_io_counters().bytes_sent)}`\n"
    bw += f"`Download: {get_size(psutil.net_io_counters().bytes_recv)}`\n"
    # Assemble the final Markdown reply in section order.
    help_string = f"{softw}\n"
    help_string += f"{cpuu}\n"
    help_string += f"{memm}\n"
    help_string += f"{bw}\n"
    help_string += "**Informasi Mesin**\n"
    help_string += f"`Python {sys.version}`\n"
    help_string += f"`Telethon {__version__}`"
    await edit_or_reply(event, help_string)
def get_size(bytes, suffix="B"):
    """Return *bytes* as a human-readable string, e.g. ``1.50KB``.

    Divides by 1024 per step through B, KB, MB, GB, TB, PB (and EB beyond).
    The parameter name shadows the builtin but is kept for interface
    compatibility with keyword callers.
    """
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P"]:
        if bytes < factor:
            return f"{bytes:.2f}{unit}{suffix}"
        bytes /= factor
    # Bug fix: values >= 1024 PB previously fell off the loop and returned
    # None implicitly; report them in exabytes instead.
    return f"{bytes:.2f}E{suffix}"
@man_cmd(pattern="sysd$")
async def sysdetails(sysd):
    """.sysd: reply with plain-text ``neofetch --stdout`` output."""
    # Guard inherited from the userbot template: only proceed when the first
    # character is neither a letter nor a common bot-command prefix.
    if not sysd.text[0].isalpha() and sysd.text[0] not in ("/", "#", "@", "!"):
        try:
            fetch = await asyncrunapp(
                "neofetch",
                "--stdout",
                stdout=asyncPIPE,
                stderr=asyncPIPE,
            )
            stdout, stderr = await fetch.communicate()
            # Concatenate stdout and stderr so errors are also shown.
            result = str(stdout.decode().strip()) + str(stderr.decode().strip())
            await edit_or_reply(sysd, "`" + result + "`")
        except FileNotFoundError:
            # The neofetch binary is not installed on the host.
            await edit_or_reply(sysd, "**Install neofetch Terlebih dahulu!!**")
@man_cmd(pattern="botver$")
async def bot_ver(event):
    """.botver: reply with the git describe/revision-count of the checkout."""
    # Guard inherited from the userbot template (see sysdetails).
    if event.text[0].isalpha() or event.text[0] in ("/", "#", "@", "!"):
        return
    if which("git") is not None:
        # Human-readable version: nearest ref plus commit distance.
        ver = await asyncrunapp(
            "git",
            "describe",
            "--all",
            "--long",
            stdout=asyncPIPE,
            stderr=asyncPIPE,
        )
        stdout, stderr = await ver.communicate()
        verout = str(stdout.decode().strip()) + str(stderr.decode().strip())
        # Total number of commits reachable from any ref.
        rev = await asyncrunapp(
            "git",
            "rev-list",
            "--all",
            "--count",
            stdout=asyncPIPE,
            stderr=asyncPIPE,
        )
        stdout, stderr = await rev.communicate()
        revout = str(stdout.decode().strip()) + str(stderr.decode().strip())
        await edit_or_reply(
            event,
            "✥ **Userbot Versi :** " f"`{verout}`" "\n✥ **Revisi :** " f"`{revout}`",
        )
    else:
        # No git binary available: fall back to a hard-coded version string.
        await edit_or_reply(
            event, "anda tidak memiliki git, Anda Menjalankan Bot - 'v1.beta.4'!"
        )
@man_cmd(pattern="(?:alive|on)\s?(.)?")
async def amireallyalive(alive):
    """.alive / .on: reply with a status card (versions, uptime, links)."""
    user = await bot.get_me()
    uptime = await get_readable_time((time.time() - StartTime))
    output = (
        f"**╭─━━━━━━━━━━━─╮** \n"
        f"** ⚡ 𝗗 𝗭 𝗟 ⚡ ** \n"
        f"**╰─━━━━━━━━━━━─╯** \n\n"
        f"**★ Bot Is Running Up...★ ** \n\n"
        f"{emoji} **x_x :** [{user.first_name}](tg://user?id={user.id}) \n"
        f"{emoji} **Modules :** `{len(modules)} Modules` \n"
        f"{emoji} **Bot Version:** `{BOT_VER}` \n"
        f"{emoji} **Python :** `{python_version()}` \n"
        f"{emoji} **Pytgcalls :** `{pytgcalls.__version__}` \n"
        f"{emoji} **Telethon :** `{version.__version__}` \n"
        f"{emoji} **Bot Uptime :** `{uptime}` \n\n"
        f" **[Instagram](https://instagram.com/adtyanrr_?utm_medium=copy_link)** | **[Eunoia](https://t.me/{CHANNEL})** | **[Dizz](tg://user?id={user.id})**"
    )
    if ALIVE_LOGO:
        try:
            # Send the card as a photo caption, then auto-delete later.
            logo = ALIVE_LOGO
            await alive.delete()
            msg = await bot.send_file(alive.chat_id, logo, caption=output)
            await asyncio.sleep(800)
            await msg.delete()
        except BaseException:
            # Logo URL could not be sent: fall back to a text reply with a
            # warning appended, auto-deleted after a shorter delay.
            await alive.edit(
                output + "\n\n ***Logo yang diberikan tidak valid."
                "\nPastikan link diarahkan ke gambar logo**"
            )
            await asyncio.sleep(100)
            await alive.delete()
    else:
        await edit_or_reply(alive, output)
CMD_HELP.update(
{
"system": f"**Plugin : **`system`.\
\n\n • **Syntax :** `{cmd}sysinfo`\
\n • **Function : **Informasi sistem menggunakan neofetch mengirim sebagai gambar.\
\n\n • **Syntax :** `{cmd}sysd`\
\n • **Function : **Informasi sistem menggunakan neofetch.\
\n\n\n • **Syntax :** `{cmd}botver`\
\n • **Function : **Menampilkan versi userbot.\
\n\n • **Syntax :** `{cmd}spc`\
\n • **Function : **Menampilkan spesifikasi sistem secara lengkap.\
"
}
)
CMD_HELP.update(
{
"alive": f"**Plugin : **`alive`\
\n\n • **Syntax :** `{cmd}alive` atau `{cmd}on`\
\n • **Function : **Untuk melihat apakah bot Anda berfungsi atau tidak.\
"
}
)
| 33.951754 | 160 | 0.569048 |
4ee02f9bfbe502b851977cc997b2c3e74ebbe7d8 | 62 | py | Python | Python Programs/sum_function.py | manvikri22/hacktoberfest-2021 | 7b169746788835b9dacfdd4e64f3b25f17453178 | [
"MIT"
] | 21 | 2021-10-01T01:52:56.000Z | 2021-11-08T13:01:26.000Z | Python Programs/sum_function.py | manvikri22/hacktoberfest-2021 | 7b169746788835b9dacfdd4e64f3b25f17453178 | [
"MIT"
] | 30 | 2021-09-30T18:28:07.000Z | 2021-10-03T05:23:45.000Z | Python Programs/sum_function.py | manvikri22/hacktoberfest-2021 | 7b169746788835b9dacfdd4e64f3b25f17453178 | [
"MIT"
] | 71 | 2021-09-30T17:32:43.000Z | 2021-10-21T05:26:51.000Z | number = [1, 3, 7, 14]
print(sum(number))  # 25: the total of the list elements
print(sum(number,5))  # 30: the same total, with a start value of 5
b9ef9ed6c79d74d9d6226bbd313f0ff035af870d | 1,807 | py | Python | code/tests/functional/tests/test_auth.py | CiscoSecurity/tr-05-serverless-google-chronicle | b11c65748eaed7eb424b32c2663b70e71c527c22 | [
"MIT"
] | 1 | 2020-06-19T18:42:40.000Z | 2020-06-19T18:42:40.000Z | code/tests/functional/tests/test_auth.py | CiscoSecurity/tr-05-serverless-google-chronicle | b11c65748eaed7eb424b32c2663b70e71c527c22 | [
"MIT"
] | 1 | 2020-10-15T10:54:37.000Z | 2020-10-15T10:54:37.000Z | code/tests/functional/tests/test_auth.py | CiscoSecurity/tr-05-serverless-google-chronicle | b11c65748eaed7eb424b32c2663b70e71c527c22 | [
"MIT"
] | 1 | 2022-03-04T15:04:28.000Z | 2022-03-04T15:04:28.000Z | import pytest
@pytest.mark.skip('Changed of functionality to get token')
# NOTE(review): skipped pending the new token-acquisition flow; re-enable
# once the relay auth mechanism is updated.
def test_relay_auth_positive(relay_api):
    """Perform testing for relay health endpoint to check
    status auth for Google Chronicle
    ID: CCTRI-769-0cc7805e-297d-4700-872b-dbf82f267326
    Steps:
        1. Send request to relay endpoint with right token
    Expectedresults:
        1. Check that response has status 200
    Importance: Critical
    """
    response = relay_api.health('')
    assert response.status_code == 200
    assert response.json()['data'] == {'status': 'ok'}
@pytest.mark.skip('Changed of functionality to get token')
# NOTE(review): the parametrize below is commented out, so the fixtures
# wrong_token/message/code would be unresolved if the skip were removed --
# restore the parametrize together with un-skipping.
# @pytest.mark.parametrize(
#     'wrong_token,message,code',
#     (
#         ('123', 'Authorization failed: Wrong JWT structure',
#          'authorization error'),
#         (os.environ['ANOTHER_KEY'],
#          'Unexpected response from Google Chronicle: '
#          'Backstory API has not been used in project ',
#          'permission denied')
#     )
# )
def test_relay_auth_negative(relay_api_without_token, wrong_token, message,
                             code):
    """Perform testing for relay health endpoint to check
    status auth for Google Chronicle with wrong token
    ID: CCTRI-769-86f8a6c7-4356-4fe8-b504-8403c2be7e41
    Steps:
        1. Send request to relay endpoint with wrong tokens
    Expectedresults:
        1. Check that response has status 200, and error message
    Importance: Critical
    """
    response = relay_api_without_token.health(
        '',
        **{'headers': {'Authorization': 'Bearer {}'.format(wrong_token)}}
    )
    assert response.status_code == 200
    error = response.json()["errors"][0]
    assert error['type'] == 'fatal'
    assert error['code'] == code
    assert error['message'].startswith(message)
12026db2a5b9119d26f108c545d94995b4d82ac3 | 3,208 | py | Python | swig/python/osgeo/utils/gdalimport.py | gajgeospatial/gdal-3.2.2 | f03032b8b734f611d5b3039c0e5cdbf81adc306e | [
"Apache-2.0"
] | null | null | null | swig/python/osgeo/utils/gdalimport.py | gajgeospatial/gdal-3.2.2 | f03032b8b734f611d5b3039c0e5cdbf81adc306e | [
"Apache-2.0"
] | null | null | null | swig/python/osgeo/utils/gdalimport.py | gajgeospatial/gdal-3.2.2 | f03032b8b734f611d5b3039c0e5cdbf81adc306e | [
"Apache-2.0"
] | 1 | 2022-02-21T06:31:07.000Z | 2022-02-21T06:31:07.000Z | #!/usr/bin/env python3
# ******************************************************************************
# $Id: gdalimport.py 124baa7f71f15396a661014a81b6c5b0c82c8004 2020-10-14 17:29:39 +0300 Idan Miara $
#
# Name: gdalimport
# Project: GDAL Python Interface
# Purpose: Import a GDAL supported file to Tiled GeoTIFF, and build overviews
# Author: Frank Warmerdam, warmerdam@pobox.com
#
# ******************************************************************************
# Copyright (c) 2000, Frank Warmerdam
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import os.path
import sys
from osgeo import gdal
def progress_cb(complete, message, cb_data):
    """GDAL progress callback: print the prefix and percent complete.

    GDAL invokes progress callbacks with *complete* as a fraction in [0, 1];
    the original ``%d`` formatting truncated every intermediate value to 0,
    so the reported progress never moved until completion.
    """
    print('%s %d' % (cb_data, complete * 100))
def main(argv):
    """Import argv[1] into a tiled GeoTIFF and build "average" overviews.

    The output name is argv[2] if given, otherwise derived from the source
    basename with a ".tif" suffix (suffixed _1, _2, ... to avoid clobbering).
    Exits the process via sys.exit on usage or driver errors.
    """
    gdal.AllRegister()
    # Let GDAL consume its generic switches (e.g. --help-general).
    argv = gdal.GeneralCmdLineProcessor(argv)
    if argv is None:
        sys.exit(0)
    if len(argv) < 2:
        print("Usage: gdalimport.py [--help-general] source_file [newfile]")
        sys.exit(1)
    filename = argv[1]
    dataset = gdal.Open(filename)
    if dataset is None:
        print('Unable to open %s' % filename)
        sys.exit(1)
    geotiff = gdal.GetDriverByName("GTiff")
    if geotiff is None:
        print('GeoTIFF driver not registered.')
        sys.exit(1)
    if len(argv) < 3:
        # Derive a non-existing output name from the source basename.
        newbase, ext = os.path.splitext(os.path.basename(filename))
        newfile = newbase + ".tif"
        i = 0
        while os.path.isfile(newfile):
            i = i + 1
            newfile = newbase + "_" + str(i) + ".tif"
    else:
        newfile = argv[2]
    print('Importing to Tiled GeoTIFF file: %s' % newfile)
    new_dataset = geotiff.CreateCopy(newfile, dataset, 0,
                                     ['TILED=YES', ],
                                     callback=progress_cb,
                                     callback_data='Translate: ')
    # Drop the source reference so GDAL can close the input file.
    dataset = None
    print('Building overviews')
    new_dataset.BuildOverviews("average", callback=progress_cb,
                               callback_data='Overviews: ')
    new_dataset = None
    print('Done')
if __name__ == '__main__':
    # Script entry point; propagate main()'s sys.exit status.
    sys.exit(main(sys.argv))
| 35.644444 | 101 | 0.606608 |
3408885894ff29f524cf6c9733b4a4c7e68cc869 | 3,388 | py | Python | venv/lib/python3.6/site-packages/examples/example_proxied.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 465 | 2016-05-07T00:22:59.000Z | 2022-03-31T08:36:24.000Z | venv/lib/python3.6/site-packages/examples/example_proxied.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 129 | 2016-05-17T08:00:15.000Z | 2022-03-31T23:09:36.000Z | venv/lib/python3.6/site-packages/examples/example_proxied.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 167 | 2016-05-09T16:19:27.000Z | 2022-03-31T07:19:18.000Z | #!/usr/bin/env python
"""Cloudflare API code - example"""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import CloudFlare
def main():
    """Change the proxied value on a FQDN"""
    # Parse positional args: zone, record name, and the desired proxied flag.
    try:
        zone_name = sys.argv[1]
        dns_name = sys.argv[2]
        if sys.argv[3] == 'false':
            new_r_proxied_flag = False
        elif sys.argv[3] == 'true':
            new_r_proxied_flag = True
        else:
            raise ValueError('bad arg')
    except IndexError:
        exit('usage: ./example-make-zone-proxied.py zone dns_record [true|false]')
    except ValueError:
        exit('usage: ./example-make-zone-proxied.py zone dns_record [true|false]')
    cf = CloudFlare.CloudFlare()
    # grab the zone identifier
    try:
        params = {'name':zone_name, 'per_page':1}
        zones = cf.zones.get(params=params)
    except CloudFlare.exceptions.CloudFlareAPIError as e:
        exit('/zones.get %d %s - api call failed' % (e, e))
    except Exception as e:
        exit('/zones.get - %s - api call failed' % (e))
    if len(zones) != 1:
        exit('/zones.get - %s - api call returned %d items' % (zone_name, len(zones)))
    # there should only be one zone
    zone = zones[0]
    zone_name = zone['name']
    zone_id = zone['id']
    print("Zone:\t%s %s" % (zone_id, zone_name))
    # Fetch all DNS records matching the requested FQDN within that zone.
    try:
        params = {'name': dns_name}
        dns_records = cf.zones.dns_records.get(zone_id, params=params)
    except CloudFlare.exceptions.CloudFlareAPIError as e:
        exit('/zones/dns_records.get %d %s - api call failed' % (e, e))
    if len(dns_records) == 0:
        exit('/zones.dns_records.get - %s - no records found' % (dns_name))
    # Flip the proxied flag on every matching record that differs.
    for dns_record in dns_records:
        r_zone_id = dns_record['zone_id']
        r_id = dns_record['id']
        r_name = dns_record['name']
        r_type = dns_record['type']
        r_content = dns_record['content']
        r_ttl = dns_record['ttl']
        r_proxied = dns_record['proxied']
        r_proxiable = dns_record['proxiable']
        print('Record:\t%s %s %s %6d %-5s %s ; proxied=%s proxiable=%s' % (
            r_zone_id, r_id, r_name, r_ttl, r_type, r_content, r_proxied, r_proxiable
        ))
        if r_proxied == new_r_proxied_flag:
            # Nothing to do
            continue
        # PUT replaces the record, so the full record body must be resent.
        dns_record_id = dns_record['id']
        new_dns_record = {
            'zone_id': r_zone_id,
            'id': r_id,
            'type': r_type,
            'name': r_name,
            'content': r_content,
            'ttl': r_ttl,
            'proxied': new_r_proxied_flag
        }
        try:
            dns_record = cf.zones.dns_records.put(zone_id, dns_record_id, data=new_dns_record)
        except CloudFlare.exceptions.CloudFlareAPIError as e:
            exit('/zones/dns_records.put %d %s - api call failed' % (e, e))
        # Re-print the record as returned by the API to confirm the change.
        r_zone_id = dns_record['zone_id']
        r_id = dns_record['id']
        r_name = dns_record['name']
        r_type = dns_record['type']
        r_content = dns_record['content']
        r_ttl = dns_record['ttl']
        r_proxied = dns_record['proxied']
        r_proxiable = dns_record['proxiable']
        print('Record:\t%s %s %s %6d %-5s %s ; proxied=%s proxiable=%s <<-- after' % (
            r_zone_id, r_id, r_name, r_ttl, r_type, r_content, r_proxied, r_proxiable
        ))
    exit(0)
if __name__ == '__main__':
    # Script entry point.
    main()
| 31.082569 | 94 | 0.580283 |
f579c7005daf1276df94752b16cc6ad703cce37f | 686 | py | Python | appengine/standard/mailgun/appengine_config.py | yshalabi/python-docs-samples | 591787c01d94102ba9205f998d95a05b39ccad2f | [
"Apache-2.0"
] | 5,938 | 2015-05-18T05:04:37.000Z | 2022-03-31T20:16:39.000Z | appengine/standard/mailgun/appengine_config.py | yshalabi/python-docs-samples | 591787c01d94102ba9205f998d95a05b39ccad2f | [
"Apache-2.0"
] | 4,730 | 2015-05-07T19:00:38.000Z | 2022-03-31T21:59:41.000Z | appengine/standard/mailgun/appengine_config.py | yshalabi/python-docs-samples | 591787c01d94102ba9205f998d95a05b39ccad2f | [
"Apache-2.0"
] | 6,734 | 2015-05-05T17:06:20.000Z | 2022-03-31T12:02:51.000Z | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# appengine_config.py is loaded by the App Engine standard runtime before
# the application starts; here it extends sys.path with bundled libraries.
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
| 36.105263 | 74 | 0.760933 |
8a948fa6ef151066597b671d54c697c444eac9f4 | 26,312 | py | Python | vspk/v4_0/nuvm.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nuvm.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nuvm.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUVMResyncsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUAlarmsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUVMInterfacesFetcher
from .fetchers import NUVRSsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUVM(NURESTObject):
    """ Represents a VM in the VSD
        Notes:
            Read only API that can retrieve the VMs associated with a domain, zone or subnet for mediation created VM's for REST created VM's you need to set the additional proxy user header in http request : X-Nuage-ProxyUservalue of the header has to be either :1) enterpriseName@UserName (example : Alcatel Lucent@bob), or 2) external ID of user in VSD, typically is UUID generally decided by the CMS tool in questionUser needs to have CMS privileges to use proxy user header.
    """
    # Singular/plural REST names for this entity — consumed by the
    # NURESTObject (bambou) base class when building request URLs.
    __rest_name__ = "vm"
    __resource_name__ = "vms"
    ## Constants
    # Allowed values for the `status`, `reasonType`, `entityScope` and
    # `deleteMode` attributes — see the matching `choices=` lists in __init__.
    CONST_REASON_TYPE_SHUTDOWN_UNKNOWN = "SHUTDOWN_UNKNOWN"
    CONST_REASON_TYPE_CRASHED_UNKNOWN = "CRASHED_UNKNOWN"
    CONST_REASON_TYPE_PAUSED_IOERROR = "PAUSED_IOERROR"
    CONST_STATUS_SHUTDOWN = "SHUTDOWN"
    CONST_REASON_TYPE_SHUTDOWN_LAST = "SHUTDOWN_LAST"
    CONST_STATUS_DELETE_PENDING = "DELETE_PENDING"
    CONST_REASON_TYPE_RUNNING_UNKNOWN = "RUNNING_UNKNOWN"
    CONST_STATUS_RUNNING = "RUNNING"
    CONST_REASON_TYPE_RUNNING_LAST = "RUNNING_LAST"
    CONST_REASON_TYPE_RUNNING_UNPAUSED = "RUNNING_UNPAUSED"
    CONST_REASON_TYPE_PAUSED_FROM_SNAPSHOT = "PAUSED_FROM_SNAPSHOT"
    CONST_REASON_TYPE_PAUSED_MIGRATION = "PAUSED_MIGRATION"
    CONST_REASON_TYPE_RUNNING_BOOTED = "RUNNING_BOOTED"
    CONST_REASON_TYPE_UNKNOWN = "UNKNOWN"
    CONST_STATUS_UNREACHABLE = "UNREACHABLE"
    CONST_STATUS_BLOCKED = "BLOCKED"
    CONST_REASON_TYPE_SHUTOFF_DESTROYED = "SHUTOFF_DESTROYED"
    CONST_REASON_TYPE_SHUTOFF_FROM_SNAPSHOT = "SHUTOFF_FROM_SNAPSHOT"
    CONST_REASON_TYPE_SHUTOFF_UNKNOWN = "SHUTOFF_UNKNOWN"
    CONST_STATUS_NOSTATE = "NOSTATE"
    CONST_REASON_TYPE_PAUSED_DUMP = "PAUSED_DUMP"
    CONST_REASON_TYPE_CRASHED_LAST = "CRASHED_LAST"
    CONST_STATUS_CRASHED = "CRASHED"
    CONST_REASON_TYPE_PAUSED_LAST = "PAUSED_LAST"
    CONST_REASON_TYPE_BLOCKED_LAST = "BLOCKED_LAST"
    CONST_REASON_TYPE_SHUTOFF_LAST = "SHUTOFF_LAST"
    CONST_STATUS_SHUTOFF = "SHUTOFF"
    CONST_REASON_TYPE_SHUTOFF_SHUTDOWN = "SHUTOFF_SHUTDOWN"
    CONST_REASON_TYPE_NOSTATE_UNKNOWN = "NOSTATE_UNKNOWN"
    CONST_REASON_TYPE_PAUSED_SAVE = "PAUSED_SAVE"
    CONST_REASON_TYPE_RUNNING_FROM_SNAPSHOT = "RUNNING_FROM_SNAPSHOT"
    CONST_STATUS_UNKNOWN = "UNKNOWN"
    CONST_REASON_TYPE_PAUSED_UNKNOWN = "PAUSED_UNKNOWN"
    CONST_REASON_TYPE_SHUTOFF_FAILED = "SHUTOFF_FAILED"
    CONST_REASON_TYPE_SHUTOFF_SAVED = "SHUTOFF_SAVED"
    CONST_REASON_TYPE_SHUTOFF_MIGRATED = "SHUTOFF_MIGRATED"
    CONST_STATUS_LAST = "LAST"
    CONST_REASON_TYPE_RUNNING_MIGRATED = "RUNNING_MIGRATED"
    CONST_REASON_TYPE_RUNNING_SAVE_CANCELED = "RUNNING_SAVE_CANCELED"
    CONST_REASON_TYPE_SHUTDOWN_USER = "SHUTDOWN_USER"
    CONST_REASON_TYPE_RUNNING_MIGRATION_CANCELED = "RUNNING_MIGRATION_CANCELED"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_STATUS_PAUSED = "PAUSED"
    CONST_STATUS_INIT = "INIT"
    CONST_REASON_TYPE_BLOCKED_UNKNOWN = "BLOCKED_UNKNOWN"
    CONST_REASON_TYPE_NOSTATE_LAST = "NOSTATE_LAST"
    CONST_REASON_TYPE_RUNNING_RESTORED = "RUNNING_RESTORED"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_REASON_TYPE_SHUTOFF_CRASHED = "SHUTOFF_CRASHED"
    CONST_REASON_TYPE_PAUSED_USER = "PAUSED_USER"
    CONST_DELETE_MODE_TIMER = "TIMER"
    CONST_REASON_TYPE_PAUSED_WATCHDOG = "PAUSED_WATCHDOG"
    CONST_REASON_TYPE_PAUSED_SHUTTING_DOWN = "PAUSED_SHUTTING_DOWN"
    def __init__(self, **kwargs):
        """ Initializes a VM instance
            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary
            Examples:
                >>> vm = NUVM(id=u'xxxx-xxx-xxx-xxx', name=u'VM')
                >>> vm = NUVM(data=my_dict)
        """
        super(NUVM, self).__init__()
        # Read/Write Attributes
        self._l2_domain_ids = None
        self._vrsid = None
        self._uuid = None
        self._name = None
        self._last_updated_by = None
        self._reason_type = None
        self._delete_expiry = None
        self._delete_mode = None
        self._resync_info = None
        self._site_identifier = None
        self._interfaces = None
        self._enterprise_id = None
        self._enterprise_name = None
        self._entity_scope = None
        self._domain_ids = None
        self._zone_ids = None
        self._orchestration_id = None
        self._user_id = None
        self._user_name = None
        self._status = None
        self._subnet_ids = None
        self._external_id = None
        self._hypervisor_ip = None
        # Register each attribute with the REST layer, mapping the local
        # snake_case name to the camelCase name used by the VSD API.
        self.expose_attribute(local_name="l2_domain_ids", remote_name="l2DomainIDs", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="vrsid", remote_name="VRSID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="reason_type", remote_name="reasonType", attribute_type=str, is_required=False, is_unique=False, choices=[u'BLOCKED_LAST', u'BLOCKED_UNKNOWN', u'CRASHED_LAST', u'CRASHED_UNKNOWN', u'NOSTATE_LAST', u'NOSTATE_UNKNOWN', u'PAUSED_DUMP', u'PAUSED_FROM_SNAPSHOT', u'PAUSED_IOERROR', u'PAUSED_LAST', u'PAUSED_MIGRATION', u'PAUSED_SAVE', u'PAUSED_SHUTTING_DOWN', u'PAUSED_UNKNOWN', u'PAUSED_USER', u'PAUSED_WATCHDOG', u'RUNNING_BOOTED', u'RUNNING_FROM_SNAPSHOT', u'RUNNING_LAST', u'RUNNING_MIGRATED', u'RUNNING_MIGRATION_CANCELED', u'RUNNING_RESTORED', u'RUNNING_SAVE_CANCELED', u'RUNNING_UNKNOWN', u'RUNNING_UNPAUSED', u'SHUTDOWN_LAST', u'SHUTDOWN_UNKNOWN', u'SHUTDOWN_USER', u'SHUTOFF_CRASHED', u'SHUTOFF_DESTROYED', u'SHUTOFF_FAILED', u'SHUTOFF_FROM_SNAPSHOT', u'SHUTOFF_LAST', u'SHUTOFF_MIGRATED', u'SHUTOFF_SAVED', u'SHUTOFF_SHUTDOWN', u'SHUTOFF_UNKNOWN', u'UNKNOWN'])
        self.expose_attribute(local_name="delete_expiry", remote_name="deleteExpiry", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="delete_mode", remote_name="deleteMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'TIMER'])
        self.expose_attribute(local_name="resync_info", remote_name="resyncInfo", attribute_type=dict, is_required=False, is_unique=False)
        self.expose_attribute(local_name="site_identifier", remote_name="siteIdentifier", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="interfaces", remote_name="interfaces", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="enterprise_name", remote_name="enterpriseName", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="domain_ids", remote_name="domainIDs", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="zone_ids", remote_name="zoneIDs", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="orchestration_id", remote_name="orchestrationID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="user_id", remote_name="userID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="user_name", remote_name="userName", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'BLOCKED', u'CRASHED', u'DELETE_PENDING', u'INIT', u'LAST', u'NOSTATE', u'PAUSED', u'RUNNING', u'SHUTDOWN', u'SHUTOFF', u'UNKNOWN', u'UNREACHABLE'])
        self.expose_attribute(local_name="subnet_ids", remote_name="subnetIDs", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="hypervisor_ip", remote_name="hypervisorIP", attribute_type=str, is_required=False, is_unique=False)
        # Fetchers
        # Each fetcher lazily retrieves related objects with a "child"
        # relationship to this VM.
        self.vm_resyncs = NUVMResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.vm_interfaces = NUVMInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.vrss = NUVRSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self._compute_args(**kwargs)
    # Properties
    # Each property below is a thin accessor over the matching private
    # attribute registered with expose_attribute() in __init__.
    @property
    def l2_domain_ids(self):
        """ Get l2_domain_ids value.
            Notes:
                Array of IDs of the l2 domain that the VM is connected to
                This attribute is named `l2DomainIDs` in VSD API.
        """
        return self._l2_domain_ids
    @l2_domain_ids.setter
    def l2_domain_ids(self, value):
        """ Set l2_domain_ids value.
            Notes:
                Array of IDs of the l2 domain that the VM is connected to
                This attribute is named `l2DomainIDs` in VSD API.
        """
        self._l2_domain_ids = value
    @property
    def vrsid(self):
        """ Get vrsid value.
            Notes:
                Id of the VRS that this VM is attached to.
                This attribute is named `VRSID` in VSD API.
        """
        return self._vrsid
    @vrsid.setter
    def vrsid(self, value):
        """ Set vrsid value.
            Notes:
                Id of the VRS that this VM is attached to.
                This attribute is named `VRSID` in VSD API.
        """
        self._vrsid = value
    @property
    def uuid(self):
        """ Get uuid value.
            Notes:
                UUID of the VM
                This attribute is named `UUID` in VSD API.
        """
        return self._uuid
    @uuid.setter
    def uuid(self, value):
        """ Set uuid value.
            Notes:
                UUID of the VM
                This attribute is named `UUID` in VSD API.
        """
        self._uuid = value
    @property
    def name(self):
        """ Get name value.
            Notes:
                Name of the VM
        """
        return self._name
    @name.setter
    def name(self, value):
        """ Set name value.
            Notes:
                Name of the VM
        """
        self._name = value
    @property
    def last_updated_by(self):
        """ Get last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by
    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value
    @property
    def reason_type(self):
        """ Get reason_type value.
            Notes:
                Reason of the event associated with the VM.
                This attribute is named `reasonType` in VSD API.
        """
        return self._reason_type
    @reason_type.setter
    def reason_type(self, value):
        """ Set reason_type value.
            Notes:
                Reason of the event associated with the VM.
                This attribute is named `reasonType` in VSD API.
        """
        self._reason_type = value
    @property
    def delete_expiry(self):
        """ Get delete_expiry value.
            Notes:
                reflects the VM Deletion expiry timer in secs , deleteMode needs to be non-null value for deleteExpiry to be taken in to effect. CMS created VM's will always have deleteMode set to TIMER
                This attribute is named `deleteExpiry` in VSD API.
        """
        return self._delete_expiry
    @delete_expiry.setter
    def delete_expiry(self, value):
        """ Set delete_expiry value.
            Notes:
                reflects the VM Deletion expiry timer in secs , deleteMode needs to be non-null value for deleteExpiry to be taken in to effect. CMS created VM's will always have deleteMode set to TIMER
                This attribute is named `deleteExpiry` in VSD API.
        """
        self._delete_expiry = value
    @property
    def delete_mode(self):
        """ Get delete_mode value.
            Notes:
                reflects the mode of VM Deletion - TIMER Possible values are TIMER, .
                This attribute is named `deleteMode` in VSD API.
        """
        return self._delete_mode
    @delete_mode.setter
    def delete_mode(self, value):
        """ Set delete_mode value.
            Notes:
                reflects the mode of VM Deletion - TIMER Possible values are TIMER, .
                This attribute is named `deleteMode` in VSD API.
        """
        self._delete_mode = value
    @property
    def resync_info(self):
        """ Get resync_info value.
            Notes:
                Information of the status of the resync operation of a VM
                This attribute is named `resyncInfo` in VSD API.
        """
        return self._resync_info
    @resync_info.setter
    def resync_info(self, value):
        """ Set resync_info value.
            Notes:
                Information of the status of the resync operation of a VM
                This attribute is named `resyncInfo` in VSD API.
        """
        self._resync_info = value
    @property
    def site_identifier(self):
        """ Get site_identifier value.
            Notes:
                This property specifies the site the VM belongs to, for Geo-redundancy.
                This attribute is named `siteIdentifier` in VSD API.
        """
        return self._site_identifier
    @site_identifier.setter
    def site_identifier(self, value):
        """ Set site_identifier value.
            Notes:
                This property specifies the site the VM belongs to, for Geo-redundancy.
                This attribute is named `siteIdentifier` in VSD API.
        """
        self._site_identifier = value
    @property
    def interfaces(self):
        """ Get interfaces value.
            Notes:
                List of VM interfaces associated with the VM
        """
        return self._interfaces
    @interfaces.setter
    def interfaces(self, value):
        """ Set interfaces value.
            Notes:
                List of VM interfaces associated with the VM
        """
        self._interfaces = value
    @property
    def enterprise_id(self):
        """ Get enterprise_id value.
            Notes:
                ID of the enterprise that this VM belongs to
                This attribute is named `enterpriseID` in VSD API.
        """
        return self._enterprise_id
    @enterprise_id.setter
    def enterprise_id(self, value):
        """ Set enterprise_id value.
            Notes:
                ID of the enterprise that this VM belongs to
                This attribute is named `enterpriseID` in VSD API.
        """
        self._enterprise_id = value
    @property
    def enterprise_name(self):
        """ Get enterprise_name value.
            Notes:
                Name of the enterprise that this VM belongs to
                This attribute is named `enterpriseName` in VSD API.
        """
        return self._enterprise_name
    @enterprise_name.setter
    def enterprise_name(self, value):
        """ Set enterprise_name value.
            Notes:
                Name of the enterprise that this VM belongs to
                This attribute is named `enterpriseName` in VSD API.
        """
        self._enterprise_name = value
    @property
    def entity_scope(self):
        """ Get entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope
    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value
    @property
    def domain_ids(self):
        """ Get domain_ids value.
            Notes:
                Array of IDs of the domain that the VM is connected to
                This attribute is named `domainIDs` in VSD API.
        """
        return self._domain_ids
    @domain_ids.setter
    def domain_ids(self, value):
        """ Set domain_ids value.
            Notes:
                Array of IDs of the domain that the VM is connected to
                This attribute is named `domainIDs` in VSD API.
        """
        self._domain_ids = value
    @property
    def zone_ids(self):
        """ Get zone_ids value.
            Notes:
                Array of IDs of the zone that this VM is attached to
                This attribute is named `zoneIDs` in VSD API.
        """
        return self._zone_ids
    @zone_ids.setter
    def zone_ids(self, value):
        """ Set zone_ids value.
            Notes:
                Array of IDs of the zone that this VM is attached to
                This attribute is named `zoneIDs` in VSD API.
        """
        self._zone_ids = value
    @property
    def orchestration_id(self):
        """ Get orchestration_id value.
            Notes:
                Orchestration ID
                This attribute is named `orchestrationID` in VSD API.
        """
        return self._orchestration_id
    @orchestration_id.setter
    def orchestration_id(self, value):
        """ Set orchestration_id value.
            Notes:
                Orchestration ID
                This attribute is named `orchestrationID` in VSD API.
        """
        self._orchestration_id = value
    @property
    def user_id(self):
        """ Get user_id value.
            Notes:
                ID of the user that created this VM
                This attribute is named `userID` in VSD API.
        """
        return self._user_id
    @user_id.setter
    def user_id(self, value):
        """ Set user_id value.
            Notes:
                ID of the user that created this VM
                This attribute is named `userID` in VSD API.
        """
        self._user_id = value
    @property
    def user_name(self):
        """ Get user_name value.
            Notes:
                Username of the user that created this VM
                This attribute is named `userName` in VSD API.
        """
        return self._user_name
    @user_name.setter
    def user_name(self, value):
        """ Set user_name value.
            Notes:
                Username of the user that created this VM
                This attribute is named `userName` in VSD API.
        """
        self._user_name = value
    @property
    def status(self):
        """ Get status value.
            Notes:
                Status of the VM.
        """
        return self._status
    @status.setter
    def status(self, value):
        """ Set status value.
            Notes:
                Status of the VM.
        """
        self._status = value
    @property
    def subnet_ids(self):
        """ Get subnet_ids value.
            Notes:
                Array of IDs of the subnets that the VM is connected to
                This attribute is named `subnetIDs` in VSD API.
        """
        return self._subnet_ids
    @subnet_ids.setter
    def subnet_ids(self, value):
        """ Set subnet_ids value.
            Notes:
                Array of IDs of the subnets that the VM is connected to
                This attribute is named `subnetIDs` in VSD API.
        """
        self._subnet_ids = value
    @property
    def external_id(self):
        """ Get external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        return self._external_id
    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
    @property
    def hypervisor_ip(self):
        """ Get hypervisor_ip value.
            Notes:
                IP address of the hypervisor that this VM is currently running in
                This attribute is named `hypervisorIP` in VSD API.
        """
        return self._hypervisor_ip
    @hypervisor_ip.setter
    def hypervisor_ip(self, value):
        """ Set hypervisor_ip value.
            Notes:
                IP address of the hypervisor that this VM is currently running in
                This attribute is named `hypervisorIP` in VSD API.
        """
        self._hypervisor_ip = value
| 29.934016 | 906 | 0.601285 |
e23ad8f4cb556f0933bd7804bee2ebaf1daaaf13 | 3,616 | py | Python | pyqt5/main.py | Javascript-void0/Java | 37ad42304b1b3a36fcb3a5a2f3171ab20cee9d81 | [
"MIT"
] | null | null | null | pyqt5/main.py | Javascript-void0/Java | 37ad42304b1b3a36fcb3a5a2f3171ab20cee9d81 | [
"MIT"
] | null | null | null | pyqt5/main.py | Javascript-void0/Java | 37ad42304b1b3a36fcb3a5a2f3171ab20cee9d81 | [
"MIT"
] | null | null | null | import PyQt5.QtWidgets as qtw
class MainWindow(qtw.QWidget):
    """A minimal keypad calculator window.

    Digits typed since the last operator accumulate in ``temp_nums``;
    each operator press moves the pending number plus the operator into
    ``fin_nums``. ``Enter`` evaluates the full expression and displays
    ``expression=result``.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Calculator')
        self.setLayout(qtw.QVBoxLayout())
        self.keypad()
        self.temp_nums = []  # characters of the number currently being typed
        self.fin_nums = []   # completed "<number><operator>" fragments
        self.show()

    def keypad(self):
        """Build the result line edit and the 4-column button grid."""
        container = qtw.QWidget()
        container.setLayout(qtw.QGridLayout())
        self.result_field = qtw.QLineEdit()
        btn_result = qtw.QPushButton('Enter', clicked=self.func_result)
        btn_clear = qtw.QPushButton('Clear', clicked=self.clear_calc)
        btn_9 = qtw.QPushButton('9', clicked=lambda: self.num_press('9'))
        btn_8 = qtw.QPushButton('8', clicked=lambda: self.num_press('8'))
        btn_7 = qtw.QPushButton('7', clicked=lambda: self.num_press('7'))
        btn_6 = qtw.QPushButton('6', clicked=lambda: self.num_press('6'))
        btn_5 = qtw.QPushButton('5', clicked=lambda: self.num_press('5'))
        btn_4 = qtw.QPushButton('4', clicked=lambda: self.num_press('4'))
        btn_3 = qtw.QPushButton('3', clicked=lambda: self.num_press('3'))
        btn_2 = qtw.QPushButton('2', clicked=lambda: self.num_press('2'))
        btn_1 = qtw.QPushButton('1', clicked=lambda: self.num_press('1'))
        btn_0 = qtw.QPushButton('0', clicked=lambda: self.num_press('0'))
        # Bug fix: operator keys previously called num_press(), which left
        # func_press() dead code and fin_nums never populated.
        btn_plus = qtw.QPushButton('+', clicked=lambda: self.func_press('+'))
        btn_mins = qtw.QPushButton('-', clicked=lambda: self.func_press('-'))
        btn_mult = qtw.QPushButton('*', clicked=lambda: self.func_press('*'))
        btn_divd = qtw.QPushButton('/', clicked=lambda: self.func_press('/'))
        container.layout().addWidget(self.result_field, 0, 0, 1, 4)
        container.layout().addWidget(btn_result, 1, 0, 1, 2)
        container.layout().addWidget(btn_clear, 1, 2, 1, 2)
        container.layout().addWidget(btn_9, 2, 0)
        container.layout().addWidget(btn_8, 2, 1)
        container.layout().addWidget(btn_7, 2, 2)
        container.layout().addWidget(btn_plus, 2, 3)
        container.layout().addWidget(btn_6, 3, 0)
        container.layout().addWidget(btn_5, 3, 1)
        container.layout().addWidget(btn_4, 3, 2)
        container.layout().addWidget(btn_mins, 3, 3)
        container.layout().addWidget(btn_3, 4, 0)
        container.layout().addWidget(btn_2, 4, 1)
        container.layout().addWidget(btn_1, 4, 2)
        container.layout().addWidget(btn_mult, 4, 3)
        container.layout().addWidget(btn_0, 5, 0, 1, 3)
        container.layout().addWidget(btn_divd, 5, 3)
        self.layout().addWidget(container)

    def num_press(self, key_number):
        """Append a digit to the number in progress and refresh the display."""
        self.temp_nums.append(key_number)
        temp_string = ''.join(self.temp_nums)
        if self.fin_nums:
            self.result_field.setText(''.join(self.fin_nums) + temp_string)
        else:
            self.result_field.setText(temp_string)

    def func_press(self, operator):
        """Commit the pending number plus an operator into fin_nums."""
        temp_string = ''.join(self.temp_nums)
        self.fin_nums.append(temp_string)
        self.fin_nums.append(operator)
        self.temp_nums = []
        self.result_field.setText(''.join(self.fin_nums))

    def func_result(self):
        """Evaluate the accumulated expression and show 'expression=result'."""
        expression = ''.join(self.fin_nums) + ''.join(self.temp_nums)
        try:
            # Input can only contain keypad tokens, so eval() is confined
            # to plain arithmetic here.
            result = eval(expression)
        except (SyntaxError, ZeroDivisionError):
            # Incomplete expression (e.g. trailing operator) or division by
            # zero used to crash this slot; report it instead.
            self.result_field.setText('Error')
        else:
            self.result_field.setText(expression + '=' + str(result))
        # Bug fix: start the next calculation from a clean slate; stale
        # state previously bled into the following entry.
        self.temp_nums = []
        self.fin_nums = []

    def clear_calc(self):
        """Reset the display and all accumulated state."""
        self.result_field.clear()
        self.temp_nums = []
        self.fin_nums = []
# Application bootstrap: create the Qt application, show the calculator
# window, apply the cross-platform "Fusion" widget style, and enter the
# event loop (blocks until the window is closed).
app = qtw.QApplication([])
mw = MainWindow()
app.setStyle(qtw.QStyleFactory.create('Fusion'))
app.exec_()
320201156ab702e92e713d799650223bbc9cb5e5 | 3,224 | py | Python | data/external/repositories/202553/Grasp-and-Lift-master/carl/nn2/stacknn2.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/202553/Grasp-and-Lift-master/carl/nn2/stacknn2.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/202553/Grasp-and-Lift-master/carl/nn2/stacknn2.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | from xgb_classifier import xgb_classifier
import pandas as pd
import numpy as np
from sklearn.cross_validation import KFold
from sklearn import preprocessing
from collections import Counter
from sklearn import metrics
import h5py
########################################
# this stacking uses the base model vali1_cv.csv
# which uses past feature only
# vali1_cv auc: 0.911
# after stacking
# stack1_cv auc: 0.931
#
########################################
def train_predict(X, y, Xt, yt=None, c=1):
    """Fit an XGBoost classifier on (X, y) and predict on Xt.

    X, y   : training features / labels.
    Xt, yt : test features and (optional) labels for monitoring.
    c      : classifier id; only 1 (the tuned XGBoost config) is supported.

    Raises ValueError for an unsupported `c` (previously the function
    silently returned None). `yt` defaults to None instead of a shared
    mutable list.
    """
    if yt is None:
        yt = []
    if c != 1:
        raise ValueError('unsupported classifier id: %r' % (c,))
    clf = xgb_classifier(num_round=45, eta=0.1, min_child_weight=20,
                         depth=20, subsample=0.1, col=0.7)
    return clf.train_predict(X, y, Xt, yt)
import pickle
#pickle.dump(rf,open('yxgbc_fea1.p','w'))
def myauc(y, pred):
    """Area under the ROC curve for binary labels (positive class = 1).

    roc_auc_score gives the same value as building the curve with
    roc_curve(pos_label=1) and integrating with auc, in a single call.
    """
    return metrics.roc_auc_score(y, pred)
# ------------------------------------------------------------------
# Load the level-1 out-of-fold predictions (stacking features) and the
# ground-truth event labels for series 7-8 of subjects 1-12.
# ------------------------------------------------------------------
import sys
#subname='../btb/cv13_15_19_smooth.csv'
subname='subm_cv.csv' # this has future information
sub=pd.read_csv(subname,index_col=0)
print 'pred', sub.shape
subjects = range(1,13)
real=[]
for subject in subjects:
    fnames = ['../../data/train/subj%d_series%d_events.csv' % (subject,i) for i in range(7,9)]
    for fname in fnames:
        labels= pd.read_csv(fname,index_col=0)
        real.append(labels)
        print fname,labels.shape
# Stack all subjects/series into one label frame aligned with `sub`.
real=pd.concat(real)
print 'combined', real.shape
def gendata(X):
    """Augment X with trailing-window column means over four horizons.

    For each row i, four extra feature groups are appended, each the
    column-wise mean of a past slice of X:
      recent : rows [i-150,  i)      (filled once i >= 150)
      mid    : rows [i-300,  i-150)  (filled once i >= 300)
      older  : rows [i-600,  i-300)  (filled once i >= 600)
      oldest : rows [i-1000, i-600)  (filled once i >= 1000)
    Rows without enough history keep zeros for the missing groups.
    Returns np.hstack((X, mid, older, recent, oldest)).
    """
    w1, w2, w3, w4 = 150, 300, 600, 1000
    recent = np.zeros(X.shape)
    mid = np.zeros(X.shape)
    older = np.zeros(X.shape)
    oldest = np.zeros(X.shape)
    for i in range(X.shape[0]):
        if i >= w1:
            recent[i, :] = np.mean(X[i - w1:i, :], axis=0)
        if i >= w2:
            mid[i, :] = np.mean(X[i - w2:i - w1, :], axis=0)
        if i >= w3:
            older[i, :] = np.mean(X[i - w3:i - w2, :], axis=0)
        if i >= w4:
            oldest[i, :] = np.mean(X[i - w4:i - w3, :], axis=0)
    return np.hstack((X, mid, older, recent, oldest))
#X=np.array(sub[real.columns.values])
#X=gendata(X)
#h5f=h5py.File('h5file/stacknn2.h5','w')
#h5f.create_dataset('dataset_1', data=X)
#h5f.close()
# Load the pre-computed windowed feature matrix (output of gendata) from disk.
h5f=h5py.File('h5file/stacknn2.h5','r')
X=h5f['dataset_1'][:]
h5f.close()
# Append the features produced by the stack8 base model.
h5f=h5py.File('../vali-stack8/h5file/stack8cv.h5','r')
tmp=h5f['dataset_1'][:]
h5f.close()
X=np.hstack((X,tmp)) # sub11, stack8
#next time we run it, just load the data
#h5f=h5py.File('h5file/stack1cv.h5','r')
#X=h5f['dataset_1'][:]
#h5f.close()
# Two-fold CV: first half vs second half of the time-ordered rows.
X1=X[:X.shape[0]/2]
X2=X[X.shape[0]/2:]
print 'done',X.shape
xx=[]
subx=sub.copy()
# For every event column: train on one half, predict the other, in both
# directions; record the AUC of each direction and write the stacked
# out-of-fold predictions back into subx.
for name in real.columns.values:
    y=np.array(real[name])
    y1=y[:X.shape[0]/2]
    y2=y[X.shape[0]/2:]
    yr2=train_predict(X1,y1,X2,yt=y2,c=1)
    xx.append(myauc(y2,yr2))
    print name, xx[-1]
    yr1=train_predict(X2,y2,X1,yt=y1,c=1)
    xx.append(myauc(y1,yr1))
    subx[name]=np.concatenate((yr1,yr2))
    print name, xx[-1]
print 'average',np.mean(xx)
subx.to_csv('stacknn2.csv')
| 27.092437 | 100 | 0.608561 |
7a1463f649ad23cf78fe438bfa94cdabbc25fe46 | 2,297 | py | Python | src/user/models.py | saurabh1e/FlaskStructure | 5291e2c6d994863b7962b07a9fab8b8580405c56 | [
"MIT"
] | 3 | 2016-07-19T14:55:23.000Z | 2022-02-28T03:27:32.000Z | src/user/models.py | saurabh1e/FlaskStructure | 5291e2c6d994863b7962b07a9fab8b8580405c56 | [
"MIT"
] | null | null | null | src/user/models.py | saurabh1e/FlaskStructure | 5291e2c6d994863b7962b07a9fab8b8580405c56 | [
"MIT"
] | null | null | null | from datetime import datetime
from sqlalchemy.ext.hybrid import hybrid_property
from flask_security import RoleMixin, UserMixin
from src import db, BaseMixin, ReprMixin
# Association table backing the many-to-many User <-> Role relationship
# (referenced as `secondary` by User.roles below).
roles_users = db.Table('roles_users',
                       db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
                       db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin):
    """A named permission role, as expected by Flask-Security's RoleMixin."""
    # NOTE(review): unlike User/UserProfile this model does not use
    # BaseMixin, so it declares its own integer primary key.
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))
class User(db.Model, BaseMixin, UserMixin, ReprMixin):
    """Account record integrated with Flask-Security via UserMixin."""

    email = db.Column(db.String(127), unique=True, nullable=False)
    password = db.Column(db.String(255), default='', nullable=False)
    username = db.Column(db.String(127), nullable=True)
    user_type = db.Column(db.Enum('student', 'counsellor'), default='counsellor')

    # Columns whose names match Flask-Security's confirmable/trackable
    # features (login auditing) — presumably maintained by that extension.
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    last_login_at = db.Column(db.DateTime())
    current_login_at = db.Column(db.DateTime())
    last_login_ip = db.Column(db.String(45))
    current_login_ip = db.Column(db.String(45))
    login_count = db.Column(db.Integer)

    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))

    @hybrid_property
    def name(self):
        """Display name derived from the linked profile.

        Returns "first last" when both parts exist, the first name alone
        otherwise, and None when no usable profile is attached.
        """
        if self.user_profile and self.user_profile.first_name:
            if self.user_profile.last_name:
                # Bug fix: the two parts used to be concatenated without a
                # separator, producing e.g. "JaneDoe".
                return self.user_profile.first_name + ' ' + self.user_profile.last_name
            return self.user_profile.first_name
class UserProfile(db.Model, BaseMixin):
    """Personal details attached to a User (linked via user_id)."""

    first_name = db.Column(db.String(255))
    last_name = db.Column(db.String(255))
    gender = db.Column(db.Enum('male', 'female', 'ns'), default='ns')
    dob = db.Column(db.DateTime, default=db.func.current_timestamp(), nullable=True)
    profile_picture = db.Column(db.String(512), nullable=True)
    address = db.Column(db.Integer)

    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship('User', lazy='subquery', backref='user_profile')

    @hybrid_property
    def age(self):
        """Age in completed years as of today; 0 when dob is unset.

        Bug fix: the previous year-only subtraction overstated the age by
        one for anyone whose birthday has not yet occurred this year.
        """
        if not self.dob:
            return 0
        today = datetime.now()
        return today.year - self.dob.year - (
            (today.month, today.day) < (self.dob.month, self.dob.day))
| 38.283333 | 84 | 0.668263 |
f21ae6b4a504c5a18a9d25124423aedc9a7aff88 | 9,062 | py | Python | examples/pytorch/graphsage/train_sampling.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | null | null | null | examples/pytorch/graphsage/train_sampling.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | null | null | null | examples/pytorch/graphsage/train_sampling.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | null | null | null | import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
from _thread import start_new_thread
from functools import wraps
from dgl.data import RedditDataset
import tqdm
import traceback
from load_graph import load_reddit, load_ogb, inductive_split
class SAGE(nn.Module):
    """Multi-layer GraphSAGE model for block-based minibatch training.

    Layer widths: in_feats -> n_hidden (x n_layers-1) -> n_classes, all
    with mean aggregation. Activation and dropout are applied after every
    layer except the last.
    """

    def __init__(self,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout):
        super().__init__()
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.n_classes = n_classes
        self.layers = nn.ModuleList()
        self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
        for i in range(1, n_layers - 1):
            self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
        self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
        self.dropout = nn.Dropout(dropout)
        self.activation = activation

    def forward(self, blocks, x):
        """Run the sampled blocks through the layers, one block per layer."""
        h = x
        for l, (layer, block) in enumerate(zip(self.layers, blocks)):
            h = layer(block, h)
            if l != len(self.layers) - 1:
                h = self.activation(h)
                h = self.dropout(h)
        return h

    def inference(self, g, x, batch_size, device):
        """
        Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
        g : the entire graph.
        x : the input of entire node set.
        The inference code is written in a fashion that it could handle any number of nodes and
        layers.
        """
        # During inference with sampling, multi-layer blocks are very inefficient because
        # lots of computations in the first few layers are repeated.
        # Therefore, we compute the representation of all nodes layer by layer. The nodes
        # on each layer are of course splitted in batches.
        # TODO: can we standardize this?
        for l, layer in enumerate(self.layers):
            # Output buffer stays on CPU; results are copied back per batch.
            y = th.zeros(g.number_of_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)
            sampler = dgl.sampling.MultiLayerNeighborSampler([None])
            dataloader = dgl.sampling.NodeDataLoader(
                g,
                th.arange(g.number_of_nodes()),
                sampler,
                # Bug fix: this previously read the global args.batch_size,
                # silently ignoring the `batch_size` parameter.
                batch_size=batch_size,
                shuffle=True,
                drop_last=False,
                # NOTE(review): still depends on the module-level `args`;
                # consider threading num_workers through as a parameter.
                num_workers=args.num_workers)
            for input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):
                block = blocks[0]
                block = block.to(device)
                h = x[input_nodes].to(device)
                h = layer(block, h)
                if l != len(self.layers) - 1:
                    h = self.activation(h)
                    h = self.dropout(h)
                y[output_nodes] = h.cpu()
            x = y
        return y
def compute_acc(pred, labels):
    """Fraction of rows whose argmax over classes matches the label."""
    predicted_class = th.argmax(pred, dim=1)
    correct = predicted_class == labels.long()
    return correct.float().mean()
def evaluate(model, g, inputs, labels, val_nid, batch_size, device):
    """
    Evaluate the model on the validation set specified by ``val_nid``.
    g : The entire graph.
    inputs : The features of all the nodes.
    labels : The labels of all the nodes.
    val_nid : the node Ids for validation.
    batch_size : Number of nodes to compute at the same time.
    device : The GPU device to evaluate on.
    """
    model.eval()
    try:
        with th.no_grad():
            pred = model.inference(g, inputs, batch_size, device)
    finally:
        # Robustness fix: restore training mode even if inference raises,
        # so a failed evaluation doesn't leave the model stuck in eval().
        model.train()
    return compute_acc(pred[val_nid], labels[val_nid])
def load_subtensor(g, seeds, input_nodes, device):
    """Copy the minibatch's input features and seed labels to `device`.

    input_nodes index the feature rows the sampled blocks consume; seeds
    index the output nodes whose labels are supervised.
    """
    batch_features = g.ndata['features'][input_nodes].to(device)
    batch_labels = g.ndata['labels'][seeds].to(device)
    return batch_features, batch_labels
#### Entry point
def run(args, device, data):
    """Train GraphSAGE with neighbour sampling and periodically evaluate.

    args : parsed CLI namespace (fan_out, batch_size, lr, ... -- see the
        argparser at the bottom of this file).
    device : device the model and each mini-batch are moved to.
    data : (in_feats, n_classes, train_g, val_g, test_g) tuple as packed by
        the ``__main__`` block.
    """
    # Unpack data
    in_feats, n_classes, train_g, val_g, test_g = data
    train_nid = th.nonzero(train_g.ndata['train_mask'], as_tuple=True)[0]
    val_nid = th.nonzero(val_g.ndata['val_mask'], as_tuple=True)[0]
    # Test nodes are everything that is neither train nor validation.
    test_nid = th.nonzero(~(test_g.ndata['train_mask'] | test_g.ndata['val_mask']), as_tuple=True)[0]
    # Create PyTorch DataLoader for constructing blocks
    sampler = dgl.sampling.MultiLayerNeighborSampler(
        [int(fanout) for fanout in args.fan_out.split(',')])
    dataloader = dgl.sampling.NodeDataLoader(
        train_g,
        train_nid,
        sampler,
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers)
    # Define model and optimizer
    model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu, args.dropout)
    model = model.to(device)
    loss_fcn = nn.CrossEntropyLoss()
    loss_fcn = loss_fcn.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Training loop
    avg = 0          # accumulated epoch time, skipping the first 5 warm-up epochs
    iter_tput = []   # per-step throughput (seeds/sec) for the speed log
    for epoch in range(args.num_epochs):
        tic = time.time()
        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        for step, (input_nodes, seeds, blocks) in enumerate(dataloader):
            tic_step = time.time()
            # Load the input features as well as output labels
            batch_inputs, batch_labels = load_subtensor(train_g, seeds, input_nodes, device)
            blocks = [block.to(device) for block in blocks]
            # Compute loss and prediction
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iter_tput.append(len(seeds) / (time.time() - tic_step))
            if step % args.log_every == 0:
                acc = compute_acc(batch_pred, batch_labels)
                gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
                print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'.format(
                    epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))
        toc = time.time()
        print('Epoch Time(s): {:.4f}'.format(toc - tic))
        if epoch >= 5:
            avg += toc - tic
        if epoch % args.eval_every == 0 and epoch != 0:
            eval_acc = evaluate(model, val_g, val_g.ndata['features'], val_g.ndata['labels'], val_nid, args.batch_size, device)
            print('Eval Acc {:.4f}'.format(eval_acc))
            test_acc = evaluate(model, test_g, test_g.ndata['features'], test_g.ndata['labels'], test_nid, args.batch_size, device)
            print('Test Acc: {:.4f}'.format(test_acc))
    # Bug fix: the original divided unconditionally, which raised
    # ZeroDivisionError (or printed a meaningless number) when
    # --num-epochs <= 5, because ``avg`` only accumulates from epoch 5 on.
    if args.num_epochs > 5:
        print('Avg epoch time: {}'.format(avg / (epoch - 4)))
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters, load the dataset, and train.
    argparser = argparse.ArgumentParser("multi-gpu training")
    argparser.add_argument('--gpu', type=int, default=0,
                           help="GPU device ID. Use -1 for CPU training")
    argparser.add_argument('--dataset', type=str, default='reddit')
    argparser.add_argument('--num-epochs', type=int, default=20)
    argparser.add_argument('--num-hidden', type=int, default=16)
    argparser.add_argument('--num-layers', type=int, default=2)
    # Comma-separated neighbour fan-out, one entry per layer.
    argparser.add_argument('--fan-out', type=str, default='10,25')
    argparser.add_argument('--batch-size', type=int, default=1000)
    argparser.add_argument('--log-every', type=int, default=20)
    argparser.add_argument('--eval-every', type=int, default=5)
    argparser.add_argument('--lr', type=float, default=0.003)
    argparser.add_argument('--dropout', type=float, default=0.5)
    argparser.add_argument('--num-workers', type=int, default=0,
                           help="Number of sampling processes. Use 0 for no extra process.")
    argparser.add_argument('--inductive', action='store_true',
                           help="Inductive learning setting")
    args = argparser.parse_args()
    if args.gpu >= 0:
        device = th.device('cuda:%d' % args.gpu)
    else:
        device = th.device('cpu')
    if args.dataset == 'reddit':
        g, n_classes = load_reddit()
    elif args.dataset == 'ogb-product':
        g, n_classes = load_ogb('ogbn-products')
    else:
        raise Exception('unknown dataset')
    in_feats = g.ndata['features'].shape[1]
    g = dgl.as_heterograph(g)
    if args.inductive:
        # Inductive setting: disjoint train/val/test graphs.
        train_g, val_g, test_g = inductive_split(g)
    else:
        # Transductive setting: all three names alias one shared graph.
        train_g = val_g = test_g = g
    # Materialise sparse formats up front so workers don't rebuild them.
    train_g.create_format_()
    val_g.create_format_()
    test_g.create_format_()
    # Pack data
    data = in_feats, n_classes, train_g, val_g, test_g
    run(args, device, data)
| 37.916318 | 137 | 0.621717 |
20da5bd136e173c2bea7ae2a9f689aecb0d8d23d | 6,529 | py | Python | assignment3/cs231n/simclr/utils.py | shambhu1998/cs231n | cf169f6fea090187787a585c51c624ccd4d9b721 | [
"MIT"
] | null | null | null | assignment3/cs231n/simclr/utils.py | shambhu1998/cs231n | cf169f6fea090187787a585c51c624ccd4d9b721 | [
"MIT"
] | null | null | null | assignment3/cs231n/simclr/utils.py | shambhu1998/cs231n | cf169f6fea090187787a585c51c624ccd4d9b721 | [
"MIT"
] | null | null | null | import pandas as pd
import torch
import torch.optim as optim
from thop import profile, clever_format
from torch.utils.data import DataLoader
from tqdm import tqdm
from .contrastive_loss import *
def train(model, data_loader, train_optimizer, epoch, epochs, batch_size=32, temperature=0.5, device='cuda'):
    """Trains the model defined in ./model.py for one epoch.

    Inputs:
    - model: Model class object as defined in ./model.py.
    - data_loader: torch.utils.data.DataLoader object; loads in training data. You can assume the loaded data has been augmented.
    - train_optimizer: torch.optim.Optimizer object; applies an optimizer to training.
    - epoch: integer; current epoch number.
    - epochs: integer; total number of epochs.
    - batch_size: Number of training samples per batch.
    - temperature: float; temperature (tau) parameter used in simclr_loss_vectorized.
    - device: the device name to define torch tensors.

    Returns:
    - The average loss.
    """
    model.train()
    total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)
    for data_pair in train_bar:
        x_i, x_j, target = data_pair
        x_i, x_j = x_i.to(device), x_j.to(device)
        # Run both augmented views through the model.  Fix: call the module
        # directly instead of ``model.forward`` so that hooks registered on
        # the module are honoured (nn.Module.__call__ dispatches to forward).
        _, out_left = model(x_i)
        _, out_right = model(x_j)
        # Contrastive loss between the two projections of the same images.
        loss = simclr_loss_vectorized(out_left, out_right, temperature, device)
        train_optimizer.zero_grad()
        loss.backward()
        train_optimizer.step()
        total_num += batch_size
        total_loss += loss.item() * batch_size
        train_bar.set_description('Train Epoch: [{}/{}] Loss: {:.4f}'.format(epoch, epochs, total_loss / total_num))
    return total_loss / total_num
def train_val(model, data_loader, train_optimizer, epoch, epochs, device='cuda'):
    """One epoch of supervised training, or evaluation when no optimizer is given.

    Passing ``train_optimizer=None`` switches to eval mode with gradients
    disabled.  Returns (mean loss, top-1 accuracy %, top-5 accuracy %).
    """
    is_train = train_optimizer is not None
    model.train() if is_train else model.eval()
    loss_criterion = torch.nn.CrossEntropyLoss()
    total_loss, total_correct_1, total_correct_5, total_num, data_bar = 0.0, 0.0, 0.0, 0, tqdm(data_loader)
    # Gradients are only tracked in training mode.
    with (torch.enable_grad() if is_train else torch.no_grad()):
        for data, target in data_bar:
            data, target = data.to(device), target.to(device)
            out = model(data)
            loss = loss_criterion(out, target)
            if is_train:
                train_optimizer.zero_grad()
                loss.backward()
                train_optimizer.step()
            total_num += data.size(0)
            total_loss += loss.item() * data.size(0)
            # Class indices sorted by descending score, used for top-k accuracy.
            prediction = torch.argsort(out, dim=-1, descending=True)
            total_correct_1 += torch.sum((prediction[:, 0:1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
            total_correct_5 += torch.sum((prediction[:, 0:5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
            data_bar.set_description('{} Epoch: [{}/{}] Loss: {:.4f} ACC@1: {:.2f}% ACC@5: {:.2f}%'
                                     .format('Train' if is_train else 'Test', epoch, epochs, total_loss / total_num,
                                             total_correct_1 / total_num * 100, total_correct_5 / total_num * 100))
    return total_loss / total_num, total_correct_1 / total_num * 100, total_correct_5 / total_num * 100
def test(model, memory_data_loader, test_data_loader, epoch, epochs, c, temperature=0.5, k=200, device='cuda'):
    """Evaluate by temperature-weighted k-nearest-neighbour classification.

    memory_data_loader : labelled bank the neighbours are drawn from.
    test_data_loader : samples to classify.
    c : number of classes.
    temperature : softening factor applied to the similarity weights.
    k : number of neighbours consulted per test sample.
    Returns (top-1 accuracy %, top-5 accuracy %).
    """
    model.eval()
    total_top1, total_top5, total_num, feature_bank = 0.0, 0.0, 0, []
    with torch.no_grad():
        # generate feature bank
        for data, _, target in tqdm(memory_data_loader, desc='Feature extracting'):
            feature, out = model(data.to(device))
            feature_bank.append(feature)
        # [D, N]
        feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
        # [N]
        feature_labels = torch.tensor(memory_data_loader.dataset.targets, device=feature_bank.device)
        # loop test data to predict the label by weighted knn search
        test_bar = tqdm(test_data_loader)
        for data, _, target in test_bar:
            data, target = data.to(device), target.to(device)
            feature, out = model(data)
            total_num += data.size(0)
            # compute cos similarity between each feature vector and feature bank ---> [B, N]
            sim_matrix = torch.mm(feature, feature_bank)
            # [B, K]
            sim_weight, sim_indices = sim_matrix.topk(k=k, dim=-1)
            # [B, K] -- labels of the k nearest bank entries for each sample.
            sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)
            sim_weight = (sim_weight / temperature).exp()
            # counts for each class
            one_hot_label = torch.zeros(data.size(0) * k, c, device=device)
            # [B*K, C]
            one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)
            # weighted score ---> [B, C]
            pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.unsqueeze(dim=-1), dim=1)
            pred_labels = pred_scores.argsort(dim=-1, descending=True)
            total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
            total_top5 += torch.sum((pred_labels[:, :5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
            test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}% Acc@5:{:.2f}%'
                                     .format(epoch, epochs, total_top1 / total_num * 100, total_top5 / total_num * 100))
    return total_top1 / total_num * 100, total_top5 / total_num * 100
| 49.462121 | 129 | 0.566856 |
482a16e6957cf27de79d36d130c6f98523ce4b52 | 557 | py | Python | activity_periods_api/migrations/0004_auto_20200505_1503.py | narendra119/user-activity-api | 74e3ab0d6da668ec9151f9dd70447fc360aef613 | [
"MIT"
] | null | null | null | activity_periods_api/migrations/0004_auto_20200505_1503.py | narendra119/user-activity-api | 74e3ab0d6da668ec9151f9dd70447fc360aef613 | [
"MIT"
] | 10 | 2020-06-05T23:41:31.000Z | 2022-03-12T00:27:36.000Z | activity_periods_api/migrations/0004_auto_20200505_1503.py | narendra119/user-activity-api | 74e3ab0d6da668ec9151f9dd70447fc360aef613 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-05-05 15:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment above).  Migration
    # files should not be hand-edited once applied to a database.
    # Must be applied after migration 0003 of this app.
    dependencies = [
        ('activity_periods_api', '0003_auto_20200505_1138'),
    ]
    # Re-declare ActivityPeriod.member as a cascading FK to Member with the
    # ``activity_periods`` reverse accessor.
    operations = [
        migrations.AlterField(
            model_name='activityperiod',
            name='member',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activity_periods', to='activity_periods_api.Member'),
        ),
    ]
| 27.85 | 149 | 0.651706 |
857834e847cf78be9dffde4a474acf237db481a9 | 7,405 | py | Python | src/covid_model_seiir_pipeline/pipeline/diagnostics/task/scatters.py | ihmeuw/covid-model-seiir-pipeline | 9ec71e4156fe47c14379127936c5131636544b0d | [
"BSD-3-Clause"
] | 23 | 2020-05-25T00:20:32.000Z | 2022-01-18T10:32:09.000Z | src/covid_model_seiir_pipeline/pipeline/diagnostics/task/scatters.py | ihmeuw/covid-model-seiir-pipeline | 9ec71e4156fe47c14379127936c5131636544b0d | [
"BSD-3-Clause"
] | 15 | 2020-06-15T16:34:22.000Z | 2021-08-15T22:11:37.000Z | src/covid_model_seiir_pipeline/pipeline/diagnostics/task/scatters.py | ihmeuw/covid-model-seiir-pipeline | 9ec71e4156fe47c14379127936c5131636544b0d | [
"BSD-3-Clause"
] | 11 | 2020-05-24T21:57:29.000Z | 2021-09-07T18:21:15.000Z | from pathlib import Path
import click
import matplotlib.lines as mlines
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import pandas as pd
import tqdm
from covid_model_seiir_pipeline.lib import (
cli_tools,
static_vars,
)
from covid_model_seiir_pipeline.pipeline.diagnostics.specification import (
DiagnosticsSpecification,
ScattersAxisSpecification,
)
from covid_model_seiir_pipeline.pipeline.postprocessing import (
PostprocessingSpecification,
PostprocessingDataInterface,
)
import matplotlib.pyplot as plt
# Qualitative palette; calling COLOR_MAP(i) returns the i-th hex colour --
# it is the bound __getitem__ of the list, not the list itself.
COLOR_MAP = ['#7F3C8D', '#11A579',
             '#3969AC', '#F2B701',
             '#E73F74', '#80BA5A',
             '#E68310', '#008695',
             '#CF1C90', '#f97b72',
             '#4b4b8f', '#A5AA99'].__getitem__
# Shared figure styling.
AX_LABEL_FONTSIZE = 16
TITLE_FONTSIZE = 24
FIG_SIZE = (20, 8)
GRID_SPEC_MARGINS = {'top': 0.92, 'bottom': 0.08}
logger = cli_tools.task_performance_logger
def run_scatters(diagnostics_version: str, name: str, progress_bar: bool) -> None:
    """Build the cumulative-deaths scatter PDF for the scatters config ``name``."""
    logger.info(f'Starting scatters for version {diagnostics_version}, name {name}.', context='setup')
    diagnostics_spec = DiagnosticsSpecification.from_path(
        Path(diagnostics_version) / static_vars.DIAGNOSTICS_SPECIFICATION_FILE
    )
    # Select the single scatters configuration matching ``name``.
    scatters_spec = [spec for spec in diagnostics_spec.scatters if spec.name == name].pop()
    logger.info('Loading plotting data.', context='read')
    pp_spec = PostprocessingSpecification.from_path(
        Path(scatters_spec.x_axis.version) / static_vars.POSTPROCESSING_SPECIFICATION_FILE
    )
    pp_di = PostprocessingDataInterface.from_specification(pp_spec)
    if pp_di.is_counties_run():
        logger.info('No scatters for counties')
        return
    hierarchy = pp_di.load_hierarchy()
    name_map = hierarchy.set_index('location_id').location_ascii_name
    deaths_x = get_deaths(scatters_spec.x_axis)
    deaths_y = get_deaths(scatters_spec.y_axis)
    logger.info('Processing inputs.', context='transform')
    plotting_data = pd.concat([deaths_x, deaths_y], axis=1)
    # Relative deviation of y from x; flag locations above 25% / 45%.
    pc = np.abs((deaths_y - deaths_x) / deaths_x)
    plotting_data['Above 25'] = pc > 0.25
    plotting_data['Above 45'] = pc > 0.45
    plotting_data['location_name'] = name_map.reindex(plotting_data.index)
    with PdfPages(f'{diagnostics_version}/cumulative_deaths_scatters_{name}.pdf') as pdf:
        make_scatter_pages(plotting_data, hierarchy, deaths_x.name, deaths_y.name, progress_bar, pdf)
    logger.report()
def get_deaths(axis_spec: ScattersAxisSpecification):
    """Load mean cumulative deaths per location for one scatter axis.

    Returns a Series indexed by location_id whose name ("<label> <date>")
    doubles as the axis label on the plots.
    """
    pp_spec = PostprocessingSpecification.from_path(
        Path(axis_spec.version) / static_vars.POSTPROCESSING_SPECIFICATION_FILE
    )
    pp_di = PostprocessingDataInterface.from_specification(pp_spec)
    # Default to the last date in the unscaled data; an explicit axis date
    # overrides it.
    data_date = pp_di.load_full_data_unscaled().reset_index().date.max()
    data_date = data_date if not axis_spec.date else pd.Timestamp(axis_spec.date)
    deaths = pp_di.load_output_summaries(axis_spec.scenario, 'cumulative_deaths').reset_index()
    deaths = (deaths
              .loc[deaths.date == data_date]
              .set_index('location_id')['mean']
              .rename(f'{axis_spec.label.replace("_", " ").title()} {str(data_date.date())}'))
    return deaths
def make_ax_plot(ax, data, xlabel, ylabel, threshold_col, color):
    """Scatter y vs x on ``ax`` with a y=x reference line.

    Rows where ``threshold_col`` is True are drawn in ``color`` and
    annotated with their location name; the rest are black.
    """
    # Axis range: 0 to 25% past the largest x value; also the y=x line.
    line_data = [0.0, 1.25 * data[xlabel].max()]
    ax.plot(line_data, line_data, color='k', linewidth=3)
    if threshold_col:
        t_data = data[data[threshold_col]]
        data = data[~data[threshold_col]]
    else:
        # No threshold column: nothing is highlighted.
        t_data = pd.DataFrame(columns=data.columns)
    ax.scatter(data[xlabel], data[ylabel], color='k')
    ax.scatter(t_data[xlabel], t_data[ylabel], color=color)
    for i, row in t_data.iterrows():
        # Offset the label slightly so it does not sit on top of the marker.
        ax.annotate(row.location_name, (row[xlabel] + 0.005 * line_data[1], row[ylabel] + 0.005 * line_data[1]),
                    color=color, fontsize=12)
    ax.set_xlim(*line_data)
    ax.set_xlabel(xlabel, fontsize=AX_LABEL_FONTSIZE)
    ax.set_ylim(*line_data)
    ax.set_ylabel(ylabel, fontsize=AX_LABEL_FONTSIZE)
def make_legend_handles(threshold_col, color):
    """Build legend marker handles for a scatter page.

    With a threshold column, the legend shows a black "Below ...%" entry and
    a coloured "Above ...%" entry; otherwise a single black "Data" entry.
    """
    if threshold_col:
        entries = [
            ('k', f"{threshold_col.replace('Above', 'Below')}%"),
            (color, f"{threshold_col}%"),
        ]
    else:
        entries = [('k', 'Data')]
    handles = []
    # Note: loop names deliberately do not shadow the ``color`` parameter.
    for entry_color, entry_label in entries:
        handles.append(mlines.Line2D([], [], color=entry_color, label=entry_label,
                                     linestyle='', marker='o'))
    return handles
def make_scatter_page(data, fig_title, xlabel, ylabel, threshold_col='', pdf=None):
    """Draw one page: linear-scale and log10-scale x/y scatters side by side.

    If ``pdf`` is given the figure is appended to it and closed, otherwise
    it is shown interactively.
    """
    fig = plt.figure(figsize=FIG_SIZE)
    gs = fig.add_gridspec(
        nrows=1, ncols=2,
        wspace=0.2,
    )
    gs.update(**GRID_SPEC_MARGINS)
    # Highlight colour depends on which deviation threshold this page uses.
    color = COLOR_MAP(0) if threshold_col == 'Above 25' else COLOR_MAP(1)
    ax_normal = fig.add_subplot(gs[0])
    make_ax_plot(ax_normal, data, xlabel, ylabel, threshold_col, color)
    # Right panel: the same scatter on log10-transformed axes.
    log_data = data.copy()
    log_xlabel, log_ylabel = f'{xlabel} (log)', f'{ylabel} (log)'
    log_data[[log_xlabel, log_ylabel]] = np.log10(log_data[[xlabel, ylabel]])
    ax_log = fig.add_subplot(gs[1])
    make_ax_plot(ax_log, log_data, log_xlabel, log_ylabel, threshold_col, color)
    fig.suptitle(fig_title, fontsize=TITLE_FONTSIZE)
    fig.legend(handles=make_legend_handles(threshold_col, color),
               loc='lower center',
               bbox_to_anchor=(0.5, 0),
               fontsize=AX_LABEL_FONTSIZE,
               frameon=False,
               ncol=2 if threshold_col else 1)
    if pdf is not None:
        pdf.savefig(fig)
        plt.close(fig)
    else:
        plt.show()
def make_scatter_pages(plotting_data, hierarchy, xlabel, ylabel, progress_bar: bool = False, pdf=None):
    """Emit one scatter page per aggregate: global, national, then the
    children of every non-leaf location, with per-page outlier thresholds."""
    # (row filter, page label, threshold column) triples, drawn in order.
    # Location id 1 is labelled 'Global' here; level 3 rows are nations.
    filters_labels_and_thresholds = [
        ([1], 'Global', 'Above 45'),
        (plotting_data.index.isin(hierarchy[hierarchy.level == 3].location_id.tolist()), 'National', 'Above 45')
    ]
    # All aggregate (non-most-detailed) locations, ordered by level then sort order.
    agg_locations = (hierarchy[(hierarchy.most_detailed == 0) & (hierarchy.level >= 2)]
                     .sort_values(['level', 'sort_order'])
                     .location_id
                     .tolist())
    name_map = hierarchy.set_index('location_id').location_ascii_name
    for agg_location in agg_locations:
        child_locs = hierarchy[hierarchy.parent_id == agg_location].location_id.tolist()
        filters_labels_and_thresholds.append((
            plotting_data.index.isin(child_locs), name_map.loc[agg_location], 'Above 25'
        ))
    for loc_filter, label, threshold_col in tqdm.tqdm(filters_labels_and_thresholds, disable=not progress_bar):
        make_scatter_page(
            plotting_data.loc[loc_filter],
            f'Cumulative Deaths Compare {label}',
            xlabel, ylabel,
            threshold_col=threshold_col,
            pdf=pdf
        )
@click.command()
@cli_tools.with_task_diagnostics_version
@cli_tools.with_name
@cli_tools.with_progress_bar
@cli_tools.add_verbose_and_with_debugger
def scatters(diagnostics_version: str, name: str,
             progress_bar: bool, verbose: int, with_debugger: bool):
    """Produce scatters corresponding to the configuration associated with NAME"""
    cli_tools.configure_logging_to_terminal(verbose)
    # Wrap the worker so failures are logged (and optionally drop into a debugger).
    run = cli_tools.handle_exceptions(run_scatters, logger, with_debugger)
    run(diagnostics_version=diagnostics_version,
        name=name,
        progress_bar=progress_bar)
if __name__ == '__main__':
    scatters()
| 37.025 | 112 | 0.687914 |
7547568bd63236e5b4bb5528659d3389a5835aec | 808 | py | Python | uvicore/contracts/routes-OLD.py | coboyoshi/uvicore | 9cfdeeac83000b156fe48f068b4658edaf51c8de | [
"MIT"
] | 11 | 2021-03-22T22:07:49.000Z | 2022-03-08T16:18:33.000Z | uvicore/contracts/routes-OLD.py | coboyoshi/uvicore | 9cfdeeac83000b156fe48f068b4658edaf51c8de | [
"MIT"
] | 12 | 2021-03-04T05:51:24.000Z | 2021-09-22T05:16:18.000Z | uvicore/contracts/routes-OLD.py | coboyoshi/uvicore | 9cfdeeac83000b156fe48f068b4658edaf51c8de | [
"MIT"
] | 2 | 2021-03-25T14:49:56.000Z | 2021-11-17T23:20:29.000Z | from abc import ABC, abstractmethod
from typing import Any, Dict, Generic, List, TypeVar
from uvicore.contracts import Application, Package
# Generic Router (APIRouter or WebRouter)
R = TypeVar('R')
# NOTE(review): everything below is commented-out legacy scaffolding for a
# ``Routes`` contract; the ``R`` TypeVar above is the only live code in this
# section.  Presumably kept for reference -- confirm before deleting.
# class Routes(ABC):
#     @abstractmethod
#     def register()
# Old direct routeint
# class Routes(Generic[R], ABC):
#     @property
#     @abstractmethod
#     def app(self) -> Application: pass
#     @property
#     @abstractmethod
#     def package(self) -> Package: pass
#     @property
#     @abstractmethod
#     def Router(self) -> R: pass
#     @property
#     @abstractmethod
#     def prefix(self) -> str: pass
#     @abstractmethod
#     def include(self, module, *, prefix: str = '', tags: List[str] = None) -> None:
#         """Include a new router object"""
#         pass
| 17.565217 | 85 | 0.618812 |
bb5e8d8c2d2e0f2d2871ef9712931947e41d37c5 | 1,297 | py | Python | examples/stream/playtoredis(stream)/Blockchain_to_Redis.py | Ucen-Blockchain/streamplay | 3864881d5960fa8915af197ada4bf1225a6742f9 | [
"MIT"
] | null | null | null | examples/stream/playtoredis(stream)/Blockchain_to_Redis.py | Ucen-Blockchain/streamplay | 3864881d5960fa8915af197ada4bf1225a6742f9 | [
"MIT"
] | 4 | 2019-01-30T09:10:30.000Z | 2019-02-01T08:57:52.000Z | examples/stream/playtoredis(stream)/Blockchain_to_Redis.py | Ucen-Blockchain/streamplay | 3864881d5960fa8915af197ada4bf1225a6742f9 | [
"MIT"
] | null | null | null | import ast
import configparser
import os
import sys
from steem.steem import Steemd
from steem.blockchain import Blockchain
from streamplay.db import redisdb
from streamplay.utils import read_config, silence_stdout
def read_config():
    """Load steem endpoints and redis connection settings from ``config.ini``.

    All options are assumed to be present and well-formed; handling of
    missing values is deliberately deferred.
    """
    parser = configparser.ConfigParser()
    parser.read('config.ini')
    # The endpoints option is stored as a Python list literal.
    endpoint_list = ast.literal_eval(parser.get('steem-blockchain', 'endpoints'))
    redis_host = parser.get('redis-server', 'hostname')
    redis_port = parser.getint('redis-server', 'portnumber')
    redis_password = parser.get('redis-server', 'password')
    return endpoint_list, redis_host, redis_port, redis_password
def connect_to_redis(hostname, portnumber, password=''):
    """Build a ``redisdb.RedisDB`` instance, open its connection, and return it."""
    database = redisdb.RedisDB(
        hostname=hostname, portnumber=portnumber, password=password
    )
    # Open the underlying connection before handing the instance back.
    database.connect_to_db()
    return database
if __name__ == "__main__":
    # Wire the configured steem endpoints into a blockchain stream and
    # mirror it into redis.
    endpoints, hostname, portnumber, password = read_config()
    # NOTE(review): ``password`` is read but never passed to
    # ``connect_to_redis`` -- confirm the redis instance really runs
    # without authentication.
    r = connect_to_redis(hostname, portnumber)
    s = Steemd(nodes=endpoints)
    b = Blockchain(steemd_instance=s)
    try:
        r.pull_and_store_stream(b)
    except Exception:
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt, making the process impossible to stop cleanly.
        # The best-effort "ignore stream errors" behaviour is kept.
        pass
| 28.195652 | 77 | 0.683886 |
c02340feb81b20a8869932c429878797b2caed62 | 958 | py | Python | Udemy/Secao3/aula57.py | rafaelgama/Curso_Python | 908231de9de4a17f5aa829f2671fd88de9261eda | [
"MIT"
] | 1 | 2020-05-07T20:21:15.000Z | 2020-05-07T20:21:15.000Z | Udemy/Secao3/aula57.py | rafaelgama/Curso_Python | 908231de9de4a17f5aa829f2671fd88de9261eda | [
"MIT"
] | null | null | null | Udemy/Secao3/aula57.py | rafaelgama/Curso_Python | 908231de9de4a17f5aa829f2671fd88de9261eda | [
"MIT"
] | null | null | null | # Sistemas de perguntas e respostas com dicionários Python.
# Question-and-answer quiz driven by a nested dictionary: each entry maps a
# question label to its text, its lettered answer options and the key of
# the correct option.
perg = {
    'Pergunta 1': {
        'pergunta':'Quanto é 2+2?',
        'respostas': {'a':'1','b':'4','c':'5',},
        'resposta_certa':'b',
    },
    'Pergunta 2': {
        'pergunta':'Quanto é 3+2?',
        'respostas': {'a':'4','b':'10','c':'6',},
        'resposta_certa':'c',
    },
}
# Number of correct answers given so far.
res_certas = 0
for pk, pv in perg.items():
    # Show the question followed by its lettered options, then read a reply.
    print(f'{pk}: {pv["pergunta"]}')
    print('Respostas: ')
    for rk, rv in pv['respostas'].items():
        print(f'[{rk}]: {rv}')
    rep_usr = input('Sua Resposta: ')
    if rep_usr == pv['resposta_certa']:
        print('EEEEHHH!!! Você acertou!!')
        res_certas += 1
    else:
        print('IXIIIII!!! Voce Errou!')
    print()
# Final report: total questions, correct answers and the hit percentage.
qtd_perg = len(perg)
perc_act = res_certas/qtd_perg * 100
print(f'total de perguntas são {qtd_perg}. ')
print(f'Voce acertou {res_certas} respostas.')
print(f'Sua porcentagem de acerto foi de {perc_act:.2f}%.')
| 25.891892 | 59 | 0.553236 |
5affd3850f4784315ddea5be853626b159b0c8f4 | 1,340 | py | Python | business.py | Russian-Dev/TargetFinder2000 | 976dd9dab7cc5319137cfeaac1929438e5ab2636 | [
"Unlicense"
] | null | null | null | business.py | Russian-Dev/TargetFinder2000 | 976dd9dab7cc5319137cfeaac1929438e5ab2636 | [
"Unlicense"
] | null | null | null | business.py | Russian-Dev/TargetFinder2000 | 976dd9dab7cc5319137cfeaac1929438e5ab2636 | [
"Unlicense"
] | null | null | null | import os as o
import json
from urllib.request import urlopen
def osClear():
    """Clear the terminal: ``cls`` on Windows, ``clear`` elsewhere.

    Bug fix: the original called ``p.system()`` but ``p`` was never
    imported anywhere in this file, so every call raised ``NameError``.
    The intent was ``platform.system()``.
    """
    import platform  # local import keeps the module's import block untouched
    rOS = platform.system()
    if rOS in ("Windows", "NT"):
        o.system("cls")
    else:
        o.system("clear")
# Prompt for the target address, then fetch its metadata from ipinfo.io.
osClear()
print('____________________________________________________________________')
target = input (" Target IP: ")
print(f'+------------------------------------------------------------------+\n')
apiUrl = "https://ipinfo.io/" + target + "/json"
# NOTE(review): ``target`` is interpolated into the URL unvalidated, and the
# response object (``respone``, sic) is never closed -- names kept as-is.
respone = urlopen(apiUrl)
ZGF0YQ = json.load(respone)
def totalPrint():
    """Pretty-print the ipinfo.io payload stored in the module-level ``ZGF0YQ``.

    Fix: field lookups now use ``dict.get`` with an ``'N/A'`` fallback --
    ipinfo.io omits some keys (``hostname`` in particular) for many
    addresses, which previously made ``data['hostname']`` raise ``KeyError``
    mid-report.
    """
    data = ZGF0YQ
    IP = data.get('ip', 'N/A')
    CITY = data.get('city', 'N/A')
    STATE = data.get('region', 'N/A')
    COUNTRY = data.get('country', 'N/A')
    LATLONG = data.get('loc', 'N/A')
    ORG = data.get('org', 'N/A')
    POSTAL = data.get('postal', 'N/A')
    TZ = data.get('timezone', 'N/A')
    HN = data.get('hostname', 'N/A')
    print( '    ' + IP + '  <--- Internet Protocol (V4) Address \n')
    print( '    ' + CITY + ', ' + STATE + ', ' + COUNTRY + '  <--- Region \n')
    print( '    ' + LATLONG + '  <--- Latitude & Longitude \n')
    print( '    ' + ORG + '  <--- Organization \n')
    print( '    ' + POSTAL + '  <--- Postal (or ZIP) Code \n')
    print( '    ' + TZ + '  <--- Timezone \n')
    print( '    ' + HN + '  <--- Hostname \n')
print( '\n' + 'GitHub:' + '\n' + 'https://github.com/russian-dev \n')
totalPrint()
programPause = input("Press any key to continue!..")
| 27.916667 | 80 | 0.518657 |
288a0ec751cbf14713f2e6ac546fcbe9bf991667 | 426 | py | Python | marvin/frontpage/settings.py | programa-stic/marvin-django | 2dfd793f331e18952fc894f5d9cb02f22da6e1ae | [
"BSD-2-Clause"
] | 81 | 2016-02-17T22:48:52.000Z | 2020-12-31T08:57:11.000Z | marvin/frontpage/settings.py | programa-stic/marvin-django | 2dfd793f331e18952fc894f5d9cb02f22da6e1ae | [
"BSD-2-Clause"
] | 1 | 2016-09-08T09:09:31.000Z | 2016-09-08T14:39:50.000Z | marvin/frontpage/settings.py | programa-stic/marvin-django | 2dfd793f331e18952fc894f5d9cb02f22da6e1ae | [
"BSD-2-Clause"
] | 19 | 2016-02-17T23:28:34.000Z | 2022-03-30T18:35:22.000Z | from os import getcwd
# Deployment-specific paths and credentials for the Marvin frontpage app.
# NOTE(review): the values below are placeholders ("/home/foo", "****...");
# they must be overridden per deployment, and real secrets should come from
# the environment rather than source control.
root_dir = "/home/foo/marvin-django/marvin"
vuln_analysis_dir = "/home/foo/Marvin-static-Analyzer"
# Weka artefacts used by the frontpage classifier.
perms_list_file = root_dir + "/frontpage/weka/perms_list_nov2014"
model_file = root_dir + "/frontpage/weka/bayes.model"
# APK storage and the bare git repo the APKs are versioned in.
root_apk_dir = "/mnt/apks/"
root_git_dir = "/mnt/apks/marvin.git"
gitlab_url = "http://192.168.0.1"
gitlab_token = "********************"
marvin_git_passwd = "********************"
| 28.4 | 65 | 0.676056 |
4dccc10dd794cbdb4b05756ef5aa6f64b1def3fb | 5,423 | py | Python | docs/source/conf.py | mristin/icontract-pathlib-poc | e86ee90b63a9ba484ae9295c8de5c60e94624473 | [
"MIT"
] | null | null | null | docs/source/conf.py | mristin/icontract-pathlib-poc | e86ee90b63a9ba484ae9295c8de5c60e94624473 | [
"MIT"
] | null | null | null | docs/source/conf.py | mristin/icontract-pathlib-poc | e86ee90b63a9ba484ae9295c8de5c60e94624473 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))  # make the package importable for autodoc
# -- Project information -----------------------------------------------------
project = 'icontract-pathlib-poc'
copyright = '2018, Marko Ristin'
author = 'Marko Ristin'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# sphinx_autodoc_typehints renders PEP 484 annotations in the generated
# docs; sphinx_icontract renders icontract pre/post-conditions.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx_autodoc_typehints',
    'sphinx_icontract'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'icontract-pathlib-pocdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'icontract-pathlib-poc.tex', 'icontract-pathlib-poc Documentation',
'Marko Ristin', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'icontract-pathlib-poc', 'icontract-pathlib-poc Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'icontract-pathlib-poc', 'icontract-pathlib-poc Documentation',
author, 'icontract-pathlib-poc', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
autoclass_content = 'both' | 30.127778 | 84 | 0.65296 |
92de732c00b0eb35a7451f292bd95167bccf4e28 | 4,585 | py | Python | tools/cache_preds.py | tusharc31/c3dpo_nrsfm | bffe61ddd84eb2aab8d6f18c3534107f616d0fd2 | [
"MIT"
] | 310 | 2019-10-08T00:10:45.000Z | 2022-03-30T07:32:22.000Z | tools/cache_preds.py | tusharc31/c3dpo_nrsfm | bffe61ddd84eb2aab8d6f18c3534107f616d0fd2 | [
"MIT"
] | 17 | 2019-10-29T03:34:34.000Z | 2021-05-24T04:21:44.000Z | tools/cache_preds.py | tusharc31/c3dpo_nrsfm | bffe61ddd84eb2aab8d6f18c3534107f616d0fd2 | [
"MIT"
] | 62 | 2019-10-08T00:32:28.000Z | 2022-02-21T23:54:27.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from torch._six import container_abcs, string_classes, int_classes
import re
import time
import sys
import torch
from tqdm import tqdm
from tools.utils import has_method, get_net_input
def cache_preds(model, loader, cache_vars=None, stats=None, n_extract=None):
    """Run `model` in eval mode over `loader` and cache its predictions.

    Args:
        model: network invoked as model(**net_input) under torch.no_grad().
        loader: iterable of batches (e.g. a torch DataLoader).
        cache_vars: optional list of prediction keys to keep; when given,
            all other keys are dropped before caching.
        stats: optional statistics accumulator; updated once per batch with
            the merged prediction dict (stat set 'test').
        n_extract: if not None, stop after this many batches instead of
            exhausting the loader.

    Returns:
        One dict with all per-batch prediction dicts concatenated along the
        batch dimension (see concatenate_cache).
    """
    print("caching model predictions: %s" % str(cache_vars))
    model.eval()
    trainmode = 'test'
    t_start = time.time()
    cached_preds = []
    cache_size = 0.  # in GB ... counts only cached tensor sizes
    n_batches = len(loader)
    if n_extract is not None:
        n_batches = n_extract
    with tqdm(total=n_batches, file=sys.stdout) as pbar:
        for it, batch in enumerate(loader):
            last_iter = it == n_batches-1
            # move to gpu and cast to Var
            net_input = get_net_input(batch)
            with torch.no_grad():
                preds = model(**net_input)
            # Prediction keys must not collide with input keys because both
            # are merged into a single dict below.
            assert not any(k in preds for k in net_input.keys())
            preds.update(net_input)  # merge everything into one big dict
            if stats is not None:
                stats.update(preds, time_start=t_start, stat_set=trainmode)
                assert stats.it[trainmode] == it, \
                    "inconsistent stat iteration number!"
            # restrict the variables to cache
            if cache_vars is not None:
                preds = {k: preds[k] for k in cache_vars if k in preds}
            # ... gather and log the size of the cache
            preds, preds_size = gather_all(preds)
            cache_size += preds_size
            cached_preds.append(preds)
            pbar.set_postfix(cache_size="%1.2f GB" % cache_size)
            pbar.update(1)
            if last_iter and n_extract is not None:
                break
    cached_preds_cat = concatenate_cache(cached_preds)
    return cached_preds_cat
def gather_all(preds):
    """Recursively move cached tensors to CPU and tally their size.

    Mutates `preds` in place (tensors are replaced by their CPU copies) and
    returns (preds, size) where size is the total tensor payload in GB.
    """
    total_gb = 0
    for key, value in preds.items():
        if has_method(value, 'cuda'):
            cpu_tensor = value.data.cpu()
            preds[key] = cpu_tensor
            total_gb += cpu_tensor.numpy().nbytes / 1e9
        elif type(value) is dict:
            preds[key], nested_gb = gather_all(value)
            total_gb += nested_gb
    return preds, total_gb
# cache concatenation - largely taken from pytorch default_collate()
# Matches numpy dtype kind codes for byte strings (S/a), unicode strings (U)
# and objects (O), none of which can be converted into tensors.
np_str_obj_array_pattern = re.compile(r'[SaUO]')
error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}"
# Maps a numpy dtype name to the torch tensor constructor used when
# collating numpy scalars in concatenate_cache().
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}
def concatenate_cache(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Recursive merge in the spirit of torch's default_collate(), except that
    tensors are *concatenated* along dim 0 instead of stacked, so per-batch
    prediction dicts combine into one dataset-sized structure.

    NOTE(review): int_classes / string_classes / container_abcs come from
    torch._six, which was removed in torch >= 1.9 — confirm the pinned
    torch version before upgrading.
    """
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        return torch.cat(batch, 0, out=out)  # the main difference is here
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(error_msg_fmt.format(elem.dtype))
            # Numeric ndarrays: convert to tensors and recurse.
            return concatenate_cache([torch.from_numpy(b) for b in batch])
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(batch[0], int_classes):
        return torch.tensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], container_abcs.Mapping):
        # Merge dicts key by key, recursing into the per-key lists.
        return {key: concatenate_cache([d[key] for d in batch])
                for key in batch[0]}
    elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'):
        # namedtuple: rebuild the same type, collating field by field.
        return type(batch[0])(*(concatenate_cache(samples)
                                for samples in zip(*batch)))
    elif isinstance(batch[0], container_abcs.Sequence):  # also some diffs here
        # just unpack
        return [s_ for s in batch for s_ in s]
    raise TypeError((error_msg_fmt.format(type(batch[0]))))
| 33.224638 | 79 | 0.631843 |
029e6027503b37145cabdc7f249c382bd5c5d406 | 2,941 | py | Python | utils.py | caiomarinhodev/base_django | 90eae151d4fb45926ad00d4956a7b17ba009fc00 | [
"MIT"
] | null | null | null | utils.py | caiomarinhodev/base_django | 90eae151d4fb45926ad00d4956a7b17ba009fc00 | [
"MIT"
] | 19 | 2018-03-06T13:14:01.000Z | 2019-05-29T01:14:38.000Z | utils.py | caiomarinhodev/base_django | 90eae151d4fb45926ad00d4956a7b17ba009fc00 | [
"MIT"
] | 2 | 2020-11-19T00:54:20.000Z | 2021-02-17T05:43:26.000Z | from django.db.models import (
CharField,
TextField,
IntegerField,
FloatField,
EmailField,
ForeignKey,
FileField,
DateTimeField,
DateField,
AutoField,
BooleanField,
ManyToManyField
)
from django.forms.widgets import (
Textarea,
NumberInput,
EmailInput,
Input,
Select,
TextInput,
FileInput,
DateTimeInput,
DateInput,
HiddenInput,
CheckboxInput,
CheckboxSelectMultiple,
)
import random
import string
import csv
def generate_random_string(n):
    """Return a random string of n lowercase ASCII letters.

    :param n: length of the string to generate
    :return: random lowercase string of length n
    """
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(n))
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
    """
    CSV reader for UTF-8 text.

    BUG FIX: the previous implementation was the Python 2 recipe — it
    encoded each line to UTF-8 bytes before handing it to csv.reader and
    decoded the cells afterwards.  Under Python 3 (which this module
    targets; see random.choices above) csv.reader consumes str lines
    natively and raises an error when fed bytes, so the round-trip is
    both unnecessary and broken.

    :param unicode_csv_data: iterable of text lines (e.g. an open text file)
    :param dialect: csv dialect to use
    :param kwargs: extra keyword arguments forwarded to csv.reader
    :return: generator of rows, each a list of str cells
    """
    for row in csv.reader(unicode_csv_data, dialect=dialect, **kwargs):
        yield row
def utf_8_encoder(unicode_csv_data):
    """
    UTF-8 Encoder.

    :param unicode_csv_data: iterable of text lines
    :return: generator yielding each line encoded as UTF-8 bytes
    """
    return (text_line.encode('utf-8') for text_line in unicode_csv_data)
def field_to_widget(field):
    """Return a Bootstrap-styled form widget appropriate for a model field.

    The dispatch is on the exact field class (no subclass matching), with a
    generic Input as the fallback for unmapped field types.
    """
    kind = type(field)
    if kind is CharField:
        # Choice-constrained CharFields render as a <select>, free text
        # otherwise.
        if field.choices:
            return Select(attrs={"class": "form-control"})
        return TextInput(attrs={"class": "form-control", "rows": 1})
    if kind is TextField:
        return Textarea(attrs={"class": "form-control", "rows": 1})
    if kind is AutoField:
        return HiddenInput(attrs={"class": "form-control", "rows": 1})
    if kind in (IntegerField, FloatField):
        return NumberInput(attrs={"class": "form-control"})
    if kind is EmailField:
        return EmailInput(attrs={"class": "form-control"})
    if kind is ForeignKey:
        return Select(attrs={"class": "form-control"})
    if kind is ManyToManyField:
        return CheckboxSelectMultiple(attrs={"class": ""})
    if kind is BooleanField:
        return CheckboxInput(attrs={"class": "form-control"})
    if kind is FileField:
        return FileInput(attrs={"class": "form-control"})
    if kind is DateField:
        return DateInput(attrs={
            "class": "form-control date",
            "type": "date"
        })
    if kind is DateTimeField:
        return DateTimeInput(attrs={"class": "form-control datetimepicker"})
    # Fallback for any field type not mapped above.
    return Input(attrs={"class": "form-control"})
def generate_bootstrap_widgets_for_all_fields(model):
    """Build a {field_name: widget} mapping covering every field of `model`."""
    widgets = {}
    for model_field in model._meta.get_fields():
        widgets[model_field.name] = field_to_widget(model_field)
    return widgets
| 28.278846 | 76 | 0.647399 |
eade42ea8db75ad6fc58d927077ec3448455f241 | 5,268 | py | Python | Simple Wave Analysis/CySmartLogMGMT.py | washingtonxr/OpenAlgorithm | e1898e36dead0c79b23e054587d92f378c5e8b10 | [
"Apache-2.0"
] | null | null | null | Simple Wave Analysis/CySmartLogMGMT.py | washingtonxr/OpenAlgorithm | e1898e36dead0c79b23e054587d92f378c5e8b10 | [
"Apache-2.0"
] | null | null | null | Simple Wave Analysis/CySmartLogMGMT.py | washingtonxr/OpenAlgorithm | e1898e36dead0c79b23e054587d92f378c5e8b10 | [
"Apache-2.0"
] | null | null | null | import os
import os.path
from functools import reduce
# Root folder containing the data directories.
Datadir_path = "./data/"
# Directory (under Datadir_path) holding the raw CySmart log files.
Target_dir_path = "CysmartLogData"
# Sub-directory where the parsed "Fixed_" output files are written.
Child_path = "Processed_data/"
# Full path to the raw log directory.
Datafile_path = Datadir_path + Target_dir_path + "/"
# Last data-packet ID seen; used to skip duplicated packets.
data_package_eldID = 0
# Module-global handle of the currently open output file (set by Save_log).
processed_file = ""
def Save_log(argv, argv2, mode = 0):
    """Manage the processed-output log file (module-global handle).

    mode 0 -- append `argv` as one line to the currently open output file.
    mode 1 -- (re)create the output file 'Fixed_<argv2>' under the
              processed-data directory, deleting any previous version, and
              keep it open in the module-global `processed_file`.
    mode 2 -- close the currently open output file (`argv`/`argv2` unused).
    """
    # print("Save_log = " + argv)
    global processed_file
    if(mode == 0): # Write data into file.
        # print("Info: Write data to file.")
        processed_file.write(argv + '\n')
    elif(mode == 1): # Check duplicate.
        # print("Info: Check duplicate and open file.")
        target_file = Datafile_path + Child_path + 'Fixed_' + argv2
        # print(target_file)
        if os.path.exists(target_file):
            # Remove method.
            # A previous run's output exists; delete it so the file is
            # rebuilt from scratch despite the 'a+' open mode below.
            os.remove(target_file)
            # os.unlink(target_file)
        else:
            # Nothing to delete; this warning is informational only.
            print("Warning: No such file:%s" % target_file)
        # File open.
        processed_file = open(target_file, mode='a+', encoding='utf-8')
    elif(mode == 2): # File close.
        # print("Info: Close file.")
        processed_file.close()
def Check_file(argv, argv2):
    """Parse one CySmart log file and write decoded samples to 'Fixed_<argv2>'.

    Each qualifying log line carries a hex payload after a '[' marker plus a
    packet ID and FIFO depth near the end of the line.  Samples are decoded
    as 16-bit little-endian two's-complement values, scaled, and written in
    groups of three (x/y/z) per output line.  Duplicate packet IDs are
    skipped.  The offsets below (100, 300, -12, -10, -9, -7) are format
    heuristics for the CySmart log layout — TODO confirm against the format.
    """
    # print("Check file = " + argv)
    global data_package_eldID
    # Set a log file.
    Save_log(0, argv2, 1)
    with open(argv, mode='r', encoding='utf-8') as f2read:
        # Read each line by a while process.
        line = f2read.readline()
        #print(line)
        while line:
            fix_line_loc = 0
            for fix_line in line:
                fix_line_loc = fix_line_loc + 1
                # Just define fix_line_loc and len(line) manual.
                # Only lines long enough to hold a raw data block, with the
                # '[' marker past column 100, are treated as data lines.
                if(fix_line == "[" and fix_line_loc > 100 and len(line) > 300):
                    #print(fix_line)
                    #print(fix_line_loc)
                    #print(line[fix_line_loc:len(line)-2])
                    # Spilt data for mess RAW data block.
                    data_line = line[fix_line_loc:len(line)-2]
                    # print(data_line)
                    # Get data's ID number.
                    # NOTE(review): eval of '0x..' parses the hex ID; a
                    # safer equivalent would be int(..., 16).
                    data_ID = eval('0x' + line[len(line)-12:len(line)-10])
                    # print("Data ID = " + str(data_ID))
                    # print(str(data_package_eldID) + "vs" + str(data_ID))
                    if(data_ID != data_package_eldID):
                        data_package_eldID = data_ID
                        # print(str(data_package_eldID) + "vs" + str(data_ID))
                        # Get number of data exist in FIFO.
                        data_depth = int('0x' + line[len(line)-9:len(line)-7], 16)
                        # print("FIFO depth = " + str(data_depth))
                        # data_part_index = 0
                        data_raw_axis = 0
                        data_output = ""
                        for data_part_index in range(int(data_depth/2)):
                            # print(data_part_index)
                            # print(data_line[6*data_part_index + 1] + data_line[6*data_part_index + 2])
                            # Reassemble one 16-bit sample from its two hex
                            # byte pairs (low byte first in the log).
                            data_part_temp = data_line[6*data_part_index + 4] + data_line[6*data_part_index + 5] + \
                                             data_line[6*data_part_index + 1] + data_line[6*data_part_index + 2]
                            data_part_temp2 = int('0x'+data_part_temp, 16)
                            # Two's-complement sign conversion for 16-bit
                            # values (value - 0x10000 when above 16384*2).
                            if(data_part_temp2 > 16384*2):
                                data_part_temp2 = 0xffff - data_part_temp2 + 1
                                data_part_temp2 = -(data_part_temp2)
                            # Scale raw counts by 9.8/16384 — presumably
                            # counts-per-g to m/s^2; TODO confirm sensor range.
                            data_part_temp2 = data_part_temp2*9.8/16384
                            # print(data_part_temp2)
                            data_output = data_output + '\t' + str(data_part_temp2)
                            data_raw_axis = data_raw_axis + 1
                            # Emit one output line per three samples (axes).
                            if(data_raw_axis >= 3):
                                data_output = 'Info: ACC:' + str(data_output)
                                # print(data_output)
                                Save_log(data_output, argv2, 0)
                                data_output = ''
                                data_raw_axis = 0
                    else:
                        print("Warning: Data ID (" + str(data_ID) + ") duplicate.")
                    #print(line)
            # Acquire each line.
            line = f2read.readline()
    # Close log file.
    Save_log(0, 0, 2)
def Readfile(argv, argv2):
    """Parse the file at path `argv` (bare name `argv2`) if it is a txt log."""
    # Only files whose name ends in "txt" are handed to the parser.
    if argv.endswith("txt"):
        Check_file(argv, argv2)
    else:
        print("No valid file type in this folder")
def Data_mgmt():
    """Walk the raw-log directory and parse every log file exactly once.

    BUG FIX: the original nested the file loop inside `for dirname in
    dirnames`, so every file was processed once per sub-directory of its
    parent (and never in directories without sub-directories).  The files
    of each walked directory are now iterated directly.  The output
    directory is pruned from the walk so freshly written "Fixed_" files
    are never re-parsed as input.
    """
    print("Data file management")
    output_dir = Child_path.rstrip('/')
    for parent, dirnames, filenames in os.walk(Datafile_path):
        # In-place pruning: os.walk honors modifications to dirnames and
        # will not descend into the processed-output directory.
        dirnames[:] = [d for d in dirnames if d != output_dir]
        for filename in filenames:
            tmp_file_name = os.path.join(parent, filename)
            Readfile(tmp_file_name, filename)
if __name__ == "__main__":
    # Script entry point: parse every raw CySmart log under the data folder.
    Data_mgmt()
    print("The end")
| 41.480315 | 116 | 0.510061 |
e7f8bc13f35b0c18ef7edea991e513feca2b2eb4 | 2,333 | py | Python | examples/kafka/kafka_event_streaming_sink.py | doru1004/rayvens | da89f405586a06b50cc8bb6273d8582400fbca9c | [
"Apache-2.0"
] | 24 | 2021-06-18T21:38:04.000Z | 2022-02-16T19:16:49.000Z | examples/kafka/kafka_event_streaming_sink.py | doru1004/rayvens | da89f405586a06b50cc8bb6273d8582400fbca9c | [
"Apache-2.0"
] | 11 | 2021-06-22T14:36:27.000Z | 2021-12-09T16:33:15.000Z | examples/kafka/kafka_event_streaming_sink.py | doru1004/rayvens | da89f405586a06b50cc8bb6273d8582400fbca9c | [
"Apache-2.0"
] | 5 | 2021-06-18T22:03:55.000Z | 2021-08-02T05:11:46.000Z | #
# Copyright IBM Corporation 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ray
import rayvens
import sys
# Send message to Slack sink using the kafka transport.
# Command line arguments and validation:
if len(sys.argv) < 4:
    print(f'usage: {sys.argv[0]} <brokers> <password> <slack_channel>'
          '<slack_webhook> <run_mode> OR'
          f' {sys.argv[0]} <slack_channel> <slack_webhook> <run_mode>')
    sys.exit(1)
# Brokers and run mode:
# Three-argument form: <slack_channel> <slack_webhook> <run_mode>.
brokers = None
password = None
slack_channel = sys.argv[1]
slack_webhook = sys.argv[2]
run_mode = sys.argv[3]
# Five-argument form additionally supplies the Kafka brokers and password.
# NOTE(review): brokers/password are parsed but not used further below.
if len(sys.argv) == 6:
    brokers = sys.argv[1]
    password = sys.argv[2]
    slack_channel = sys.argv[3]
    slack_webhook = sys.argv[4]
    run_mode = sys.argv[5]
if run_mode not in ['local', 'mixed', 'operator']:
    raise RuntimeError(f'Invalid run mode provided: {run_mode}')
# The Kafka topic used for communication.
topic = "externalTopicSink"
# Initialize ray either on the cluster or locally otherwise.
if run_mode == 'operator':
    ray.init(address='auto')
else:
    ray.init()
# Start rayvens in the selected run mode using the Kafka event transport.
rayvens.init(mode=run_mode, transport="kafka")
# Create stream.
stream = rayvens.Stream('slack')
# Event sink config.
sink_config = dict(kind='slack-sink',
                   channel=slack_channel,
                   webhook_url=slack_webhook,
                   kafka_transport_topic=topic,
                   kafka_transport_partitions=3)
# Add sink to stream.
sink = stream.add_sink(sink_config)
# Sends message to all sinks attached to this stream.
stream << f'Message to Slack sink in run mode {run_mode} and Kafka transport.'
# Disconnect any sources or sinks attached to the stream 2 seconds after
# the stream is idle (i.e. no events were propagated by the stream).
stream.disconnect_all(after_idle_for=2)
| 30.298701 | 78 | 0.705101 |
494994e4708f37e25faf9a190cc33ade49aab08a | 1,996 | py | Python | fortran_da/compute_rmse_wcp.py | lysun0725/MAOOAM | ad0df11876031b8df31bec8171bc4ce5ac2dff9c | [
"MIT"
] | null | null | null | fortran_da/compute_rmse_wcp.py | lysun0725/MAOOAM | ad0df11876031b8df31bec8171bc4ce5ac2dff9c | [
"MIT"
] | null | null | null | fortran_da/compute_rmse_wcp.py | lysun0725/MAOOAM | ad0df11876031b8df31bec8171bc4ce5ac2dff9c | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import sys
# Command-line arguments: ensemble size and two experiment tags used in the
# data file names (tw_* presumably denote observation-window settings —
# TODO confirm against the MAOOAM experiment naming).
ens_num = sys.argv[1]
tw_da = sys.argv[2]
tw_solo = sys.argv[3]
# Inflation factor embedded in the analysis file names.
infl = 1.0
# Input files: weakly-coupled analysis, solo atm/ocn analyses, free runs.
# NOTE(review): the solo analysis files are hard-coded to ensemble size 37
# and tag '.1E+00' regardless of ens_num/tw_da.
wc_file = "Xam_etkf_wcp_%s_%3.1f%s.dat" % (str(ens_num),infl,tw_da)
sp_file = "../../MAOOAM_solo_atm/fortran_da/h%s/Xam_etkf_%s_%3.1f%s.dat" % (tw_solo,str(37),infl,'.1E+00')
sp_file2 = "../../MAOOAM_solo_ocn/fortran_da/h%s/Xam_etkf_%s_%3.1f%s.dat" % (tw_solo,str(37),infl,'.1E+00')
fr_file = "../../MAOOAM_solo_atm/fortran_da/freerun_atm.010.dat"
fr_file2 = "../../MAOOAM_solo_ocn/fortran_da/freerun_ocn.010.dat"
# Column 0 is time; columns 1-36 are model variables (1-20 atmosphere,
# 21-36 ocean, judging by the slices below — TODO confirm data layout).
true = np.loadtxt('nature.dat')[1:900002,1:37]
wcda = np.loadtxt(wc_file)[:,1:37]
sprd = np.loadtxt(sp_file)[:,1:21]
sprd2 = np.loadtxt(sp_file2)[0:900001,1:17]
free = np.loadtxt(fr_file)[1:900002,1:21]
free2 = np.loadtxt(fr_file2)[1:900002,21:37]
# Time axis: step index * 0.1 / 8.9 converts to days — TODO confirm the
# model time-unit conversion.
days = np.arange(1,900002)*0.1/8.9
# atm
# RMSE over the 20 atmospheric variables: free run, coupled DA, solo DA.
err6 = np.linalg.norm(true[:,0:20]-free,axis=1)/np.sqrt(20)
err8 = np.linalg.norm(true[:,0:20]-wcda[:,0:20],axis=1)/np.sqrt(20)
err3 = np.linalg.norm(true[:,0:20]-sprd,axis=1)/np.sqrt(20)
# ocn
# RMSE over the 16 oceanic variables: free run, coupled DA, solo DA.
err7 = np.linalg.norm(true[:,20:36]-free2,axis=1)/np.sqrt(16)
err9 = np.linalg.norm(true[:,20:36]-wcda[:,20:36],axis=1)/np.sqrt(16)
err5 = np.linalg.norm(true[:,20:36]-sprd2,axis=1)/np.sqrt(16)
# Figure 1: atmospheric RMSE on log-log axes.
plt.figure(1)
plt.plot(days,err6,label = 'freerun')
plt.plot(days,err8,label = 'wcpl ETKF')
plt.plot(days,err3,label = 'uncpld ETKF; h=%s' % tw_solo)
plt.xlabel('days')
plt.ylabel('RMSE')
plt.yscale('log')
plt.xscale('log')
plt.title('ETKF; Atm; Ens_num = %s' % str(ens_num))
plt.legend()
plt.savefig("rmse_etkf_watm_%s_%s_%3.1f%s.png" % (tw_solo,str(ens_num),infl,tw_da))
# Figure 2: oceanic RMSE on log-log axes.
plt.figure(2)
plt.plot(days,err7,label = 'freerun')
plt.plot(days,err9,label = 'wcpl ETKF')
plt.plot(days,err5,label = 'uncpld ETKF; h=%s' % tw_solo)
plt.xlabel('days')
plt.ylabel('RMSE')
plt.yscale('log')
plt.xscale('log')
plt.title('ETKF; Ocn; Ens_num = %s' % str(ens_num))
plt.legend()
plt.savefig("rmse_etkf_wocn_%s_%s_%3.1f%s.png" % (tw_solo,str(ens_num),infl,tw_da))
#plt.show()
| 32.721311 | 107 | 0.685872 |
9bad4353a2ed13e6ec264338897381d5e2631ee0 | 1,577 | py | Python | pcat2py/class/213a3774-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/213a3774-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/213a3774-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
################################################################################
# 213a3774-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Checks / remediates the IE FEATURE_UNC_SAVEDFILECHECK setting
    for pptview.exe via a provided CLI helper object."""

    def __init__(self):
        # Display lines describing the registry state (filled by check()).
        self.output = []
        # True when the DWORD equals the required value (set by check()).
        self.is_compliant = False
        # Unique identifier of this finding.
        self.uuid = "213a3774-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Read the registry DWORD and record whether it equals 1."""
        self.is_compliant = False
        key_path = (r'HKLM:\Software\Microsoft\Internet Explorer\Main'
                    r'\FeatureControl\FEATURE_UNC_SAVEDFILECHECK')
        dword = cli.get_reg_dword(key_path, 'pptview.exe')
        self.output = [key_path, ('pptview.exe=' + str(dword))]
        self.is_compliant = dword == 1
        return self.is_compliant

    def fix(self, cli):
        """Create the registry path and set pptview.exe to 1."""
        commands = (
            r"New-Item -path 'HKLM:\Software\Microsoft\Internet Explorer\Main'",
            r"New-Item -path 'HKLM:\Software\Microsoft\Internet Explorer\Main\FeatureControl'",
            r"New-Item -path 'HKLM:\Software\Microsoft\Internet Explorer\Main\FeatureControl\FEATURE_UNC_SAVEDFILECHECK'",
            r"Set-ItemProperty -path 'HKLM:\Software\Microsoft\Internet Explorer\Main\FeatureControl\FEATURE_UNC_SAVEDFILECHECK' -name 'pptview.exe' -value 1 -Type DWord",
        )
        for command in commands:
            cli.powershell(command)
| 41.5 | 182 | 0.633481 |
7899d1de71c37ff196c2dd25995a5aef19b20ec8 | 13,182 | py | Python | processors/ner_span.py | quyuanhang/BERT-NER-Pytorch | 45d2680d61e9e9c00b91811fef1b994542cb5170 | [
"MIT"
] | null | null | null | processors/ner_span.py | quyuanhang/BERT-NER-Pytorch | 45d2680d61e9e9c00b91811fef1b994542cb5170 | [
"MIT"
] | null | null | null | processors/ner_span.py | quyuanhang/BERT-NER-Pytorch | 45d2680d61e9e9c00b91811fef1b994542cb5170 | [
"MIT"
] | null | null | null | """ Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
import torch
import logging
import os
import copy
import json
from .utils_ner import DataProcessor,get_entities
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for token classification.

    guid    -- unique example identifier
    text_a  -- raw input text (string or list of tokens)
    subject -- list of (label, start, end) entity spans
    """

    def __init__(self, guid, text_a, subject):
        self.guid = guid
        self.text_a = text_a
        self.subject = subject

    def __repr__(self):
        return self.to_json_string()

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return {name: copy.deepcopy(value)
                for name, value in self.__dict__.items()}

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeature(object):
    """A single set of features of data (token ids, masks and span labels)."""

    def __init__(self, input_ids, input_mask, input_len, segment_ids,
                 start_ids, end_ids, subjects):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.start_ids = start_ids
        self.input_len = input_len
        self.end_ids = end_ids
        self.subjects = subjects

    def __repr__(self):
        return self.to_json_string()

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return {name: copy.deepcopy(value)
                for name, value in self.__dict__.items()}

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def collate_fn(batch):
    """
    Collate (input_ids, input_mask, segment_ids, start_ids, end_ids, len)
    tuples into batched tensors, trimming padded positions down to the
    longest real sequence in the batch.
    """
    columns = [torch.stack(column) for column in zip(*batch)]
    input_ids, input_mask, segment_ids, start_ids, end_ids, lens = columns
    longest = max(lens).item()
    return (input_ids[:, :longest],
            input_mask[:, :longest],
            segment_ids[:, :longest],
            start_ids[:, :longest],
            end_ids[:, :longest],
            lens)
def convert_examples_to_features(examples,label_list,max_seq_length,tokenizer,
                                 cls_token_at_end=False,cls_token="[CLS]",cls_token_segment_id=1,
                                 sep_token="[SEP]",pad_on_left=False,pad_token=0,pad_token_segment_id=0,
                                 sequence_a_segment_id=0,mask_padding_with_zero=True,):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        Each example's entity spans are encoded as start/end label-id arrays
        aligned with the tokenized sequence, and also kept as
        (label_id, start, end) triples in InputFeature.subjects.
    """
    label2id = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))
        textlist = example.text_a
        subjects = example.subject
        if isinstance(textlist,list):
            textlist = " ".join(textlist)
        tokens = tokenizer.tokenize(textlist)
        start_ids = [0] * len(tokens)
        end_ids = [0] * len(tokens)
        subjects_id = []
        # NOTE(review): span indices index directly into the token list,
        # which assumes the tokenizer yields one token per labeled position
        # — confirm for the tokenizer in use.
        for subject in subjects:
            try:
                label = subject[0]
                start = subject[1]
                end = subject[2]
                start_ids[start] = label2id[label]
                end_ids[end] = label2id[label]
                subjects_id.append((label2id[label], start, end))
            except:
                # Spans falling outside the tokenized sequence (or unknown
                # labels) are skipped with a log message.
                logger.info("*** span error ***")
        # Account for [CLS] and [SEP] with "- 2".
        special_tokens_count = 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length - special_tokens_count)]
            start_ids = start_ids[: (max_seq_length - special_tokens_count)]
            end_ids = end_ids[: (max_seq_length - special_tokens_count)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens += [sep_token]
        start_ids += [0]
        end_ids += [0]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        if cls_token_at_end:
            tokens += [cls_token]
            start_ids += [0]
            end_ids += [0]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            start_ids = [0]+ start_ids
            end_ids = [0]+ end_ids
            segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        input_len = len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            start_ids = ([0] * padding_length) + start_ids
            end_ids = ([0] * padding_length) + end_ids
        else:
            input_ids += [pad_token] * padding_length
            input_mask += [0 if mask_padding_with_zero else 1] * padding_length
            segment_ids += [pad_token_segment_id] * padding_length
            start_ids += ([0] * padding_length)
            end_ids += ([0] * padding_length)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(start_ids) == max_seq_length
        assert len(end_ids) == max_seq_length
        if ex_index < 5:
            # Log the first few examples for manual inspection.
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("start_ids: %s" % " ".join([str(x) for x in start_ids]))
            logger.info("end_ids: %s" % " ".join([str(x) for x in end_ids]))
        features.append(InputFeature(input_ids=input_ids,
                                     input_mask=input_mask,
                                     segment_ids=segment_ids,
                                     start_ids=start_ids,
                                     end_ids=end_ids,
                                     subjects=subjects_id,
                                     input_len=input_len))
    return features
class CnerProcessor(DataProcessor):
    """Processor for the chinese ner data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_text(os.path.join(data_dir, "train.char.bmes"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_text(os.path.join(data_dir, "dev.char.bmes"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_text(os.path.join(data_dir, "test.char.bmes"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ["O", "CONT", "ORG", "LOC", "EDU", "NAME", "PRO", "RACE", "TITLE"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for index, line in enumerate(lines):
            if index == 0:
                # The first record is skipped, mirroring the original reader.
                continue
            # Normalize BMES-style tags to BIO: M-/E- segments become I-.
            tags = [tag.replace('M-', 'I-') if 'M-' in tag
                    else tag.replace('E-', 'I-') if 'E-' in tag
                    else tag
                    for tag in line['labels']]
            subject = get_entities(tags, id2label=None, markup='bios')
            examples.append(InputExample(guid="%s-%s" % (set_type, index),
                                         text_a=line['words'],
                                         subject=subject))
        return examples
class CluenerProcessor(DataProcessor):
    """Processor for the chinese ner data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_json(os.path.join(data_dir, "train.json"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_json(os.path.join(data_dir, "dev.json"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_json(os.path.join(data_dir, "test.json"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ["O", "address", "book", "company", "game", "government",
                "movie", "name", "organization", "position", "scene"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for index, line in enumerate(lines):
            subject = get_entities(line['labels'], id2label=None, markup='bios')
            examples.append(InputExample(guid="%s-%s" % (set_type, index),
                                         text_a=line['words'],
                                         subject=subject))
        return examples
class AipfProcessor(DataProcessor):
    """Processor for the chinese ner data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_text(os.path.join(data_dir, "train.txt"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_text(os.path.join(data_dir, "dev.txt"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_text(os.path.join(data_dir, "test.txt"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return [
            "O",
            "其他风险事件主体",
            "财务造假事件主体",
            "亏损事件主体",
            "董监高成员异常事件主体",
            "评级恶化事件主体",
            "减持事件主体",
            "资产异常事件主体",
            "破产事件主体",
            "停产减产事件主体",
            "违约失信事件主体",
            "资产查封事件主体",
        ]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for index, line in enumerate(lines):
            # Normalize BMES-style tags to BIO: M-/E- segments become I-.
            tags = [tag.replace('M-', 'I-') if 'M-' in tag
                    else tag.replace('E-', 'I-') if 'E-' in tag
                    else tag
                    for tag in line['labels']]
            subject = get_entities(tags, id2label=None, markup='bios')
            examples.append(InputExample(guid="%s-%s" % (set_type, index),
                                         text_a=line['words'],
                                         subject=subject))
        return examples
# Registry mapping a dataset/task name to its processor class.
ner_processors = {
    "cner": CnerProcessor,
    'cluener': CluenerProcessor,
    "aipf2": AipfProcessor
}
| 42.798701 | 119 | 0.573433 |
e23a048950c3dbd50742fe558c3318692ef4b9b4 | 8,100 | py | Python | metrics.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
] | null | null | null | metrics.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
] | null | null | null | metrics.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
from __future__ import print_function
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ericbidelman@chromium.org (Eric Bidelman)'
import webapp2
import datetime
import json
import logging
import ramcache
import common
import models
import settings
CACHE_AGE = 86400  # 24 hours, in seconds.
class TimelineHandler(common.JSONHandler):
  """Base JSON endpoint serving the metrics timeline of one bucket.

  Subclasses bind MEMCACHE_KEY and MODEL_CLASS.  GET returns every
  datapoint of the requested bucket_id, cached in ramcache (chunked,
  since a single value can exceed the 1MB memcache limit) for CACHE_AGE.
  """

  def make_query(self, bucket_id):
    """Return a query for all MODEL_CLASS datapoints in this bucket."""
    query = self.MODEL_CLASS.all()
    query.filter('bucket_id =', bucket_id)
    # The switch to new UMA data changed the semantics of the CSS animated
    # properties. Since showing the historical data alongside the new data
    # does not make sense, filter out everything before the 2017-10-26 switch.
    # See https://github.com/GoogleChrome/chromium-dashboard/issues/414
    if self.MODEL_CLASS == models.AnimatedProperty:
      query.filter('date >=', datetime.datetime(2017, 10, 26))
    return query

  def get(self):
    ramcache.check_for_distributed_invalidation()
    try:
      bucket_id = int(self.request.get('bucket_id'))
    except (TypeError, ValueError):
      # Missing or malformed bucket_id: respond with an empty list.
      # BUG FIX: this previously called super(self.MODEL_CLASS, self),
      # which raises TypeError because MODEL_CLASS (a datastore model) is
      # not in this handler's MRO.
      return super(TimelineHandler, self).get([])
    KEY = '%s|%s' % (self.MEMCACHE_KEY, bucket_id)
    keys = models.get_chunk_memcache_keys(self.make_query(bucket_id), KEY)
    chunk_dict = ramcache.get_multi(keys)
    if chunk_dict and len(chunk_dict) == len(keys):
      # Every chunk was present in the cache; reassemble the datapoints.
      datapoints = models.combine_memcache_chunks(chunk_dict)
    else:
      query = self.make_query(bucket_id)
      query.order('date')
      datapoints = query.fetch(None)  # All matching results.
      # Remove outliers if percentage is not between 0-1.
      #datapoints = filter(lambda x: 0 <= x.day_percentage <= 1, datapoints)
      chunk_dict = models.set_chunk_memcache_keys(KEY, datapoints)
      ramcache.set_multi(chunk_dict, time=CACHE_AGE)
    datapoints = self._clean_data(datapoints)
    # Metrics json shouldn't be cached by intermediary caches because users
    # see different data when logged in. Set Cache-Control: private.
    super(TimelineHandler, self).get(datapoints, public=False)
class PopularityTimelineHandler(TimelineHandler):
  """Timeline endpoint backed by StableInstance datapoints
  ('css_pop_timeline' cache)."""
  MEMCACHE_KEY = 'css_pop_timeline'
  MODEL_CLASS = models.StableInstance

  def get(self):
    # Pure delegation: the subclass only binds MEMCACHE_KEY/MODEL_CLASS.
    super(PopularityTimelineHandler, self).get()
class AnimatedTimelineHandler(TimelineHandler):
  """Timeline of CSS animated-property usage (AnimatedProperty datapoints)."""
  MEMCACHE_KEY = 'css_animated_timeline'
  MODEL_CLASS = models.AnimatedProperty

  def get(self):
    # Delegates entirely to TimelineHandler.get.
    super(AnimatedTimelineHandler, self).get()
class FeatureObserverTimelineHandler(TimelineHandler):
  """Timeline of feature-observer counts (FeatureObserver datapoints)."""
  MEMCACHE_KEY = 'featureob_timeline'
  MODEL_CLASS = models.FeatureObserver

  def get(self):
    # Delegates entirely to TimelineHandler.get.
    super(FeatureObserverTimelineHandler, self).get()
class FeatureHandler(common.JSONHandler):
  """Serves the most recent datapoint for every histogram bucket as JSON.

  Subclasses must define MEMCACHE_KEY, MODEL_CLASS (datapoint model) and
  PROPERTY_CLASS (histogram/bucket model).
  """

  def __query_metrics_for_properties(self):
    """Returns the newest datapoint per bucket, sorted by day_percentage."""
    datapoints = []

    # First, grab a bunch of recent datapoints in a batch.
    # That operation is fast and makes most of the iterations
    # of the main loop become in-RAM operations.
    batch_datapoints_query = self.MODEL_CLASS.all()
    batch_datapoints_query.order('-date')
    batch_datapoints_list = batch_datapoints_query.fetch(5000)
    logging.info('batch query found %r recent datapoints',
                 len(batch_datapoints_list))
    batch_datapoints_dict = {}
    # The list is date-descending, so the first datapoint seen for a bucket
    # is its most recent one.
    for dp in batch_datapoints_list:
      if dp.bucket_id not in batch_datapoints_dict:
        batch_datapoints_dict[dp.bucket_id] = dp
    logging.info('batch query found datapoints for %r buckets',
                 len(batch_datapoints_dict))

    # For every css property, fetch latest day_percentage.
    buckets = self.PROPERTY_CLASS.all().fetch(None)
    for b in buckets:
      if b.bucket_id in batch_datapoints_dict:
        datapoints.append(batch_datapoints_dict[b.bucket_id])
      else:
        # Bucket was not in the recent batch: fall back to a per-bucket query.
        query = self.MODEL_CLASS.all()
        query.filter('bucket_id =', b.bucket_id)
        query.order('-date')
        last_result = query.get()
        if last_result:
          datapoints.append(last_result)

    # Sort list by percentage. Highest first.
    datapoints.sort(key=lambda x: x.day_percentage, reverse=True)

    return datapoints

  def get(self):
    ramcache.check_for_distributed_invalidation()
    # TODO(jrobbins): chunking is unneeded with ramcache, so we can
    # simplify this code.

    # Memcache doesn't support saving values > 1MB. Break up features into chunks
    # and save those to memcache.
    if self.MODEL_CLASS == models.FeatureObserver:
      keys = models.get_chunk_memcache_keys(
          self.PROPERTY_CLASS.all(), self.MEMCACHE_KEY)
      logging.info('looking for keys %r' % keys)
      properties = ramcache.get_multi(keys)
      logging.info('found chunk keys %r' % (properties and properties.keys()))

      # TODO(jrobbins): We are at risk of displaying a partial result if
      # memcache loses some but not all chunks. We can't estimate the number of
      # expected cached items efficiently. To counter that, we refresh
      # every 30 minutes via a cron.

      if not properties or self.request.get('refresh'):
        properties = self.__query_metrics_for_properties()

        # Memcache doesn't support saving values > 1MB. Break up list into chunks.
        chunk_keys = models.set_chunk_memcache_keys(self.MEMCACHE_KEY, properties)
        logging.info('about to store chunks keys %r' % chunk_keys.keys())
        ramcache.set_multi(chunk_keys, time=CACHE_AGE)
      else:
        properties = models.combine_memcache_chunks(properties)
    else:
      properties = ramcache.get(self.MEMCACHE_KEY)
      if properties is None:
        properties = self.__query_metrics_for_properties()
        ramcache.set(self.MEMCACHE_KEY, properties, time=CACHE_AGE)

    properties = self._clean_data(properties)
    # Metrics json shouldn't be cached by intermediary caches because users
    # see different data when logged in. Set Cache-Control: private.
    super(FeatureHandler, self).get(properties, public=False)
class CSSPopularityHandler(FeatureHandler):
  """Latest CSS property popularity per bucket (StableInstance datapoints)."""
  MEMCACHE_KEY = 'css_popularity'
  MODEL_CLASS = models.StableInstance
  PROPERTY_CLASS = models.CssPropertyHistogram

  def get(self):
    # Delegates entirely to FeatureHandler.get.
    super(CSSPopularityHandler, self).get()
class CSSAnimatedHandler(FeatureHandler):
  """Latest animated-CSS usage per bucket (AnimatedProperty datapoints)."""
  MEMCACHE_KEY = 'css_animated'
  MODEL_CLASS = models.AnimatedProperty
  PROPERTY_CLASS = models.CssPropertyHistogram

  def get(self):
    # Delegates entirely to FeatureHandler.get.
    super(CSSAnimatedHandler, self).get()
class FeatureObserverPopularityHandler(FeatureHandler):
  """Latest feature-observer counts per bucket (FeatureObserver datapoints)."""
  MEMCACHE_KEY = 'featureob_popularity'
  MODEL_CLASS = models.FeatureObserver
  PROPERTY_CLASS = models.FeatureObserverHistogram

  def get(self):
    # Delegates entirely to FeatureHandler.get.
    super(FeatureObserverPopularityHandler, self).get()
class FeatureBucketsHandler(common.BaseHandler):
  """Serves the (bucket_id, property_name) pairs for a histogram as JSON."""

  def get(self, bucket_type):
    # Renamed from `type`, which shadowed the builtin. webapp2 passes the
    # URL path group positionally, so callers are unaffected.
    if bucket_type == 'cssprops':
      properties = sorted(
          models.CssPropertyHistogram.get_all().iteritems(), key=lambda x: x[1])
    else:
      properties = sorted(
          models.FeatureObserverHistogram.get_all().iteritems(), key=lambda x: x[1])
    self.response.headers['Content-Type'] = 'application/json;charset=utf-8'
    # Pairs are serialized compactly, sorted by property name.
    return self.response.write(json.dumps(properties, separators=(',',':')))
# Route table mapping each metrics endpoint to its handler.
app = webapp2.WSGIApplication([
  ('/data/timeline/cssanimated', AnimatedTimelineHandler),
  ('/data/timeline/csspopularity', PopularityTimelineHandler),
  ('/data/timeline/featurepopularity', FeatureObserverTimelineHandler),
  ('/data/csspopularity', CSSPopularityHandler),
  ('/data/cssanimated', CSSAnimatedHandler),
  ('/data/featurepopularity', FeatureObserverPopularityHandler),
  ('/data/blink/(.*)', FeatureBucketsHandler),
], debug=settings.DEBUG)
| 34.763948 | 83 | 0.728148 |
ac7b1e06c02b7c3175ff4d2f46959c4cb55924c9 | 1,458 | py | Python | zeppelin-jupyter-interpreter/src/main/resources/grpc/jupyter/kernel_client.py | lfrancke/zeppelin | 4fe32f5174f39dc630e08f8f444325e78afe3e1f | [
"Apache-2.0"
] | 1 | 2019-12-20T16:58:08.000Z | 2019-12-20T16:58:08.000Z | zeppelin-jupyter-interpreter/src/main/resources/grpc/jupyter/kernel_client.py | lfrancke/zeppelin | 4fe32f5174f39dc630e08f8f444325e78afe3e1f | [
"Apache-2.0"
] | null | null | null | zeppelin-jupyter-interpreter/src/main/resources/grpc/jupyter/kernel_client.py | lfrancke/zeppelin | 4fe32f5174f39dc630e08f8f444325e78afe3e1f | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import kernel_pb2
import kernel_pb2_grpc
def run():
    """Exercise the JupyterKernel gRPC service on localhost:50053.

    Streams and prints the output of a small timed plotting snippet,
    then of an introspection request ("range?").
    """
    channel = grpc.insecure_channel('localhost:50053')
    stub = kernel_pb2_grpc.JupyterKernelStub(channel)
    snippet = ("import time\nfor i in range(1,4):\n\ttime.sleep(1)\n\tprint(i)\n"
               "%matplotlib inline\nimport matplotlib.pyplot as plt\n"
               "data=[1,1,2,3,4]\nplt.figure()\nplt.plot(data)")
    for reply in stub.execute(kernel_pb2.ExecuteRequest(code=snippet)):
        print("output:" + reply.output)
    for reply in stub.execute(kernel_pb2.ExecuteRequest(code="range?")):
        print(reply)


if __name__ == '__main__':
    run()
| 39.405405 | 163 | 0.711248 |
19eee04ce73a133da9668aea353468bc0922378e | 846 | py | Python | tokenizer_hub/__init__.py | Yoctol/tokenizer-hub | 41e8b565d6c2b7b5ab2540da6c68aa19bddaf1fe | [
"MIT"
] | null | null | null | tokenizer_hub/__init__.py | Yoctol/tokenizer-hub | 41e8b565d6c2b7b5ab2540da6c68aa19bddaf1fe | [
"MIT"
] | null | null | null | tokenizer_hub/__init__.py | Yoctol/tokenizer-hub | 41e8b565d6c2b7b5ab2540da6c68aa19bddaf1fe | [
"MIT"
] | null | null | null | from itertools import permutations
# Numeric placeholder tokens reserved by every tokenizer in this package.
# "_<d>int_" marks an integer with d digits (1-12); "_<i>float<f>_" marks a
# float with i integer digits (1-12) and f fractional digits (1-4, f != i).
int_with_digits = ['_{}int_'.format(digits) for digits in range(1, 13)]
float_with_digits = [
    '_{}float{}_'.format(int_part, frac_part)
    for int_part, frac_part in permutations(range(1, 13), 2)
    if frac_part < 5
]
RESERVED_TOKENS = ['_int_', '_float_', '_num_'] + int_with_digits + float_with_digits
from .purewords_tokenizer import PureWordsTokenizer # noqa
from .parallel_jieba_tokenizer import ParallelJiebaTokenizer # noqa
from .chinese_char_tokenizer import ChineseCharTokenizer # noqa
from .pure_char_tokenizer import PureChineseCharTokenizer # noqa
from .custom_jieba_tokenizer import CustomJiebaTokenizer # noqa
from .nltk_tokenizer import NltkTokenizer # noqa
from .nltk_custom_jieba_tokenizer import NltkCustomJiebaTokenizer # noqa
from .add_words import AddWords # noqa
| 32.538462 | 73 | 0.760047 |
c61a773c379ba647f47a3de51dcb3f7ba5e84567 | 101,244 | py | Python | hi_pipeline.py | AMIGA-IAA/hcg_hi_pipeline | de14c6066b06f055cecc3aa89a615ff8c0fd002e | [
"MIT"
] | 1 | 2020-02-20T08:56:48.000Z | 2020-02-20T08:56:48.000Z | hi_pipeline.py | AMIGA-IAA/hcg_hi_pipeline | de14c6066b06f055cecc3aa89a615ff8c0fd002e | [
"MIT"
] | 5 | 2019-12-11T12:02:22.000Z | 2022-03-29T08:46:05.000Z | hi_pipeline.py | AMIGA-IAA/hcg_hi_pipeline | de14c6066b06f055cecc3aa89a615ff8c0fd002e | [
"MIT"
] | 1 | 2022-03-23T13:11:01.000Z | 2022-03-23T13:11:01.000Z | import time
import numpy
import shutil
import readline
import logging
import ConfigParser
from ast import literal_eval
import glob
import collections
# Read configuration file
def read_config(configfile):
    '''
    Parses the configuration file of parameters passed when the pipeline is executed.
    Option values are converted to Python literals where possible (via
    literal_eval); anything that fails to parse is kept as a plain string.
    Input:
        configfile = Path to configuration file. (String)
    Output:
        config = Dictionary of section -> {option: value}. (Dict)
        config_raw = The instance of the parser.
    '''
    if not os.path.isfile(configfile):
        logger.critical('configfile: {} not found'.format(configfile))
        sys.exit(-1)
    config_raw = ConfigParser.RawConfigParser()
    config_raw.read(configfile)
    config = config_raw._sections
    for key in config.keys():
        # Python 2's ConfigParser stores the section name under '__name__';
        # it is absent in Python 3, hence the default to avoid a KeyError.
        config[key].pop('__name__', None)
        for key2 in config[key].keys():
            try:
                config[key][key2] = literal_eval(config[key][key2])
            except (ValueError, SyntaxError):
                # Not a Python literal (e.g. a bare antenna name): keep as string.
                pass
    return config,config_raw
# Utilities
def makedir(pathdir):
    '''
    Makes new directory.
    Input:
        pathdir = Path for new directory to create. (String)
    '''
    try:
        os.mkdir(pathdir)
        logger.info('Create directory: {}'.format(pathdir))
    except OSError:
        # Typically the directory already exists; only debug-log it so
        # repeated pipeline runs stay quiet. The previous bare `except:`
        # would also have swallowed KeyboardInterrupt/SystemExit.
        logger.debug('Cannot create directory: {}'.format(pathdir))
def rmdir(pathdir):
    '''
    Removes an entire directory.
    Input:
        pathdir = Path of the directory to be removed. (String)
    '''
    if os.path.exists(pathdir):
        try:
            shutil.rmtree(pathdir)
            logger.info('Deleted: {0}'.format(pathdir))
        except OSError:
            # Narrowed from a bare `except:`: only filesystem errors
            # (permissions, races) are expected and safe to ignore here.
            logger.debug('Could not delete: {0}'.format(pathdir))
def rmfile(pathdir):
    '''
    Removes a file.
    Input:
        pathdir = Path of the file to be removed. (String)
    '''
    if os.path.exists(pathdir):
        try:
            os.remove(pathdir)
            logger.info('Deleted: {0}'.format(pathdir))
        except OSError:
            # Narrowed from a bare `except:`: only filesystem errors
            # (permissions, races) are expected and safe to ignore here.
            logger.debug('Could not delete: {0}'.format(pathdir))
#User input function
def uinput(prompt, default=''):
'''
Prompts the user to input a string and provides a default.
Input:
prompt = Input prompt. (String)
default = Default input. (String)
Output:
Final string entered by user. (String)
'''
readline.set_startup_hook(lambda: readline.insert_text(default))
try:
return raw_input(prompt)
finally:
readline.set_startup_hook()
# Set up the logger
def get_logger(
        LOG_FORMAT = '%(asctime)s | %(levelname)s | %(message)s',
        DATE_FORMAT = '%Y-%m-%d %H:%M:%S',
        LOG_NAME = 'logger',
        LOG_FILE_INFO = 'mylog.log',
        LOG_FILE_ERROR = 'errors.log'):
    """Set up and return a logger with UTC timestamps.

    Records go both to the console and, at INFO level and above, to
    LOG_FILE_INFO (opened in append mode).
    NOTE(review): LOG_FILE_ERROR is accepted but no error-level handler is
    attached, so that file is never written -- confirm if intended.
    """
    logger = logging.getLogger(LOG_NAME)
    formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
    # Render timestamps in UTC rather than local time.
    logging.Formatter.converter = time.gmtime

    # Console output; remove this handler to silence the terminal.
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    logger.addHandler(console)

    # Persistent log file with all INFO-and-above records.
    info_file = logging.FileHandler(LOG_FILE_INFO, mode='a+')
    info_file.setFormatter(formatter)
    info_file.setLevel(logging.INFO)
    logger.addHandler(info_file)

    logger.setLevel(logging.INFO)
    return logger
def import_data(data_files, msfile):
    """
    Import VLA archive files from a location to a single MS.
    Input:
        data_files = Paths to the VLA archive files. (List/Array of Strings)
        msfile = Path where the MS will be created. (String)
    """
    logger.info('Starting import vla data')
    sum_dir = './summary/'
    makedir(sum_dir)
    # Remove any MS left over from a previous run so importvla starts clean.
    rmdir(msfile)
    logger.info('Input files: {}'.format(data_files))
    logger.info('Output msfile: {}'.format(msfile))
    # importvla is a CASA task available in the CASA global namespace; the
    # command string is logged before execution for reproducibility.
    command = "importvla(archivefiles = {0}, vis = '{1}')".format(data_files, msfile)
    logger.info('Executing command: '+command)
    exec(command)
    logger.info('Completed import vla data')
def listobs_sum(msfile):
    """
    Write the listobs summary to file.
    Input:
        msfile = Path to the MS to summarize. (String)
    """
    logger.info('Starting listobs summary.')
    sum_dir = './summary/'
    makedir(sum_dir)
    listobs_file = sum_dir+msfile+'.listobs.summary'
    # Bug fix: this function previously also called rmdir(msfile), deleting
    # the very measurement set it is about to summarize (copy-paste from
    # import_data). Only the stale summary file should be removed.
    rmfile(listobs_file)
    logger.info('Writing listobs summary of data set to: {}'.format(listobs_file))
    # listobs is a CASA task available in the CASA global namespace.
    listobs(vis=msfile, listfile=listobs_file)
    logger.info('Completed listobs summary.')
def get_obsfreq(msfile):
    """
    Returns freq of first and last channels, channel resolution and number of channels (first spw) in GHz.
    Input:
        msfile = Path to the MS. (String)
    Output:
        freq_ini = Start frequency. (Float)
        freq_end = Final frequency. (Float)
        chan_res = Channel width. (Float)
        nchan = Number of channels. (Integer)
    """
    # msmd is the CASA ms-metadata tool, available as a global inside a
    # CASA session. Raw frequencies are in Hz, hence the /1e9 to GHz.
    msmd.open(msfile)
    nspw = msmd.nspw()
    freq_ini = msmd.chanfreqs(0)[0]/1e9
    freq_end = msmd.chanfreqs(nspw-1)[-1]/1e9
    chan_res = msmd.chanwidths(0)[0]/1e9
    nchan = len(msmd.chanwidths(0))
    msmd.done()
    return freq_ini, freq_end, chan_res, nchan
def find_mssources(msfile):
    """
    Extract source names from msfile metadata.
    Output format is a comma-separated string.
    Input:
        msfile = Path to the MS. (String)
    Output:
        mssources = All the fields observed in the MS separated by ','. (String)
    """
    # msmd is the CASA ms-metadata tool (global within a CASA session).
    msmd.open(msfile)
    # Fix: use the module-level `numpy` import rather than the `np` alias,
    # which only exists inside the CASA interactive namespace.
    mssources = ','.join(numpy.sort(msmd.fieldnames()))
    msmd.done()
    logger.info('Sources in MS {0}: {1}'.format(msfile, mssources))
    return mssources
def get_project(msfile):
    """
    Extract project code from msfile metadata.
    Input:
        msfile = Path to the MS. (String)
    Output:
        Project identifier. (String)
    """
    # tb is the CASA table tool (global within a CASA session).
    tb.open(msfile+'/OBSERVATION')
    project = tb.getcol('PROJECT')
    tb.close()
    # Only the first observation row's project code is returned; assumes a
    # single-project MS.
    return project[0]
def get_msinfo(msfile):
    """
    Extracts and prints basic information from the measurement set.
    Input:
        msfile = Path to the MS. (String)
    Output:
        msinfo = Summary of the the observations. (Ordered dictionary)
    """
    logger.info('Reading ms file information for MS: {0}'.format(msfile))
    msinfo = collections.OrderedDict()
    msinfo['msfile'] = msfile
    msinfo['project'] = get_project(msfile)
    msinfo['mssources'] = find_mssources(msfile)
    freq_ini, freq_end, chan_res, nchan = get_obsfreq(msfile)
    msinfo['freq_ini'] = freq_ini
    msinfo['freq_end'] = freq_end
    msinfo['chan_res'] = chan_res
    msinfo['nchan'] = nchan
    # vishead is a CASA task; 'spw_name' returns one name per spectral window.
    msinfo['num_spw'] = len(vishead(msfile, mode = 'list', listitems = ['spw_name'])['spw_name'][0])

    # Print summary
    logger.info('> Sources ({0}): {1}'.format(len(msinfo['mssources'].split(',')),
                                              msinfo['mssources']))
    logger.info('> Number of spw: {0}'.format(msinfo['num_spw']))
    logger.info('> Channels per spw: {0}'.format(msinfo['nchan']))
    return msinfo
# Plotting
def plot_elevation(msfile,config):
    """
    Plots the elevation of the fields in each SPW as a function of time.
    One PNG per spectral window is written to ./plots/.
    Input:
        msfile = Path to the MS. (String)
        config = The parameters read from the configuration file. (Ordered dictionary)
    """
    logger.info('Starting plotting elevation.')
    plots_obs_dir = './plots/'
    makedir(plots_obs_dir)
    plot_file = plots_obs_dir+'{0}_elevation.png'.format(msfile)
    logger.info('Plotting elevation to: {}'.format(plot_file))
    # Plot options come from the 'plot_elevation' section of the config file.
    elev = config['plot_elevation']
    avgtime = elev['avgtime']
    correlation = elev['correlation']
    # NOTE(review): 'width' is read but never passed to plotms -- confirm
    # whether it was meant to set the plot size.
    width = elev['width']
    min_elev = elev['min_elev']
    max_elev = elev['max_elev']
    showgui = False
    # plotms is a CASA task (global in a CASA session); iteraxis='spw'
    # produces one exported plot per spectral window.
    plotms(vis=msfile, xaxis='time', yaxis='elevation',
           correlation=correlation, coloraxis = 'field',
           symbolsize=5, plotrange=[-1,-1, min_elev, max_elev],
           averagedata=True, avgtime=str(avgtime), plotfile = plot_file,
           expformat = 'png', customsymbol = True, symbolshape = 'circle',
           overwrite=True, showlegend=False, showgui=showgui,
           exprange='all', iteraxis='spw')
    logger.info('Completed plotting elevation.')
def plot_ants(msfile):
    """
    Plots the layout of the antennae during the observations.
    The figure is written to ./plots/.
    Input:
        msfile = Path to the MS. (String)
    """
    logger.info('Starting plotting antenna positions.')
    plots_obs_dir = './plots/'
    makedir(plots_obs_dir)
    plot_file = plots_obs_dir+'{0}_antpos.png'.format(msfile)
    logger.info('Plotting antenna positions to: {}'.format(plot_file))
    # plotants is a CASA task (global in a CASA session).
    plotants(vis=msfile,figfile=plot_file)
    logger.info('Completed plotting antenna positions.')
def manual_flags():
    """
    Apply manual flags from the file 'manual_flags.py'.
    Each line of that file is expected to be a complete CASA command, which
    is logged and executed verbatim. In interactive mode the user must
    confirm before the flags are applied.
    """
    logger.info('Starting manual flagging.')
    if interactive:
        print("\nManual flags from 'manual_flags.py' are about to be applied.")
        print("It is strongly recommended that you inspect the data and modify (and save) 'manual_flags.py' appropriately before proceeding.\n")
        resp = str(raw_input('Do you want to proceed (y/n): '))
        while resp.lower() not in ['yes','ye','y']:
            resp = str(raw_input('Do you want to proceed (y/n): '))
    logger.info('Applying flags from manual_flags.py')
    try:
        # Context manager guarantees the file handle is closed even if one
        # of the executed commands raises (the previous version leaked it).
        with open('manual_flags.py', 'r') as flag_file:
            lines = flag_file.readlines()
        if lines == []:
            logger.warning("The file is empty. Continuing without manual flagging.")
        else:
            for command in lines:
                logger.info('Executing command: '+command)
                exec(command)
            logger.info('Completed manual flagging.')
    except IOError:
        logger.warning("'manual_flags.py' does not exist. Continuing without manual flagging.")
def base_flags(msfile, config):
    """
    Sets basic initial data flags: shadowed antennas, zero-amplitude data,
    and the first seconds of every scan (quacking).
    Input:
        msfile = Path to the MS. (String)
        config = The parameters read from the configuration file. (Ordered dictionary)
    """
    logger.info('Starting basic flagging.')
    flag = config['flagging']
    tol = flag['shadow_tol']        # shadowing tolerance (metres)
    quack_int = flag['quack_int']   # seconds to discard at each scan start
    logger.info('Flagging antennae with more than {} m of shadowing.'.format(tol))
    # flagdata is a CASA task; each command is logged before execution.
    command = "flagdata(vis='{0}', mode='shadow', tolerance={1}, flagbackup=False)".format(msfile,tol)
    logger.info('Executing command: '+command)
    exec(command)
    logger.info('Flagging zero amplitude data.')
    command = "flagdata(vis='{}', mode='clip', clipzeros=True, flagbackup=False)".format(msfile)
    logger.info('Executing command: '+command)
    exec(command)
    logger.info('Flagging first {} s of every scan.'.format(quack_int))
    command = "flagdata(vis='{0}', mode='quack', quackinterval={1}, quackmode='beg', flagbackup=False)".format(msfile,quack_int)
    logger.info('Executing command: '+command)
    exec(command)
    logger.info('Completed basic flagging.')
def tfcrop(msfile,config):
    """
    Runs CASA's TFcrop flagging algorithm.
    Input:
        msfile = Path to the MS. (String)
        config = The parameters read from the configuration file. (Ordered dictionary)
    """
    flag = config['flagging']
    logger.info('Starting running TFCrop.')
    # Time/frequency cutoffs come from the 'flagging' section of the config.
    command = "flagdata(vis='{0}', mode='tfcrop', action='apply', display='', timecutoff={1}, freqcutoff={2}, flagbackup=False)".format(msfile,flag['timecutoff'],flag['freqcutoff'])
    logger.info('Executing command: '+command)
    exec(command)
    logger.info('Completed running TFCrop.')
def rflag(msfile,config):
    """
    Runs CASA's rflag flagging algorithm.
    Input:
        msfile = Path to the MS. (String)
        config = The parameters read from the configuration file. (Ordered dictionary)
    """
    flag = config['flagging']
    thresh = flag['rthresh']
    logger.info('Starting running rflag with a threshold of {}.'.format(thresh))
    command = "flagdata(vis='{0}', mode='rflag', action='apply', datacolumn='corrected', freqdevscale={1}, timedevscale={1}, display='', flagbackup=False)".format(msfile,thresh)
    logger.info('Executing command: '+command)
    exec(command)
    # NOTE(review): the command is executed twice. A second rflag pass over
    # already-flagged data can be a deliberate iterative RFI strategy, but
    # if not intended the duplicate line below should be removed -- confirm.
    exec(command)
    logger.info('Completed running rflag.')
def extend_flags(msfile):
    """
    Extends existing flags across polarizations and in time/frequency.
    Input:
        msfile = Path to the MS. (String)
    """
    # (Removed the unused local `flag_version = 'extended'`.)
    logger.info('Starting extending existing flags.')
    # First extend flags across all polarizations...
    command = "flagdata(vis='{}', mode='extend', spw='', extendpols=True, action='apply', display='', flagbackup=False)".format(msfile)
    logger.info('Executing command: '+command)
    exec(command)
    # ...then grow flags in time (75%) and frequency (90%).
    command = "flagdata(vis='{}', mode='extend', spw='', growtime=75.0, growfreq=90.0, action='apply', display='', flagbackup=False)".format(msfile)
    logger.info('Executing command: '+command)
    exec(command)
    logger.info('Completed extending existing flags.')
def flag_sum(msfile,name):
    """
    Writes a summary of the current flags to file.
    Input:
        msfile = Path to the MS. (String)
        name = Root of filename where flags summary will be saved. (String)
    """
    sum_dir = './summary/'
    makedir(sum_dir)
    # Use a separate name for the path (the original reused `out_file` for
    # both the path string and the open handle).
    out_path = sum_dir+'{0}.{1}flags.summary'.format(msfile,name)
    logger.info('Starting writing flag summary to: {}.'.format(out_path))
    # flagdata is a CASA task; mode='summary' returns nested flag statistics.
    flag_info = flagdata(vis=msfile, mode='summary')
    # Context manager guarantees the file is closed even if a key is missing.
    with open(out_path, 'w') as out_file:
        out_file.write('Total flagged data: {:.2%}\n\n'.format(flag_info['flagged']/flag_info['total']))
        logger.info('Total flagged data: {:.2%}'.format(flag_info['flagged']/flag_info['total']))
        out_file.write('Flagging per spectral window\n')
        for spw in flag_info['spw'].keys():
            out_file.write('SPW {0}: {1:.2%}\n'.format(spw,flag_info['spw'][spw]['flagged']/flag_info['spw'][spw]['total']))
        out_file.write('\nFlagging per field\n')
        for field in flag_info['field'].keys():
            out_file.write('{0}: {1:.2%}\n'.format(field,flag_info['field'][field]['flagged']/flag_info['field'][field]['total']))
        out_file.write('\nFlagging per antenna\n')
        for ant in flag_info['antenna'].keys():
            out_file.write('{0}: {1:.2%}\n'.format(ant,flag_info['antenna'][ant]['flagged']/flag_info['antenna'][ant]['total']))
    logger.info('Completed writing flag summary.')
def restore_flags(msfile,name):
    """
    Restore the flag version corresponding to the named file.
    Input:
        msfile = Path to the MS. (String)
        name = Root of filename for the flag version. (String)
    """
    logger.info('Restoring flag version from: {}.'.format(name))
    # flagmanager is a CASA task; the command is logged before execution.
    cmd = "flagmanager(vis='%s', mode='restore', versionname='%s')" % (msfile, name)
    logger.info('Executing command: '+cmd)
    exec(cmd)
    logger.info('Completed restoring flag version.')
def save_flags(msfile,name):
    """
    Save the current flag state under the given version name.
    Input:
        msfile = Path to the MS. (String)
        name = Root of filename for the flag version. (String)
    """
    logger.info('Saving flag version as: {}.'.format(name))
    # flagmanager is a CASA task; the command is logged before execution.
    cmd = "flagmanager(vis='%s', mode='save', versionname='%s')" % (msfile, name)
    logger.info('Executing command: '+cmd)
    exec(cmd)
    logger.info('Completed saving flag version.')
def rm_flags(msfile,name):
    """
    Delete the named flag version.
    Input:
        msfile = Path to the MS. (String)
        name = Root of filename for the flag version. (String)
    """
    logger.info('Removing flag version: {}.'.format(name))
    # flagmanager is a CASA task; the command is logged before execution.
    cmd = "flagmanager(vis='%s', mode='delete', versionname='%s')" % (msfile, name)
    logger.info('Executing command: '+cmd)
    exec(cmd)
    logger.info('Completed removing flag version.')
def select_refant(msfile,config,config_raw,config_file):
    """
    Checks if a reference antenna is set, if it has not been then the user is queried to set it.
    The chosen antenna is written back to the configuration file.
    Input:
        msfile = Path to the MS. (String)
        config = The parameters read from the configuration file. (Ordered dictionary)
        config_raw = The instance of the parser.
        config_file = Path to configuration file. (String)
    """
    logger.info('Starting reference antenna selection.')
    calib = config['calibration']
    # Read the valid antenna names from the MS (tb is the CASA table tool).
    tb.open(msfile+'/ANTENNA')
    ant_names = tb.getcol('NAME')
    tb.close()
    if calib['refant'] not in ant_names:
        # No valid refant in the config: prompt until a valid name is given.
        logger.warning('No valid reference antenna set. Requesting user input.')
        first = True
        print('\n\n\n')
        while calib['refant'] not in ant_names:
            if not first:
                print('\n\nString entered is not a valid antenna name.')
            print('Valid antenna names:\n{}\n'.format(ant_names))
            calib['refant'] = str(raw_input('Please select a reference antenna by name: '))
            first = False
        # Persist the choice back to the configuration file on disk.
        logger.info('Updating config file ({0}) to set reference antenna as {1}.'.format(config_file,calib['refant']))
        config_raw.set('calibration','refant',calib['refant'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
        logger.info('Completed reference antenna selection.')
    else:
        logger.info('Reference antenna already set as: {}.'.format(calib['refant']))
def set_fields(msfile,config,config_raw,config_file):
"""
Checks if the field intentions have already been set, if not then the user is queried.
Input:
msfile = Path to the MS. (String)
config = The parameters read from the configuration file. (Ordered dictionary)
config_raw = The instance of the parser.
config_file = Path to configuration file. (String)
"""
logger.info('Starting set field purposes.')
calib = config['calibration']
tb.open(msfile+'/FIELD')
field_names = tb.getcol('NAME')
tb.close()
tb.open('{}/SPECTRAL_WINDOW'.format(msfile))
spw_names = tb.getcol('NAME')
if not config['importdata']['jvla']:
spw_IDs = tb.getcol('DOPPLER_ID')
else:
spw_IDs = tb.getcol('FREQ_GROUP')
nspw = len(spw_IDs)
tb.close()
std_flux_mods = ['3C48_L.im', '3C138_L.im', '3C286_L.im', '3C147_L.im']
std_flux_names = {'0134+329': '3C48_L.im', '0137+331': '3C48_L.im', '3C48': '3C48_L.im', 'J0137+3309': '3C48_L.im',
'0518+165': '3C138_L.im', '0521+166': '3C138_L.im', '3C138': '3C138_L.im', 'J0521+1638': '3C138_L.im',
'1328+307': '3C286_L.im', '1331+305': '3C286_L.im', '3C286': '3C286_L.im', 'J1331+3030': '3C286_L.im',
'0538+498': '3C147_L.im', '0542+498': '3C147_L.im', '3C147': '3C147_L.im', 'J0542+4951': '3C147_L.im'}
change_made = False
if len(calib['targets']) == 0:
if not interactive:
logger.critical('There are no targets listed in the parameters file.')
sys.exit(-1)
else:
logger.warning('No target field(s) set. Requesting user input.')
print('\n\n')
while True:
target = ''
print('Valid field names:\n{}\n'.format(field_names))
target = str(raw_input('Please select a target field by name: '))
if target not in field_names:
print('\n\nString entered is not a valid field name.')
continue
else:
calib['targets'].append(target)
logger.info('{} set as a target field.'.format(target))
resp = ''
while (resp.lower() not in ['yes','ye','y']) and (resp.lower() not in ['no','n']) :
resp = str(raw_input('Do you want to add another target (y/n): '))
if resp.lower() in ['yes','ye','y']:
continue
else:
break
change_made = True
else:
logger.info('Target field(s) already set as: {}.'.format(calib['targets']))
if interactive:
resp = str(raw_input('Do you want to add another target (y/n): '))
while (resp.lower() not in ['yes','ye','y']) and (resp.lower() not in ['no','n']) :
resp = str(raw_input('Do you want to add another target (y/n): '))
if resp.lower() in ['yes','ye','y']:
while True:
target = ''
print('Valid field names:\n{}\n'.format(field_names))
target = str(raw_input('Please select a target field by name: '))
if target not in field_names:
print('\n\nString entered is not a valid field name.')
continue
else:
calib['targets'].append(target)
logger.info('{} set as a target field.'.format(target))
resp = ''
while (resp.lower() not in ['yes','ye','y']) and (resp.lower() not in ['no','n']) :
resp = str(raw_input('Do you want to add another target (y/n): '))
if resp.lower() in ['yes','ye','y']:
continue
else:
break
change_made = True
if len(calib['target_names']) == 0 or len(calib['target_names']) != len(calib['targets']):
if len(calib['target_names']) < len(calib['targets']):
logger.warning('There are more target fields than simple target names. Appending blanks.')
while len(calib['target_names']) < len(calib['targets']):
calib['target_names'].append('')
elif len(calib['target_names']) > len(calib['targets']):
logger.warning('There are more simple target names than target fields.')
logger.info('Current simple target names: {}'.format(calib['target_names']))
logger.warning('The simple target name list will now be truncated to match the number of targets.')
calib['target_names'] = calib['target_names'][:len(calib['targets'])]
change_made = True
if interactive:
print('Current simple target names set as:')
print(calib['target_names'])
print('For the targets:')
print(calib['targets'])
resp = ''
while (resp.lower() not in ['yes','ye','y']) and (resp.lower() not in ['no','n']) :
resp = str(raw_input('Do you want to revise these names (y/n): '))
if resp.lower() in ['yes','ye','y']:
print('Note: Target names should NOT include spaces.')
for i in range(len(calib['target_names'])):
calib['target_names'][i] = uinput('Enter simple name for target {}: '.format(calib['targets'][i]), calib['target_names'][i])
else:
pass
if len(calib['target_names']) != len(calib['targets']):
logger.warning('The number of targets ({0}) and simple names ({1}) do not match.'.format(len(calib['targets']),len(calib['target_names'])))
logger.info('The original field names will be used.')
logger.info('Replacing simple name: {}'.format(calib['target_names']))
logger.info('With original field names: {}'.format(calib['targets']))
calib['target_names'] = calib['targets']
change_made = True
elif numpy.any(numpy.array(calib['target_names'],dtype='str') == ''):
inx = numpy.where(numpy.array(calib['target_names'],dtype='str') == '')[0]
logger.warning('The following target have no simple names set: {}'.format(calib['targets'][inx]))
logger.info('The original field names will be used.')
calib['target_names'][inx] = calib['targets'][inx]
change_made = True
if len(calib['targets']) != nspw:
msmd.open(msfile)
spw_IDs = []
for target in calib['targets']:
spw_IDs.extend(list(msmd.spwsforfield(target)))
spw_IDs = list(set(list(spw_IDs)))
spw_names = msmd.namesforspws(spw_IDs)
nspw = len(spw_IDs)
msmd.close()
flux_cal_names_bad = False
for i in range(len(calib['fluxcal'])):
if calib['fluxcal'][i] not in field_names:
flux_cal_names_bad = True
if not interactive:
logger.critical('Illegal name for flux calibrator: {}'.format(calib['fluxcal'][i]))
sys.exit(-1)
if flux_cal_names_bad or len(calib['fluxcal']) != nspw:
if nspw == 1:
if not interactive:
logger.critical('No valid flux calibrator set.')
sys.exit(-1)
else:
logger.warning('No valid flux calibrator set. Requesting user input.')
if len(calib['fluxcal']) == 0:
calib['fluxcal'].append('')
first = True
while calib['fluxcal'][0] not in field_names:
if not first:
print('\n\nString entered is not a valid field name.')
print('Valid field names:\n{}\n'.format(field_names))
calib['fluxcal'][0] = str(raw_input('Please select a flux calibrator by name: '))
first = False
change_made = True
else:
if not interactive:
logger.critical('The number of flux calibrators does not match the number of spectral windows ({}).'.format(nspw))
logger.info('Flux calibrators: {}'.format(calib['fluxcal']))
sys.exit(-1)
else:
if len(calib['fluxcal']) != nspw:
logger.warning('Incorrect number of flux calibrators set. Requesting user input.')
else:
logger.warning('At least one flux calibrator is incorrect. Please revise the list.')
logger.info('Current calibrators list: {}'.format(calib['fluxcal']))
if len(calib['fluxcal']) > nspw:
logger.warning('Too many flux calibrators set.')
logger.warning('The following will be truncated: {}'.format(calib['fluxcal'][nspw-1:]))
calib['fluxcal'] = calib['fluxcal'][:nspw]
if len(calib['fluxcal']) < nspw:
logger.warning('Too few flux calibrators set.')
for i in range(len(calib['fluxcal']),nspw):
calib['fluxcal'].append('')
i = 0
first = True
print('Valid field names:\n{}\n'.format(field_names))
while i in range(len(calib['fluxcal'])):
if first:
print('SPW {0}: {1}'.format(spw_IDs[i],spw_names[i]))
calib['fluxcal'][i] = uinput('Enter flux calibrator for SPW {}: '.format(spw_IDs[i], default=calib['fluxcal'][i]))
if calib['fluxcal'][i] not in field_names:
print('\n\nString entered is not a valid field name.')
print('Valid field names:\n{}\n'.format(field_names))
first = False
else:
i += 1
first = True
change_made = True
logger.info('Flux calibrators set as: {}.'.format(calib['fluxcal']))
else:
logger.info('Flux calibrator already set as: {}.'.format(calib['fluxcal']))
flux_mod_names_bad = False
for i in range(len(calib['fluxmod'])):
if calib['fluxmod'][i] not in std_flux_mods:
flux_mod_names_bad = True
if not interactive:
logger.error('Non-standard name for flux model: {}'.format(calib['fluxmod'][i]))
if flux_mod_names_bad or len(calib['fluxmod']) != len(calib['fluxcal']):
logger.warning('Flux calibrator models do not match flux calibrators.')
else:
logger.info('Flux models already set as: {}.'.format(calib['fluxmod']))
if len(calib['fluxmod']) == 0:
if not interactive:
logger.warning('There is no flux calibrator model listed in the parameters file.')
flux_mod_names_bad = False
for i in range(len(calib['fluxcal'])):
if calib['fluxcal'][i] in std_flux_names.keys():
calib['fluxmod'].append(std_flux_names[calib['fluxcal'][i]])
else:
flux_mod_names_bad = True
if not interactive:
logger.critical('Some flux calibrator models cannot be automatcially assigned.')
sys.exit(-1)
if not flux_mod_names_bad:
logger.info('Flux models automatically set as: {}.'.format(calib['fluxmod']))
change_made = True
if flux_mod_names_bad or len(calib['fluxmod']) != len(calib['fluxcal']):
if not interactive:
if len(calib['fluxmod']) != len(calib['fluxcal']):
logger.critical('The number of models does not match the number of flux calibrators.')
logger.info('Flux calibrators: {}'.format(calib['fluxcal']))
logger.info('Flux calibrator models: {}'.format(calib['fluxmod']))
sys.exit(-1)
elif calib['man_mod']:
logger.warning('Proceeding with non-standard flux model assumed to be a manual flux scale.')
else:
logger.critical('Non-standard flux models in parameters and not indicated as manual flux scales.')
logger.info('Flux calibrators: {}'.format(calib['fluxcal']))
logger.info('Flux calibrator models: {}'.format(calib['fluxmod']))
sys.exit(-1)
else:
if len(calib['fluxcal']) == 1:
if len(calib['fluxmod']) == 0:
calib['fluxmod'].append('')
logger.warning('No valid flux model set. Requesting user input.')
while calib['fluxmod'][0] not in std_flux_mods:
print('Usual flux calibrator models will be 3C48_L.im, 3C138_L.im, 3C286_L.im, or 3C147_L.im.\n')
calib['fluxmod'][0] = str(raw_input('Please select a flux model name: '))
if calib['fluxmod'][0] not in std_flux_mods:
resp = str(raw_input('The model name provided is not one of the 3 expected options.\nDo you want to proceed with the model {} ?'.format(calib['fluxmod'][0])))
if resp.lower() in ['yes','ye','y']:
resp = ''
while resp.lower() not in ['yes','ye','y'] and resp.lower() not in ['no','n']:
resp = str(raw_input('Is this a manually defined flux model? '))
if resp.lower() in ['yes','ye','y']:
calib['man_mod'] = True
else:
calib['man_mod'] = False
break
else:
continue
else:
if len(calib['fluxmod']) != len(calib['fluxcal']):
logger.warning('Incorrect number of flux models set. Requesting user input.')
else:
logger.warning('At least one flux model is incorrect. Please revise the list.')
logger.info('Current models list: {}'.format(calib['fluxmod']))
if len(calib['fluxmod']) > len(calib['fluxcal']):
logger.warning('Too many flux models set.')
logger.warning('The following will be truncated: {}'.format(calib['fluxmod'][len(calib['fluxcal'])-1:]))
calib['fluxmod'] = calib['fluxmod'][:len(calib['fluxcal'])]
if len(calib['fluxmod']) < len(calib['fluxcal']):
logger.warning('Too few flux models set.')
for i in range(len(calib['fluxmod']),len(calib['fluxcal'])):
calib['fluxmod'].append('')
i = 0
while i in range(len(calib['fluxmod'])):
print('Usual flux calibrator models will be 3C48_L.im, 3C138_L.im, or 3C286_L.im.\n')
calib['fluxmod'][i] = uinput('Enter flux model for calibrator {}: '.format(calib['fluxcal'][i], default=calib['fluxmod'][i]))
if calib['fluxmod'][i] not in std_flux_mods:
resp = str(raw_input('The model name provided is not one of the 3 expected options.\nDo you want to proceed with the model {} ?'.format(calib['fluxmod'][i])))
if resp.lower() in ['yes','ye','y']:
resp = ''
while resp.lower() not in ['yes','ye','y'] and resp.lower() not in ['no','n']:
resp = str(raw_input('Is this a manually defined flux model? '))
if resp.lower() in ['yes','ye','y']:
calib['man_mod'] = True
else:
calib['man_mod'] = False
i += 1
else:
i += 1
change_made = True
logger.info('Flux models set as: {}.'.format(calib['fluxmod']))
band_cal_names_bad = False
for i in range(len(calib['bandcal'])):
if calib['bandcal'][i] not in field_names:
band_cal_names_bad = True
if not interactive:
logger.critical('Illegal name for bandpass calibrator: {}'.format(calib['bandcal'][i]))
sys.exit(-1)
if band_cal_names_bad or len(calib['bandcal']) != nspw:
if nspw == 1:
if not interactive:
logger.critical('No valid bandpass calibrator set.')
sys.exit(-1)
else:
logger.warning('No valid bandpass calibrator set. Requesting user input.')
if len(calib['bandcal']) == 0:
calib['bandcal'].append('')
first = True
while calib['bandcal'][0] not in field_names:
if not first:
print('\n\nString entered is not a valid field name.')
print('Valid field names:\n{}\n'.format(field_names))
calib['bandcal'][0] = str(raw_input('Please select a bandpass calibrator by name: '))
first = False
change_made = True
else:
if not interactive:
logger.critical('The number of bandpass calibrators does not match the number of spectral windows ({}).'.format(nspw))
logger.info('Bandpass calibrators: {}'.format(calib['bandcal']))
sys.exit(-1)
else:
if len(calib['bandcal']) != nspw:
logger.warning('Incorrect number of bandpass calibrators set. Requesting user input.')
else:
logger.warning('At least one bandpass calibrator is incorrect. Please revise the list.')
logger.info('Current calibrators list: {}'.format(calib['bandcal']))
if len(calib['bandcal']) > nspw:
logger.warning('Too many bandpass calibrators set.')
logger.warning('The following will be truncated: {}'.format(calib['bandcal'][nspw-1:]))
calib['bandcal'] = calib['bandcal'][:nspw]
if len(calib['bandcal']) < nspw:
logger.warning('Too few bandpass calibrators set.')
for i in range(len(calib['bandcal']),nspw):
calib['bandcal'].append('')
i = 0
first = True
print('Valid field names:\n{}\n'.format(field_names))
while i in range(len(calib['bandcal'])):
if first:
print('SPW {0}: {1}'.format(spw_IDs[i],spw_names[i]))
calib['bandcal'][i] = uinput('Enter bandpass calibrator for SPW {}: '.format(spw_IDs[i], default=calib['bandcal'][i]))
if calib['bandcal'][i] not in field_names:
print('\n\nString entered is not a valid field name.')
print('Valid field names:\n{}\n'.format(field_names))
first = False
else:
i += 1
first = True
change_made = True
logger.info('Bandpass calibrators set as: {}.'.format(calib['bandcal']))
else:
logger.info('Bandpass calibrator already set as: {}.'.format(calib['bandcal']))
phase_cal_names_bad = False
for i in range(len(calib['phasecal'])):
if calib['phasecal'][i] not in field_names:
phase_cal_names_bad = True
if not interactive:
logger.critical('Illegal name for phase calibrator: {}'.format(calib['phasecal'][i]))
sys.exit(-1)
if phase_cal_names_bad or len(calib['phasecal']) != len(calib['targets']):
if len(calib['targets']) == 1:
if not interactive:
logger.critical('No valid phase calibrator set.')
sys.exit(-1)
else:
logger.warning('No valid phase calibrator set. Requesting user input.')
if len(calib['phasecal']) == 0:
calib['phasecal'].append('')
first = True
while calib['phasecal'][0] not in field_names:
if not first:
print('\n\nString entered is not a valid field name.')
print('Valid field names:\n{}\n'.format(field_names))
calib['phasecal'][0] = str(raw_input('Please select a phase calibrator by name: '))
first = False
change_made = True
else:
if not interactive:
logger.critical('The number of phase calibrators does not match the number of targets.')
logger.info('Phase calibrators: {}'.format(calib['phasecal']))
logger.info('Targets: {}'.format(calib['targets']))
sys.exit(-1)
else:
if len(calib['phasecal']) != len(calib['targets']):
logger.warning('Incorrect number of phase calibrators set. Requesting user input.')
else:
logger.warning('At least one phase calibrator is incorrect. Please revise the list.')
logger.info('Current calibrators list: {}'.format(calib['phasecal']))
if len(calib['phasecal']) > len(calib['targets']):
logger.warning('Too many phase calibrators set.')
logger.warning('The following will be truncated: {}'.format(calib['phasecal'][len(calib['targets'])-1:]))
calib['phasecal'] = calib['phasecal'][:len(calib['targets'])]
if len(calib['phasecal']) < len(calib['targets']):
logger.warning('Too few phase calibrators set.')
for i in range(len(calib['phasecal']),len(calib['targets'])):
calib['phasecal'].append('')
i = 0
print('Valid field names:\n{}\n'.format(field_names))
while i in range(len(calib['phasecal'])):
calib['phasecal'][i] = uinput('Enter phase calibrator for {}: '.format(calib['targets'][i]), default=calib['phasecal'][i])
if calib['phasecal'][i] not in field_names:
print('\n\nString entered is not a valid field name.')
print('Valid field names:\n{}\n'.format(field_names))
else:
i += 1
change_made = True
logger.info('Phase calibrators set as: {}.'.format(calib['phasecal']))
else:
logger.info('Phase calibrator already set as: {}.'.format(calib['phasecal']))
if change_made:
logger.info('Updating config file to set target and calibrator fields.')
config_raw.set('calibration','fluxcal',calib['fluxcal'])
config_raw.set('calibration','bandcal',calib['bandcal'])
config_raw.set('calibration','phasecal',calib['phasecal'])
config_raw.set('calibration','targets',calib['targets'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
else:
logger.info('No changes made to preset target and calibrator fields.')
logger.info('Completed setting field purposes.')
def calibration(msfile,config):
    """
    Runs the basic calibration steps on each SPW based on the intents described in the configuration file.
    Applies the calibration to all science target fields.
    Input:
    msfile = Path to the MS. (String)
    config = The parameters read from the configuration file. (Ordered dictionary)
    """
    logger.info('Starting calibration.')
    # Directories for diagnostic plots, text summaries, and calibration tables.
    plots_obs_dir = './plots/'
    makedir(plots_obs_dir)
    sum_dir = './summary/'
    makedir(sum_dir)
    cal_tabs = './cal_tabs/'
    makedir(cal_tabs)
    calib = config['calibration']
    # Gather the unique list of SPW IDs in which any science target appears.
    msmd.open(msfile)
    spw_IDs = []
    for target in calib['targets']:
        spw_IDs.extend(list(msmd.spwsforfield(target)))
    spw_IDs = list(set(list(spw_IDs)))
    spw_names = msmd.namesforspws(spw_IDs)
    nspw = len(spw_IDs)
    msmd.close()
    # NOTE(review): phase_cals aliases calib['phasecal'] (same list object),
    # so the appends in the SPW-matching loop below also extend
    # calib['phasecal'] itself. Confirm this mutation is intended; a copy may
    # have been meant here.
    phase_cals = calib['phasecal']
    std_flux_mods = ['3C48_L.im', '3C138_L.im', '3C286_L.im', '3C147_L.im']
    if len(calib['targets']) != nspw:
        if len(calib['targets']) < nspw:
            logger.info('Some targets were observed in multiple SPWs.')
        else:
            logger.critical('There are more targets than SPWs. The pipeline is not designed for this eventuality.')
            sys.exit(-1)
    logger.info('Matching phase calibrators to the appropriate SPWs.')
    # Verify that every SPW contains at least one target and its expected
    # phase calibrator before any calibration is attempted.
    for i in range(nspw):
        msmd.open(msfile)
        spw_fields = msmd.fieldsforspw(spw_IDs[i], asnames=True)
        msmd.close()
        cals_in_spw = list(set(spw_fields).intersection(calib['phasecal']))
        targets_in_spw = list(set(spw_fields).intersection(calib['targets']))
        if len(cals_in_spw) == 0:
            logger.critical('No phase calibrator for SPW {}.'.format(spw_IDs[i]))
            sys.exit(-1)
        if len(targets_in_spw) == 0:
            logger.critical('No targets in SPW {}.'.format(spw_IDs[i]))
            sys.exit(-1)
        if len(targets_in_spw) > 1:
            logger.warning('More than one target in SPW {}.'.format(spw_IDs[i]))
            # Multiple targets in one SPW are only tolerated if they share the
            # same phase calibrator.
            inx1 = list(calib['targets']).index(targets_in_spw[0])
            inx2 = list(calib['targets']).index(targets_in_spw[1])
            if calib['phasecal'][inx1] == calib['phasecal'][inx2]:
                logger.info('Both used the same phase calibrator, which should not cause problems.')
            else:
                logger.warning('Multiple targets with different phase calibrators in SPW {}.'.format(spw_IDs[i]))
                sys.exit(-1)
        inx = list(calib['targets']).index(targets_in_spw[0])
        if calib['phasecal'][inx] in cals_in_spw:
            phase_cals.append(calib['phasecal'][inx])
        else:
            logger.critical('The expected phase calibrator for {0} was not observed in SPW {1}.'.format(targets_in_spw[0],spw_IDs[i]))
            sys.exit(-1)
    # Gain vs. elevation correction, shared by all SPWs.
    gctab = cal_tabs+'gaincurve.cal'
    logger.info('Calibrating gain vs elevation({}).'.format(gctab))
    command = "gencal(vis='{0}', caltable='{1}', caltype='gceff')".format(msfile,gctab)
    logger.info('Executing command: '+command)
    exec(command)
    # Per-SPW calibration: flux model, delays, bandpass, phase/amplitude
    # gains, flux scale transfer, and application to the calibrators.
    for i in range(nspw):
        msmd.open(msfile)
        spw_fields = msmd.fieldsforspw(spw_IDs[i], asnames=True)
        msmd.close()
        logger.info('Beginning calibration of SPW {}.'.format(spw_IDs[i]))
        logger.info('Load model for flux calibrator {0} ({1}).'.format(calib['fluxcal'][i],calib['fluxmod'][i]))
        # A non-standard model with man_mod set is treated as a manual flux
        # density; a recognised standard model name is loaded directly.
        if calib['fluxmod'][i] not in std_flux_mods and calib['man_mod']:
            command = "setjy(vis='{0}', field='{1}', spw='{2}', scalebychan=True, fluxdensity=[{3},0,0,0], standard='manual')".format(msfile,calib['fluxcal'][i],spw_IDs[i],calib['fluxmod'][i])
            logger.info('Executing command: '+command)
            exec(command)
        elif calib['fluxmod'][i] in std_flux_mods:
            command = "setjy(vis='{0}', field='{1}', spw='{2}', scalebychan=True, model='{3}')".format(msfile,calib['fluxcal'][i],spw_IDs[i],calib['fluxmod'][i])
            logger.info('Executing command: '+command)
            exec(command)
        else:
            logger.warning('The flux model cannot be recognised. The setjy task will not be run. Fluxes will be incorrect.')
        # Diagnostic plot of raw bandpass calibrator phases.
        plot_file = plots_obs_dir+'{0}_bpphaseint_spw{1}.png'.format(msfile,spw_IDs[i])
        logger.info('Plotting bandpass phase vs. time for reference antenna to: {}'.format(plot_file))
        plotms(vis=msfile, plotfile=plot_file, xaxis='channel', yaxis='phase', field=calib['bandcal'][i], spw = str(spw_IDs[i]), correlation='RR,LL', avgtime='1E10', antenna=calib['refant'], coloraxis='antenna2', expformat='png', overwrite=True, showlegend=False, showgui=False)
        # Delay calibration on the bandpass calibrator.
        dltab = cal_tabs+'delays_spw{}.cal'.format(spw_IDs[i])
        logger.info('Calibrating delays for bandpass calibrator {0} ({1}).'.format(calib['bandcal'][i],dltab))
        command = "gaincal(vis='{0}', field='{1}', spw='{2}', caltable='{3}', refant='{4}', gaintype='K', gaintable=['{5}'])".format(msfile,calib['bandcal'][i],spw_IDs[i],dltab,calib['refant'],gctab)
        logger.info('Executing command: '+command)
        exec(command)
        # Short-timescale phase solutions prior to the bandpass solve.
        bptab = cal_tabs+'bpphase_spw{}.gcal'.format(spw_IDs[i])
        logger.info('Make bandpass calibrator phase solutions for {0} ({1}).'.format(calib['bandcal'][i],bptab))
        command = "gaincal(vis='{0}', field='{1}', spw='{2}', caltable='{3}', refant='{4}', calmode='p', solint='int', combine='', minsnr=2.0, gaintable=['{5}','{6}'])".format(msfile,calib['bandcal'][i],spw_IDs[i],bptab,calib['refant'],gctab,dltab)
        logger.info('Executing command: '+command)
        exec(command)
        plot_file = plots_obs_dir+'{0}_bpphasesol_spw{1}.png'.format(msfile,spw_IDs[i])
        logger.info('Plotting bandpass phase solutions to: {}'.format(plot_file))
        plotms(vis=bptab, plotfile=plot_file, gridrows=3, gridcols=3, xaxis='time', yaxis='phase',
               plotrange=[0,0,-180,180], expformat='png', overwrite=True, showlegend=False, showgui=False, exprange='all',
               iteraxis='antenna', spw=str(spw_IDs[i]))
        # Normalised bandpass solution.
        bstab = cal_tabs+'bandpass_spw{}.bcal'.format(spw_IDs[i])
        logger.info('Determining bandpass solution ({}).'.format(bstab))
        command = "bandpass(vis='{0}', caltable='{1}', field='{2}', spw='{3}', refant='{4}', solint='inf', solnorm=True, gaintable=['{5}', '{6}', '{7}'])".format(msfile,bstab,calib['bandcal'][i],spw_IDs[i],calib['refant'],gctab, dltab, bptab)
        logger.info('Executing command: '+command)
        exec(command)
        plot_file = plots_obs_dir+'{0}_bandpasssol_spw{1}.png'.format(msfile,spw_IDs[i])
        logger.info('Plotting bandpass amplitude solutions to: {}'.format(plot_file))
        plotms(vis=bstab, plotfile=plot_file, gridrows=3, gridcols=3, xaxis='chan', yaxis='amp',
               expformat='png', overwrite=True, showlegend=False, showgui=False, exprange='all',
               iteraxis='antenna', coloraxis='corr', spw=str(spw_IDs[i]))
        # Build the comma-separated list of all calibrators present in this SPW.
        calfields = []
        for field in calib['fluxcal']:
            if field in spw_fields:
                calfields.append(field)
        for field in calib['bandcal']:
            if field in spw_fields:
                calfields.append(field)
        for field in calib['phasecal']:
            if field in spw_fields:
                calfields.append(field)
        calfields = list(set(calfields))
        calfields = ','.join(calfields)
        # Phase solutions per integration and per scan, then amplitude+phase.
        iptab = cal_tabs+'intphase_spw{}.gcal'.format(spw_IDs[i])
        logger.info('Determining integration phase solutions ({}).'.format(iptab))
        command = "gaincal(vis='{0}', field='{1}', spw='{2}', caltable='{3}', refant='{4}', calmode='p', solint='int', minsnr=2.0, gaintable=['{5}', '{6}', '{7}'])".format(msfile,calfields,spw_IDs[i],iptab,calib['refant'],gctab, dltab, bstab)
        logger.info('Executing command: '+command)
        exec(command)
        sptab = cal_tabs+'scanphase_spw{}.gcal'.format(spw_IDs[i])
        logger.info('Determining scan phase solutions ({}).'.format(sptab))
        command = "gaincal(vis='{0}', field='{1}', spw='{2}', caltable='{3}', refant='{4}', calmode='p', solint='inf', minsnr=2.0, gaintable=['{5}', '{6}', '{7}'])".format(msfile,calfields,spw_IDs[i],sptab,calib['refant'],gctab, dltab, bstab)
        logger.info('Executing command: '+command)
        exec(command)
        amtab = cal_tabs+'amp_spw{}.gcal'.format(spw_IDs[i])
        logger.info('Determining amplitude solutions ({}).'.format(amtab))
        command = "gaincal(vis='{0}', field='{1}', spw='{2}', caltable='{3}', refant='{4}', calmode='ap', solint='inf', minsnr=2.0, gaintable=['{5}', '{6}', '{7}', '{8}'])".format(msfile,calfields,spw_IDs[i],amtab,calib['refant'],gctab, dltab, bstab, iptab)
        logger.info('Executing command: '+command)
        exec(command)
        plot_file = plots_obs_dir+'phasesol_spw{0}.png'.format(spw_IDs[i])
        logger.info('Plotting phase solutions to: {}'.format(plot_file))
        plotms(vis=amtab, plotfile=plot_file, gridrows=3, gridcols=3, xaxis='time', yaxis='phase',
               expformat='png', overwrite=True, showlegend=False, showgui=False, exprange='all',
               iteraxis='antenna', coloraxis='corr', plotrange=[-1,-1,-20,20])
        plot_file = plots_obs_dir+'ampsol_spw{0}.png'.format(spw_IDs[i])
        logger.info('Plotting amplitude solutions to: {}'.format(plot_file))
        plotms(vis=amtab, plotfile=plot_file, gridrows=3, gridcols=3, xaxis='time', yaxis='amp',
               expformat='png', overwrite=True, showlegend=False, showgui=False, exprange='all',
               iteraxis='antenna', coloraxis='corr', plotrange=[-1,-1,0,3])
        # Transfer the flux scale unless a single field plays all three
        # calibrator roles (in which case fluxscale would be meaningless).
        if not (calib['bandcal'][i] == calib['fluxcal'][i] and calib['bandcal'][i] == phase_cals[i]):
            fxtab = cal_tabs+'fluxsol_spw{}.cal'.format(spw_IDs[i])
            logger.info('Applying flux scale to calibrators ({}).'.format(fxtab))
            command = "fluxscale(vis='{0}', caltable='{1}', fluxtable='{2}', reference='{3}', incremental=True)".format(msfile,amtab,fxtab,calib['fluxcal'][i])
            logger.info('Executing command: flux_info = '+command)
            exec('flux_info = '+command)
            out_file = sum_dir+'{0}.flux.summary'.format(msfile)
            logger.info('Writing calibrator fluxes summary to: {}.'.format(out_file))
            out_file = open(out_file, 'a+')
            out_file.write('Spectral window: {}\n'.format(spw_IDs[i]))
            # Python 2: dict.keys() returns a list, so it can be indexed.
            for k in range(len(flux_info.keys())):
                if 'spw' in flux_info.keys()[k] or 'freq' in flux_info.keys()[k]:
                    continue
                else:
                    fieldID = flux_info.keys()[k]
                    out_file.write('Flux density for {0}: {1} +/- {2} Jy\n'.format(flux_info[fieldID]['fieldName'], flux_info[fieldID][str(spw_IDs[i])]['fluxd'][0], flux_info[fieldID][str(spw_IDs[i])]['fluxdErr'][0]))
            out_file.write('\n')
            out_file.close()
        else:
            logger.info('Only one calibrator for bandpass, flux, and phase in SPW {}. No calibrator fluxes added to summary.'.format(spw_IDs[i]))
        # Apply the full calibration chain to the bandpass and flux calibrators.
        logger.info('Apply all calibrations to bandpass and flux calibrators in SPW {}.'.format(spw_IDs[i]))
        logger.info('Applying clibration to: {}'.format(calib['bandcal'][i]))
        if not (calib['bandcal'][i] == calib['fluxcal'][i] and calib['bandcal'][i] == phase_cals[i]):
            command = "applycal(vis='{0}', field='{1}', gaintable=['{2}', '{3}', '{4}', '{5}', '{6}', '{7}'], gainfield=['', '{1}', '{1}', '{1}', '{1}', '{1}'], calwt=False)".format(msfile,calib['bandcal'][i],gctab, dltab, bstab, iptab, amtab, fxtab)
            logger.info('Executing command: '+command)
            exec(command)
            if calib['fluxcal'][i] != calib['bandcal'][i]:
                logger.info('Applying clibration to: {}'.format(calib['fluxcal'][i]))
                command = "applycal(vis='{0}', field='{1}', gaintable=['{2}', '{3}', '{4}', '{5}', '{6}', '{7}'], gainfield=['', '{8}', '{8}', '{1}', '{1}', '{1}'], calwt=False)".format(msfile,calib['fluxcal'][i],gctab, dltab, bstab, iptab, amtab, fxtab,calib['bandcal'][i])
                logger.info('Executing command: '+command)
                exec(command)
        else:
            # Single combined calibrator: no flux table in the chain.
            command = "applycal(vis='{0}', field='{1}', gaintable=['{2}', '{3}', '{4}', '{5}', '{6}'], gainfield=['', '{1}', '{1}', '{1}', '{1}'], calwt=False)".format(msfile,calib['bandcal'][i],gctab, dltab, bstab, iptab, amtab)
            logger.info('Executing command: '+command)
            exec(command)
        # Post-calibration diagnostic plots for the bandpass calibrator.
        plot_file = plots_obs_dir+'corr_phase_spw{}.png'.format(spw_IDs[i])
        logger.info('Plotting corrected phases for {0} to: {1}'.format(calib['bandcal'][i],plot_file))
        plotms(vis=msfile, plotfile=plot_file, field=calib['bandcal'][i], xaxis='channel', yaxis='phase', ydatacolumn='corrected', correlation='RR,LL', avgtime='1E10', antenna=calib['refant'], spw=spw_IDs[i], coloraxis='antenna2', expformat='png', overwrite=True, showlegend=False, showgui=False)
        plot_file = plots_obs_dir+'corr_amp_spw{}.png'.format(spw_IDs[i])
        logger.info('Plotting corrected amplitudes for {0} to: {1}'.format(calib['bandcal'][i],plot_file))
        plotms(vis=msfile, plotfile=plot_file, field=calib['bandcal'][i], xaxis='channel', yaxis='amp', ydatacolumn='corrected', correlation='RR,LL', avgtime='1E10', antenna=calib['refant'], spw=spw_IDs[i], coloraxis='antenna2', expformat='png', overwrite=True, showlegend=False, showgui=False)
    # Finally apply the calibration to each science target (and its phase
    # calibrator), per SPW in which the target was observed.
    for target in calib['targets']:
        inx = calib['targets'].index(target)
        phasecal = calib['phasecal'][inx]
        fluxcal = calib['fluxcal'][inx]
        logger.info('Applying clibration to: {0} and {1}'.format(target,phasecal))
        msmd.open(msfile)
        spws = msmd.spwsforfield(target)
        msmd.close()
        logger.info('{0} was observed in SPW(s): {1}'.format(target,spws))
        for spw in spws:
            # Rebuild the per-SPW calibration table names.
            i = spw_IDs.index(spw)
            dltab = cal_tabs+'delays_spw{}.cal'.format(spw_IDs[i])
            bptab = cal_tabs+'bpphase_spw{}.gcal'.format(spw_IDs[i])
            bstab = cal_tabs+'bandpass_spw{}.bcal'.format(spw_IDs[i])
            iptab = cal_tabs+'intphase_spw{}.gcal'.format(spw_IDs[i])
            sptab = cal_tabs+'scanphase_spw{}.gcal'.format(spw_IDs[i])
            amtab = cal_tabs+'amp_spw{}.gcal'.format(spw_IDs[i])
            fxtab = cal_tabs+'fluxsol_spw{}.cal'.format(spw_IDs[i])
            logger.info('Apply applying calibrations in SPW: {}'.format(spw))
            if phasecal != fluxcal:
                logger.info('Applying clibration to: {}'.format(phasecal))
                command = "applycal(vis='{0}', field='{1}', gaintable=['{2}', '{3}', '{4}', '{5}', '{6}', '{7}'], gainfield=['', '{8}', '{8}', '{1}', '{1}', '{1}'], calwt=False)".format(msfile,phasecal,gctab, dltab, bstab, iptab, amtab, fxtab,calib['bandcal'][i])
                logger.info('Executing command: '+command)
                exec(command)
                logger.info('Applying clibration to: {}'.format(target))
                command = "applycal(vis='{0}', field='{1}', gaintable=['{2}', '{3}', '{4}', '{5}', '{6}', '{7}'], gainfield=['', '{8}', '{8}', '{9}', '{9}', '{9}'], calwt=False)".format(msfile,target,gctab, dltab, bstab, iptab, amtab, fxtab,calib['bandcal'][i],phasecal)
                logger.info('Executing command: '+command)
                exec(command)
            else:
                # Phase calibrator doubles as flux calibrator: no flux table.
                logger.info('Applying clibration to: {}'.format(target))
                command = "applycal(vis='{0}', field='{1}', gaintable=['{2}', '{3}', '{4}', '{5}', '{6}'], gainfield=['', '{7}', '{7}', '{8}', '{8}'], calwt=False)".format(msfile,target,gctab, dltab, bstab, iptab, amtab,calib['bandcal'][i],phasecal)
                logger.info('Executing command: '+command)
                exec(command)
    logger.info('Completed calibration.')
def split_fields(msfile,config):
    """
    Splits the MS into separate MS for each science target.
    Input:
    msfile = Path to the MS. (String)
    config = The parameters read from the configuration file. (Ordered dictionary)
    """
    logger.info('Starting split fields.')
    calib = config['calibration']
    src_dir = config['global']['src_dir']+'/'
    sum_dir = './summary/'
    makedir(sum_dir)
    makedir('./'+src_dir)
    # Write each science field out to its own MS, named after the target.
    for field, target_name in zip(calib['targets'], calib['target_names']):
        # Look up every SPW in which this field was observed.
        msmd.open(msfile)
        spws = msmd.spwsforfield(field)
        msmd.close()
        if len(spws) > 1:
            # Multi-SPW fields are combined into a single SPW while splitting.
            logger.info('{0} was observed in multiple SPWs. These will now be combined and the field split into a separate MS.'.format(field))
            command = "mstransform(vis='{0}', outputvis='{2}{1}.split', field='{3}', spw='{4}', combinespws=True)".format(msfile,target_name,src_dir,field,','.join(numpy.array(spws,dtype='str')))
        else:
            logger.info('Splitting {0} into separate file: {1}.'.format(field, field+'.split'))
            command = "split(vis='{0}', outputvis='{1}{2}'+'.split', field='{3}')".format(msfile,src_dir,target_name,field)
        logger.info('Executing command: '+command)
        exec(command)
        # Refresh the listobs summary for the newly split data set.
        listobs_file = sum_dir+target_name+'.listobs.summary'
        rmfile(listobs_file)
        logger.info('Writing listobs summary for split data set to: {}'.format(listobs_file))
        listobs(vis=src_dir+target_name+'.split', listfile=listobs_file)
    logger.info('Completed split fields.')
def contsub(msfile,config,config_raw,config_file):
    """
    Subtracts the continuum from each of the science target MSs.
    If the no line free range is set then the user is queried (in interactive mode) and the configuration file updated.
    Input:
    msfile = Path to the MS. (String)
    config = The parameters read from the configuration file. (Ordered dictionary)
    config_raw = The instance of the parser.
    config_file = Path to configuration file. (String)
    """
    logger.info('Starting continuum subtraction.')
    contsub = config['continuum_subtraction']
    calib = config['calibration']
    src_dir = config['global']['src_dir']+'/'
    logger.info('Checking for line free channel ranges in parameters.')
    reset_ch = False
    # There must be exactly one line free channel range per target.
    if len(contsub['linefree_ch']) == 0 or len(contsub['linefree_ch']) != len(calib['target_names']):
        reset_ch = True
        if len(contsub['linefree_ch']) < len(calib['target_names']):
            logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
            while len(contsub['linefree_ch']) < len(calib['target_names']):
                contsub['linefree_ch'].append('')
        elif len(contsub['linefree_ch']) > len(calib['target_names']):
            logger.warning('There are more channel ranges than target fields.')
            logger.info('Current channel ranges: {}'.format(contsub['linefree_ch']))
            logger.warning('The channel range list will now be truncated to match the number of targets.')
            contsub['linefree_ch'] = contsub['linefree_ch'][:len(calib['target_names'])]
    elif interactive:
        # Ranges look consistent; offer the user a chance to revise them anyway.
        print('Current line free channels set as:')
        for i in range(len(contsub['linefree_ch'])):
            print('{0}: {1}'.format(calib['target_names'][i],contsub['linefree_ch'][i]))
        resp = str(raw_input('Do you want revise the line free channels (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_ch = True
    if reset_ch:
        if not interactive:
            logger.critical('The number of line free channel ranges provided does not match the number of targets.')
            logger.info('Line free change ranges: {}'.format(contsub['linefree_ch']))
            logger.info('Targets: {}'.format(calib['target_names']))
            sys.exit(-1)
        else:
            # Query the user for one range per target, then persist them.
            print('For each target enter the line free channels in the following format:\nspwID1:min_ch1~max_ch1;min_ch2~max_ch2,spwID2:min_ch3~max_ch3 etc.')
            for i in range(len(calib['target_names'])):
                contsub['linefree_ch'][i] = uinput('Line free channels for {}: '.format(calib['target_names'][i]), contsub['linefree_ch'][i])
                logger.info('Setting line free channels for {0} as: {1}.'.format(calib['target_names'][i], contsub['linefree_ch'][i]))
            logger.info('Updating config file to set line free channels.')
            config_raw.set('continuum_subtraction','linefree_ch',contsub['linefree_ch'])
            configfile = open(config_file,'w')
            config_raw.write(configfile)
            configfile.close()
    logger.info('Line free channels set as: {}.'.format(contsub['linefree_ch']))
    logger.info('For the targets: {}.'.format(calib['target_names']))
    # Run uvcontsub for each target using its line free channel ranges.
    for i in range(len(calib['target_names'])):
        target = calib['target_names'][i]
        field = calib['targets'][i]
        chans = contsub['linefree_ch'][i]
        spws = chans.split(',')
        # Extract the SPW ID preceding the ':' in each 'spwID:min_ch~max_ch'
        # entry. (The previous code kept only the first character of the
        # entry, which broke for SPW IDs of 10 or more.) A separate loop
        # variable avoids shadowing the outer target index.
        for j in range(len(spws)):
            spws[j] = spws[j].strip().split(':')[0]
        logger.info('Subtracting the continuum from field: {}'.format(target))
        command = "uvcontsub(vis='{0}{1}'+'.split', field='{2}', fitspw='{3}', spw='{4}', excludechans=False,combine='spw',solint='int', fitorder={5}, want_cont={6})".format(src_dir,target,field,chans,','.join(spws),contsub['fitorder'],contsub['save_cont'])
        logger.info('Executing command: '+command)
        exec(command)
    logger.info('Completed continuum subtraction.')
def plot_spec(config):
    """
    For each SPW and each science target amplitude vs channel and amplitude vs velocity are plotted.
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    """
    logger.info('Starting plotting amplitude spectrum.')
    plots_obs_dir = './plots/'
    makedir(plots_obs_dir)
    calib = config['calibration']
    src_dir = config['global']['src_dir']+'/'
    # Walk the science targets alongside their original field names.
    for target, field in zip(calib['target_names'], calib['targets']):
        # Determine which SPWs cover this field in the split-out MS.
        msmd.open('{0}{1}.split'.format(src_dir,target))
        spws = msmd.spwsforfield('{}'.format(field))
        msmd.close()
        for spw in spws:
            # Amplitude against channel number.
            plot_file = plots_obs_dir+'{0}_amp_chn_spw{1}.png'.format(target,spw)
            logger.info('Plotting amplitude vs channel to {}'.format(plot_file))
            plotms(vis=src_dir+target+'.split', xaxis='chan', yaxis='amp',
                   ydatacolumn='corrected', spw=str(spw), plotfile=plot_file,
                   expformat='png', overwrite=True, showgui=False)
            # Amplitude against barycentric velocity (optical definition).
            plot_file = plots_obs_dir+'{0}_amp_vel_spw{1}.png'.format(target,spw)
            logger.info('Plotting amplitude vs velocity to {}'.format(plot_file))
            plotms(vis=src_dir+target+'.split', xaxis='velocity', yaxis='amp',
                   ydatacolumn='corrected', spw=str(spw), plotfile=plot_file,
                   expformat='png', overwrite=True, showgui=False,
                   freqframe='BARY', restfreq=str(config['global']['rest_freq']), veldef='OPTICAL')
    logger.info('Completed plotting amplitude spectrum.')
def dirty_cont_image(config,config_raw,config_file):
    """
    Generates a dirty image of each science target including the continuum emission.
    Checks that the pixel size and image size are set (will prompt user if in interactive mode).
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    config_raw = The instance of the parser.
    config_file = Path to configuration file. (String)
    """
    logger.info('Starting making dirty continuum image.')
    calib = config['calibration']
    rest_freq = config['global']['rest_freq']
    targets = calib['target_names']
    cln_param = config['clean']
    src_dir = config['global']['src_dir']+'/'
    img_dir = config['global']['img_dir']+'/'
    # Bug fix: this was previously makedir('/.'+img_dir), which builds a
    # root-anchored path instead of the working-directory image directory.
    # Now consistent with dirty_image.
    makedir('./'+img_dir)
    logger.info('Removing any existing dirty continuum images.')
    del_list = glob.glob(img_dir+'*cont.dirty*')
    for file_path in del_list:
        logger.info('Deleting: '+file_path)
        shutil.rmtree(file_path)
    logger.info('Checking clean parameters for dirty image (inc. continuum).')
    # --- Pixel sizes: exactly one entry required per target. ---
    reset_cln = False
    if (len(cln_param['pix_size']) == 0) or (len(cln_param['pix_size']) != len(targets)):
        if not interactive:
            logger.critical('The number of pixel sizes provided does not match the number of targets.')
            logger.info('Pixel sizes: {}'.format(cln_param['pix_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['pix_size']) < len(targets):
            logger.warning('There are more target fields than pixel sizes. Appending blanks.')
            while len(cln_param['pix_size']) < len(targets):
                cln_param['pix_size'].append('')
        elif len(cln_param['pix_size']) > len(targets):
            logger.warning('There are more pixel sizes than target fields.')
            logger.info('Current pixel sizes: {}'.format(cln_param['pix_size']))
            logger.warning('The pixel size list will now be truncated to match the number of targets.')
            cln_param['pix_size'] = cln_param['pix_size'][:len(targets)]
    elif interactive:
        # Sizes look consistent; offer the user a chance to revise them.
        print('Current pixel sizes set as:')
        for i in range(len(cln_param['pix_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['pix_size'][i]))
        resp = str(raw_input('Do you want revise the pixel sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the desired pixel size:')
        for i in range(len(targets)):
            cln_param['pix_size'][i] = uinput('Pixel size for {}: '.format(targets[i]), cln_param['pix_size'][i])
            logger.info('Setting pixel size for {0} as: {1}.'.format(targets[i], cln_param['pix_size'][i]))
        logger.info('Updating config file to set pixel sizes.')
        config_raw.set('clean','pix_size',cln_param['pix_size'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
    logger.info('Pixel sizes set as: {}.'.format(cln_param['pix_size']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Image sizes: exactly one entry required per target. ---
    reset_cln = False
    if len(cln_param['im_size']) == 0 or len(cln_param['im_size']) != len(targets):
        if not interactive:
            logger.critical('The number of image sizes provided does not match the number of targets.')
            logger.info('Image sizes: {}'.format(cln_param['im_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['im_size']) < len(targets):
            logger.warning('There are more target fields than image sizes. Appending blanks.')
            while len(cln_param['im_size']) < len(targets):
                cln_param['im_size'].append('')
        elif len(cln_param['im_size']) > len(targets):
            logger.warning('There are more image sizes than target fields.')
            logger.info('Current image sizes: {} pixels.'.format(cln_param['im_size']))
            logger.warning('The image size list will now be truncated to match the number of targets.')
            cln_param['im_size'] = cln_param['im_size'][:len(targets)]
    elif interactive:
        print('Current images sizes set as:')
        for i in range(len(cln_param['im_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['im_size'][i]))
        resp = str(raw_input('Do you want revise the image sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the desired image size:')
        for i in range(len(targets)):
            print('Note: The pixel size for this target was set to: {}'.format(cln_param['pix_size'][i]))
            cln_param['im_size'][i] = uinput('Image size for {}: '.format(targets[i]), cln_param['im_size'][i])
            logger.info('Setting image size for {0} as: {1} x {2}.'.format(targets[i], cln_param['im_size'][i],cln_param['pix_size'][i]))
        logger.info('Updating config file to set image sizes.')
        config_raw.set('clean','im_size',cln_param['im_size'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
    logger.info('Image sizes set as: {} pixels.'.format(cln_param['im_size']))
    logger.info('For the targets: {}.'.format(targets))
    # Make a dirty (niter=0) cube, continuum included, for each target.
    for i in range(len(targets)):
        target = targets[i]
        field = calib['targets'][i]
        logger.info('Making dirty image of {} (inc. continuum).'.format(target))
        command = "tclean(vis='{0}{1}'+'.split', field='{2}', imagename='{3}{1}'+'.cont.dirty', cell='{4}', imsize=[{5},{5}], specmode='cube', outframe='bary', veltype='radio', restfreq='{6}', gridder='wproject', wprojplanes=128, pblimit=0.1, normtype='flatnoise', deconvolver='hogbom', weighting='briggs', robust={7}, niter=0, interactive=False)".format(src_dir,target,field,img_dir,cln_param['pix_size'][i],cln_param['im_size'][i],rest_freq,cln_param['robust'])
        logger.info('Executing command: '+command)
        exec(command)
    logger.info('Completed making dirty continuum image.')
def dirty_image(config,config_raw,config_file):
    """
    Generates a dirty (continuum subtracted) image of each science target.
    Checks that the pixel size, image size, and line emission channels are set (will prompt user if in interactive mode).
    Any corrections made to these lists are written back to the configuration file.
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    config_raw = The instance of the parser.
    config_file = Path to configuration file. (String)
    """
    # NOTE(review): 'interactive', 'makedir', 'uinput', and 'logger' are
    # module-level globals/helpers defined elsewhere in this pipeline script.
    logger.info('Starting making dirty image.')
    calib = config['calibration']
    contsub = config['continuum_subtraction']
    rest_freq = config['global']['rest_freq']
    targets = calib['target_names']
    cln_param = config['clean']
    src_dir = config['global']['src_dir']+'/'
    img_dir = config['global']['img_dir']+'/'
    makedir('./'+img_dir)
    # Remove any products of a previous dirty-imaging run for these targets.
    logger.info('Removing any existing dirty images.')
    for target in targets:
        del_list = glob.glob(img_dir+'{}.dirty*'.format(target))
        for file_path in del_list:
            logger.info('Deleting: '+file_path)
            shutil.rmtree(file_path)
    logger.info('Checking clean parameters for dirty image.')
    # --- Pixel size list: must have exactly one entry per target. ---
    # Too few entries -> pad with blanks; too many -> truncate; in interactive
    # mode the user may revise values, which are then written back to the config.
    reset_cln = False
    if len(cln_param['pix_size']) == 0 or len(cln_param['pix_size']) != len(targets):
        if not interactive:
            # Cannot repair the list without a user; abort the pipeline.
            logger.critical('The number of pixel sizes provided does not match the number of targets.')
            logger.info('Pixel sizes: {}'.format(cln_param['pix_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['pix_size']) < len(targets):
            logger.warning('There are more target fields than pixel sizes. Appending blanks.')
            while len(cln_param['pix_size']) < len(targets):
                cln_param['pix_size'].append('')
        elif len(cln_param['pix_size']) > len(targets):
            logger.warning('There are more pixel sizes than target fields.')
            logger.info('Current pixel sizes: {}'.format(cln_param['pix_size']))
            logger.warning('The pixel size list will now be truncated to match the number of targets.')
            cln_param['pix_size'] = cln_param['pix_size'][:len(targets)]
    elif interactive:
        # List length is fine; still offer the user a chance to revise values.
        print('Current pixel sizes set as:')
        for i in range(len(cln_param['pix_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['pix_size'][i]))
        resp = str(raw_input('Do you want revise the pixel sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        # Prompt for each target, then persist the revised list to the config file.
        print('For each target enter the desired pixel size:')
        for i in range(len(targets)):
            cln_param['pix_size'][i] = uinput('Pixel size for {}: '.format(targets[i]), cln_param['pix_size'][i])
            logger.info('Setting pixel size for {0} as: {1}.'.format(targets[i], cln_param['pix_size'][i]))
        logger.info('Updating config file to set pixel sizes.')
        config_raw.set('clean','pix_size',cln_param['pix_size'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
    logger.info('Pixel sizes set as: {}.'.format(cln_param['pix_size']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Image size list: same validate/pad/truncate/prompt pattern as above. ---
    reset_cln = False
    if len(cln_param['im_size']) == 0 or len(cln_param['im_size']) != len(targets):
        if not interactive:
            logger.critical('The number of image sizes provided does not match the number of targets.')
            logger.info('Image sizes: {}'.format(cln_param['im_size']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['im_size']) < len(targets):
            logger.warning('There are more target fields than image sizes. Appending blanks.')
            while len(cln_param['im_size']) < len(targets):
                cln_param['im_size'].append('')
        elif len(cln_param['im_size']) > len(targets):
            logger.warning('There are more image sizes than target fields.')
            logger.info('Current image sizes: {} pixels.'.format(cln_param['im_size']))
            logger.warning('The image size list will now be truncated to match the number of targets.')
            cln_param['im_size'] = cln_param['im_size'][:len(targets)]
    elif interactive:
        print('Current images sizes set as:')
        for i in range(len(cln_param['im_size'])):
            print('{0}: {1}'.format(targets[i],cln_param['im_size'][i]))
        resp = str(raw_input('Do you want revise the image sizes (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the desired image size:')
        for i in range(len(targets)):
            print('Note: The pixel size for this target was set to: {}'.format(cln_param['pix_size'][i]))
            cln_param['im_size'][i] = uinput('Image size for {}: '.format(targets[i]), cln_param['im_size'][i])
            logger.info('Setting image size for {0} as: {1} x {2}.'.format(targets[i], cln_param['im_size'][i],cln_param['pix_size'][i]))
        logger.info('Updating config file to set image sizes.')
        config_raw.set('clean','im_size',cln_param['im_size'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
    logger.info('Image sizes set as: {} pixels.'.format(cln_param['im_size']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Line emission channel ranges: same validate/pad/truncate/prompt pattern. ---
    reset_cln = False
    if len(cln_param['line_ch']) == 0 or len(cln_param['line_ch']) != len(targets):
        if not interactive:
            logger.critical('The number of line channel ranges provided does not match the number of targets.')
            logger.info('Pixel sizes: {}'.format(cln_param['line_ch']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['line_ch']) < len(targets):
            logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
            while len(cln_param['line_ch']) < len(targets):
                cln_param['line_ch'].append('')
        elif len(cln_param['line_ch']) > len(targets):
            logger.warning('There are more channel ranges than target fields.')
            logger.info('Current channel ranges: {}'.format(cln_param['line_ch']))
            logger.warning('The channel range list will now be truncated to match the number of targets.')
            cln_param['line_ch'] = cln_param['line_ch'][:len(targets)]
    elif interactive:
        print('Current image channels set as:')
        for i in range(len(cln_param['line_ch'])):
            print('{0}: {1}'.format(targets[i],cln_param['line_ch'][i]))
        resp = str(raw_input('Do you want revise the channels that will be imaged (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the channels you want to image in the following format:\nspwID:min_ch~max_ch')
        for i in range(len(targets)):
            print('Note: The continuum channels for this target were set to: {}'.format(contsub['linefree_ch'][i]))
            cln_param['line_ch'][i] = uinput('Channels to image for {}: '.format(targets[i]), cln_param['line_ch'][i])
            logger.info('Setting image channels for {0} as: {1}.'.format(targets[i], cln_param['line_ch'][i]))
        logger.info('Updating config file to set channels to be imaged.')
        config_raw.set('clean','line_ch',cln_param['line_ch'])
        configfile = open(config_file,'w')
        config_raw.write(configfile)
        configfile.close()
    logger.info('Line emission channels set as: {}.'.format(cln_param['line_ch']))
    logger.info('For the targets: {}.'.format(targets))
    # Make a dirty (niter=0) cube of each continuum-subtracted target.
    # The tclean call is built as a string so the exact command can be logged
    # verbatim before being executed.
    for i in range(len(targets)):
        target = targets[i]
        field = calib['targets'][i]
        logger.info('Making dirty image of {} (line only).'.format(target))
        command = "tclean(vis='{0}{1}'+'.split.contsub', field='{2}', imagename='{3}{1}'+'.dirty', cell='{4}', imsize=[{5},{5}], specmode='cube', outframe='bary', veltype='radio', restfreq='{6}', gridder='wproject', wprojplanes=128, pblimit=0.1, normtype='flatnoise', deconvolver='hogbom', weighting='briggs', robust={7}, restoringbeam='common', niter=0, interactive=False)".format(src_dir,target,field,img_dir,cln_param['pix_size'][i],cln_param['im_size'][i],rest_freq,cln_param['robust'])
        logger.info('Executing command: '+command)
        exec(command)
    logger.info('Completed making dirty image.')
def noise_est(config):
    """
    Makes an estimate of the theoretically expected noise level for each science target.
    Uses the radiometer equation with the SEFD and correlator efficiency from the
    configuration, and the antenna count, integration time, and channel width
    read from each target's measurement set metadata.
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    Output:
    noise = Estimate of the theoretical noise in Jy/beam. (List of Floats)
    """
    logger.info('Starting making noise estimation.')
    targets = config['calibration']['target_names']
    cln_param = config['clean']
    src_dir = config['global']['src_dir']+'/'
    noise = []
    for target in targets:
        msmd.open(src_dir+target+'.split.contsub')
        N = msmd.nantennas()
        t_int = msmd.effexposuretime()['value']
        t_unit = msmd.effexposuretime()['unit']
        # Bug fix: this test previously used 'or', which always fired the
        # warning when t_unit == 's' (since 'sec' is not a substring of 's').
        # Warn only when the unit is neither 's' nor contains 'sec'.
        if t_unit != 's' and 'sec' not in t_unit:
            logger.warning('Integration time units are not in seconds. Estimated noise will be incorrect.')
        ch_wid = numpy.mean(msmd.chanwidths(0))
        #Note: The above line may cause issues if different spectral windows
        #have very difference frequency resolutions
        corr_eff = cln_param['corr_eff']
        SEFD = cln_param['sefd']
        N_pol = 2.
        # Radiometer equation: rms = SEFD / (eff * sqrt(n_pol * N(N-1) * t * dnu)).
        # Computed once and reused for both the list and the log message.
        rms = SEFD/(corr_eff*numpy.sqrt(N_pol*N*(N-1.)*t_int*ch_wid))
        noise.append(rms)
        logger.info('Effective integration time for {0}: {1} {2}'.format(target,int(t_int),msmd.effexposuretime()['unit']))
        logger.info('Expected rms noise for {0}: {1} Jy/beam'.format(target,rms))
        msmd.close()
    logger.info('Completed making noise estimation.')
    return noise
def image(config,config_raw,config_file):
    """
    Generates a clean (continuum subtracted) image of each science target.
    Checks that the CLEANing scales and line emission channels are set (may prompt user if in interactive mode).
    Makes various checks on the ratio of pixel size to beam size and the scales and the maximum baseline (may prompt user if in interactive mode).
    Exports the final images as fits cubes (after regridding to J2000 if necessary).
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    config_raw = The instance of the parser.
    config_file = Path to configuration file. (String)
    """
    # NOTE(review): 'interactive', 'makedir', 'uinput', 'logger', and the CASA
    # tools 'ia'/'msmd'/'au' are module-level globals defined elsewhere.
    noises = noise_est(config)
    calib = config['calibration']
    contsub = config['continuum_subtraction']
    rest_freq = config['global']['rest_freq']
    targets = calib['target_names']
    cln_param = config['clean']
    src_dir = config['global']['src_dir']+'/'
    img_dir = config['global']['img_dir']+'/'
    makedir('./'+img_dir)
    logger.info('Removing any existing images.')
    for target in targets:
        # All tclean products share the naming scheme <target>.<ext>;
        # the same glob + rmtree is applied to each product type.
        for extension in ['image', 'model', 'pb', 'psf', 'residual', 'sumwt']:
            del_list = glob.glob(img_dir+'{0}.{1}'.format(target,extension))
            for file_path in del_list:
                logger.info('Deleting: '+file_path)
                shutil.rmtree(file_path)
    logger.info('Starting generation of clean image(s).')
    # --- Line emission channel ranges: one entry per target (pad/truncate/prompt). ---
    reset_cln = False
    if len(cln_param['line_ch']) == 0 or len(cln_param['line_ch']) != len(targets):
        if not interactive:
            logger.critical('The number of line channel ranges provided does not match the number of targets.')
            # Bug fix: this log line was mislabelled 'Pixel sizes'.
            logger.info('Channel ranges: {}'.format(cln_param['line_ch']))
            logger.info('Targets: {}'.format(targets))
            sys.exit(-1)
        reset_cln = True
        if len(cln_param['line_ch']) < len(targets):
            logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
            while len(cln_param['line_ch']) < len(targets):
                cln_param['line_ch'].append('')
        elif len(cln_param['line_ch']) > len(targets):
            logger.warning('There are more channel ranges than target fields.')
            logger.info('Current channel ranges: {}'.format(cln_param['line_ch']))
            logger.warning('The channel range list will now be truncated to match the number of targets.')
            cln_param['line_ch'] = cln_param['line_ch'][:len(targets)]
    elif interactive:
        print('Current image channels set as:')
        for i in range(len(cln_param['line_ch'])):
            print('{0}: {1}'.format(targets[i],cln_param['line_ch'][i]))
        resp = str(raw_input('Do you want revise the channels that will be imaged (y/n): '))
        if resp.lower() in ['yes','ye','y']:
            reset_cln = True
    if reset_cln and interactive:
        print('For each target enter the channels you want to image in the following format:\nspwID:min_ch~max_ch')
        for i in range(len(targets)):
            print('Note: The continuum channels for this target were set to: {}'.format(contsub['linefree_ch'][i]))
            cln_param['line_ch'][i] = uinput('Channels to image for {}: '.format(targets[i]), cln_param['line_ch'][i])
            logger.info('Setting image channels for {0} as: {1}.'.format(targets[i], cln_param['line_ch'][i]))
        logger.info('Updating config file to set channels to be imaged.')
        config_raw.set('clean','line_ch',cln_param['line_ch'])
        with open(config_file,'w') as configfile:
            config_raw.write(configfile)
    logger.info('Line emission channels set as: {}.'.format(cln_param['line_ch']))
    logger.info('For the targets: {}.'.format(targets))
    # --- Choose the deconvolution algorithm and (for MS-CLEAN) validate scales. ---
    if cln_param['multiscale']:
        algorithm = 'multiscale'
        logger.info('Setting CLEAN algorithm to MS-CLEAN.')
        reset_cln = False
        if cln_param['scales'] == []:
            reset_cln = True
            logger.warning('MS-CLEAN scales not set.')
        elif 0 not in cln_param['scales']:
            # A zero scale represents point sources and should normally be included.
            logger.warning('MS-CLEAN scales do not include point sources. This is highly recommended.')
            if interactive:
                resp = str(raw_input('Do you want revise MS-CLEAN scales (y/n): '))
                if resp.lower() in ['yes','ye','y']:
                    reset_cln = True
            else:
                logger.info('Adding point source to MS-CLEAN scales.')
                cln_param['scales'].append(0)
                reset_cln = True
        if reset_cln:
            if interactive:
                print('Current scales set to: {} beam diameters.'.format(cln_param['scales']))
                cln_param['scales'] = uinput('Enter new scales: ', cln_param['scales'])
            logger.info('Setting MS-CLEAN scales as {} beams.'.format(cln_param['scales']))
            logger.info('Updating config file to set MS-CLEAN scales.')
            config_raw.set('clean','scales',cln_param['scales'])
            with open(config_file,'w') as configfile:
                config_raw.write(configfile)
            reset_cln = False
        scales = cln_param['scales']
    else:
        algorithm = 'hogbom'
        logger.info('Setting CLEAN algorithm to Hogbom.')
        scales = None
    for i in range(len(targets)):
        target = targets[i]
        field = calib['targets'][i]
        logger.info('Starting {} image.'.format(target))
        reset_cln = False
        # Read the restoring beam of the dirty image to sanity-check the pixel size.
        ia.open(img_dir+target+'.dirty.image')
        rest_beam = ia.restoringbeam()
        ia.close()
        if rest_beam['minor']['unit'] not in cln_param['pix_size'][i]:
            logger.error('The pixel size and beam size have diffent units.')
            if cln_param['multiscale']:
                logger.error('MS-CLEAN scales will likely be incorrect.')
            logger.info('Pixel size: {}'.format(cln_param['pix_size'][i]))
            logger.info('Beam size units: {}'.format(rest_beam['minor']['unit']))
        pix_size = cln_param['pix_size'][i]
        pix_size = float(pix_size[:pix_size.find(rest_beam['minor']['unit'])])
        # Want at least ~5 pixels across the beam minor axis.
        if pix_size > 0.2*rest_beam['minor']['value']:
            logger.warning('There are fewer than 5 pixels across the beam minor axis. Consider decreasing the pixel size.')
            if interactive:
                print('Beam dimensions:')
                print('Major: {0:.2f} {1}'.format(rest_beam['major']['value'],rest_beam['major']['unit']))
                print('Minor: {0:.2f} {1}'.format(rest_beam['minor']['value'],rest_beam['minor']['unit']))
                print('Pixel size: {}'.format(cln_param['pix_size']))
                resp = str(raw_input('Do you want revise the pixel size (y/n): '))
                if resp.lower() in ['yes','ye','y']:
                    reset_cln = True
        if reset_cln and interactive:
            print('Enter the desired pixel size:')
            cln_param['pix_size'][i] = uinput('Pixel size for {}: '.format(target), cln_param['pix_size'][i])
            logger.info('Setting pixel size for {0} as: {1}.'.format(target, cln_param['pix_size'][i]))
            logger.info('Updating config file to set pixel size.')
            config_raw.set('clean','pix_size',cln_param['pix_size'])
            with open(config_file,'w') as configfile:
                config_raw.write(configfile)
            reset_cln = False
        # Fill in defaults for any unset automasking parameters.
        # Bug fix: these five default lines previously used '==' (comparison)
        # instead of '=' (assignment), so the defaults were never applied and
        # the warnings logged the empty string.
        if cln_param['automask_sl'] == '':
            cln_param['automask_sl'] = 2.0
            logger.warning('Automasking sidelobe threshold not set. Using default value: {}'.format(cln_param['automask_sl']))
        if cln_param['automask_ns'] == '':
            cln_param['automask_ns'] = 4.25
            logger.warning('Automasking noise threshold not set. Using default value: {}'.format(cln_param['automask_ns']))
        if cln_param['automask_lns'] == '':
            cln_param['automask_lns'] = 1.5
            logger.warning('Automasking low noise threshold not set. Using default value: {}'.format(cln_param['automask_lns']))
        if cln_param['automask_mbf'] == '':
            cln_param['automask_mbf'] = 0.3
            logger.warning('Automasking minimum beam fraction not set. Using default value: {}'.format(cln_param['automask_mbf']))
        if cln_param['automask_neg'] == '':
            cln_param['automask_neg'] = 15.0
            logger.warning('Automasking negative threshold not set. Using default value: {}'.format(cln_param['automask_neg']))
        logger.info('Automasking parameters set as:')
        logger.info('sidelobethreshold = {}'.format(cln_param['automask_sl']))
        logger.info('noisethreshold = {}'.format(cln_param['automask_ns']))
        logger.info('lownoisethreshold = {}'.format(cln_param['automask_lns']))
        logger.info('minbeamfraction = {}'.format(cln_param['automask_mbf']))
        logger.info('negativethreshold = {}'.format(cln_param['automask_neg']))
        if cln_param['multiscale']:
            # Convert scales from beam diameters to pixels, then drop any scale
            # larger than the maximum recoverable angular scale set by the
            # shortest baseline.
            pix_size = cln_param['pix_size'][i]
            pix_size = float(pix_size[:pix_size.find(rest_beam['minor']['unit'])])
            pix_per_beam = rest_beam['major']['value']/pix_size
            scales = cln_param['scales']
            scales = list(numpy.array(numpy.array(scales)*pix_per_beam,dtype='int'))
            B_min = au.getBaselineLengths('{0}{1}.split.contsub'.format(src_dir,target), sort=True)[0][1]
            msmd.open('{0}{1}.split.contsub'.format(src_dir,target))
            spws = msmd.spwsforfield(field)
            f_min = None
            for spw in spws:
                if f_min == None or f_min > min(msmd.chanfreqs(spw=spw,unit='Hz')):
                    f_min = min(msmd.chanfreqs(spw=spw,unit='Hz'))
            msmd.close()
            max_scale = 180.*3600.*299792458./(1.2*numpy.pi*f_min*B_min)
            logger.info('The maximum recoverable scale for {0} is {1} arcsec.'.format(target,int(max_scale)))
            if 'arcsec' not in cln_param['pix_size'][i]:
                logger.warning('Pixel size not in arcsec. Maximum scale not checked.')
            else:
                pix_size = cln_param['pix_size'][i]
                pix_size = float(pix_size[:pix_size.find('arcsec')])
                if max(scales)*pix_size > max_scale:
                    logger.warning('Some MS-CLEAN scale(s) is (are) larger than largest recoverable angular scales.')
                    logger.info('Removing offending scales.')
                    scales = list(set(numpy.where(numpy.array(scales)*pix_size <= max_scale,scales,0)))
            logger.info('CLEANing with scales of {} pixels.'.format(scales))
        logger.info('CLEANing {0} to a threshold of {1} Jy.'.format(target,noises[i]*cln_param['thresh']))
        if cln_param['automask']:
            mask = 'auto-multithresh'
        else:
            mask = 'pb'
        # The tclean call is built as a string so the exact command can be
        # logged verbatim before being executed.
        command = "tclean(vis='{0}{1}'+'.split.contsub', field='{2}', spw='{3}', imagename='{4}{1}', cell='{5}', imsize=[{6},{6}], specmode='cube', outframe='bary', veltype='radio', restfreq='{7}', gridder='wproject', wprojplanes=128, pblimit=0.1, normtype='flatnoise', deconvolver='{8}', scales={9}, restoringbeam='common', pbcor=True, weighting='briggs', robust={10}, niter=100000, gain=0.1, threshold='{11}Jy', usemask='{12}', sidelobethreshold={13}, noisethreshold={14}, lownoisethreshold={15}, minbeamfrac={16}, negativethreshold={17}, cyclefactor=2.0,interactive=False)".format(src_dir,target,field,cln_param['line_ch'][i],img_dir,cln_param['pix_size'][i],cln_param['im_size'][i],rest_freq,algorithm,scales,cln_param['robust'],noises[i]*cln_param['thresh'],mask,cln_param['automask_sl'],cln_param['automask_ns'],cln_param['automask_lns'],cln_param['automask_mbf'],cln_param['automask_neg'])
        logger.info('Executing command: '+command)
        exec(command)
        logger.info('CLEANing finished. Image cube saved as {}.'.format(target+'.image'))
        # NOTE(review): the coordinate reference code is read from the dirty
        # image rather than the freshly CLEANed one — confirm this is intended
        # (the two should share the same coordinate system).
        ia.open(img_dir+target+'.dirty.image')
        coords = ia.coordsys()
        coord_chn = False
        if 'J2000' not in coords.referencecode()[0]:
            coord_chn = True
            logger.info('Coordinate system not J2000. Image will be regridded.')
            command = "imregrid(imagename='{0}{1}'+'.image', template='J2000', output='{0}{1}'+'.image.J2000', asvelocity=True, interpolation='linear', decimate=10, overwrite=True)".format(img_dir,target)
            logger.info('Executing command: '+command)
            exec(command)
            logger.info('{} regridded in J2000 coordinates.'.format(target+'.image.J2000'))
            command = "imregrid(imagename='{0}{1}'+'.image.pbcor', template='J2000', output='{0}{1}'+'.image.pbcor.J2000', asvelocity=True, interpolation='linear', decimate=10, overwrite=True)".format(img_dir,target)
            logger.info('Executing command: '+command)
            exec(command)
            logger.info('{} regridded in J2000 coordinates.'.format(target+'.image.pbcor.J2000'))
        coords.done()
        ia.close()
        # Export both the plain and the primary-beam-corrected cubes to FITS,
        # using the J2000-regridded versions when a regrid was performed.
        fitsname = target+'_HI.fits'
        logger.info('Saving image cube as {}'.format(fitsname))
        if coord_chn:
            imagename = target+'.image.J2000'
        else:
            imagename = target+'.image'
        command = "exportfits(imagename='{0}{1}', fitsimage='{0}{2}', velocity=True,optical=False,overwrite=True,dropstokes=True,stokeslast=True,history=True,dropdeg=True)".format(img_dir,imagename,fitsname)
        logger.info('Executing command: '+command)
        exec(command)
        fitsname = target+'_HI.pbcor.fits'
        logger.info('Saving primary beam corrected image cube as {}'.format(fitsname))
        if coord_chn:
            imagename = target+'.image.pbcor.J2000'
        else:
            imagename = target+'.image.pbcor'
        command = "exportfits(imagename='{0}{1}', fitsimage='{0}{2}', velocity=True,optical=False,overwrite=True,dropstokes=True,stokeslast=True,history=True,dropdeg=True)".format(img_dir,imagename,fitsname)
        logger.info('Executing command: '+command)
        exec(command)
        coord_chn = False
    logger.info('Completed generation of clean image(s).')
def cleanup(config):
    """
    Deleted non-essential files at the end of the pipeline.
    Uses the 'cleanup_level' parameter. The levels are cumulative:
    1) Calibration and flagging tables deleted as well as CASA .last files.
    2) In addition to 1, the full (not split) MS is deleted along with the dirty images and non-essential output from tclean.
    3) Everything except the final fits cubes and the summary information is deleted.
    Input:
    config = The parameters read from the configuration file. (Ordered dictionary)
    """
    def _remove_matches(pattern, remove):
        # Apply 'remove' (os.remove or shutil.rmtree) to every path matching 'pattern'.
        for path in glob.glob(pattern):
            remove(path)
    src_dir = config['global']['src_dir']+'/'
    img_dir = config['global']['img_dir']+'/'
    cln_lvl = config['global']['cleanup_level']
    logger.info('Starting level {} cleanup.'.format(cln_lvl))
    if cln_lvl >= 1:
        logger.info('Deleting CASA .last files.')
        _remove_matches('./*.last', os.remove)
        logger.info('Deleting calibration tables.')
        shutil.rmtree('./cal_tabs')
        logger.info('Deleting flag tables.')
        shutil.rmtree('./{}.flagversions'.format(msfile))
    if cln_lvl >= 2:
        logger.info('Deleting full measurement set.')
        shutil.rmtree('./{}'.format(msfile))
        # Each tclean by-product type is removed with the same glob pattern.
        logger.info('Deleting dirty images.')
        _remove_matches(img_dir+'*.dirty.*', shutil.rmtree)
        logger.info('Deleting CLEANing masks.')
        _remove_matches(img_dir+'*.mask', shutil.rmtree)
        logger.info('Deleting CLEAN models.')
        _remove_matches(img_dir+'*.model', shutil.rmtree)
        logger.info('Deleting primary beam and PSF models.')
        _remove_matches(img_dir+'*.pb', shutil.rmtree)
        _remove_matches(img_dir+'*.psf', shutil.rmtree)
        logger.info('Deleting weighting.')
        _remove_matches(img_dir+'*.sumwt', shutil.rmtree)
    if cln_lvl >= 3:
        logger.info('Deleting split measurement sets.')
        shutil.rmtree(src_dir)
        logger.info('Deleting CLEAN residuals.')
        _remove_matches(img_dir+'*.residual', shutil.rmtree)
        logger.info('Deleting image files (except fits).')
        _remove_matches(img_dir+'*.image*', shutil.rmtree)
    logger.info('Cleanup completed.')
###################### Processing ####################
# Top-level pipeline driver. Note: several names bound here (config,
# config_raw, interactive, logger, msfile) are used as module-level globals
# by the functions defined above.
# Read configuration file with parameters (path is the last CLI argument)
config_file = sys.argv[-1]
config,config_raw = read_config(config_file)
interactive = config['global']['interactive']
# Set up your logger
logger = get_logger(LOG_FILE_INFO = '{}_log.log'.format(config['global']['project_name']),
                    LOG_FILE_ERROR = '{}_errors.log'.format(config['global']['project_name']))
# Start processing
msfile = '{0}.ms'.format(config['global']['project_name'])
# 1. Import data and write listobs to file
data_path = config['importdata']['data_path']
if not config['importdata']['jvla']:
    data_files = glob.glob(os.path.join(data_path, '*'))
    import_data(sorted(data_files), msfile)
else:
    # JVLA data arrive already as an MS; just link it into the working directory.
    os.symlink(data_path+msfile,msfile)
listobs_sum(msfile)
msinfo = get_msinfo(msfile)
# 2. Diagnostic plots
plot_elevation(msfile,config)
plot_ants(msfile)
# 3. Apply basic flags, saving a flag version and summary after each stage
manual_flags()
base_flags(msfile,config)
tfcrop(msfile,config)
flag_version = 'initial'
rm_flags(msfile,flag_version)
save_flags(msfile,flag_version)
flag_sum(msfile,flag_version)
# 4. Calibration (calibrate, auto-flag, extend flags, then re-calibrate)
restore_flags(msfile,'initial')
select_refant(msfile,config,config_raw,config_file)
set_fields(msfile,config,config_raw,config_file)
calibration(msfile,config)
rflag(msfile,config)
flag_version = 'rflag'
rm_flags(msfile,flag_version)
save_flags(msfile,flag_version)
flag_sum(msfile,flag_version)
extend_flags(msfile)
flag_version = 'extended'
rm_flags(msfile,flag_version)
save_flags(msfile,flag_version)
flag_sum(msfile,flag_version)
calibration(msfile,config)
flag_version = 'final'
rm_flags(msfile,flag_version)
save_flags(msfile,flag_version)
flag_sum(msfile,flag_version)
#5. Split, continuum subtract and make dirty image
restore_flags(msfile,'final')
rmdir(config['global']['src_dir'])
split_fields(msfile,config)
dirty_cont_image(config,config_raw,config_file)
plot_spec(config)
contsub(msfile,config,config_raw,config_file)
dirty_image(config,config_raw,config_file)
#6. Clean and regrid (if necessary) image
image(config,config_raw,config_file)
#7. Cleanup
cleanup(config)
| 50.546181 | 896 | 0.601418 |
24029297e02f9993fc89c9ae4a2b404c99d3a086 | 8,671 | py | Python | tests/test_controller.py | itsyosef/READemption | 3e9d950610b025372114fc46219cb43b9ba586e4 | [
"0BSD"
] | 5 | 2020-02-14T14:56:23.000Z | 2021-10-05T09:08:42.000Z | tests/test_controller.py | itsyosef/READemption | 3e9d950610b025372114fc46219cb43b9ba586e4 | [
"0BSD"
] | 22 | 2019-07-16T05:36:53.000Z | 2022-03-28T10:19:29.000Z | tests/test_controller.py | itsyosef/READemption | 3e9d950610b025372114fc46219cb43b9ba586e4 | [
"0BSD"
] | 7 | 2020-04-10T02:48:30.000Z | 2021-11-14T01:25:17.000Z | import os
import sys
import unittest
import shutil
sys.path.append(".")
from reademptionlib.controller import Controller
class ArgMock(object):
project_path = "a_test_project"
min_read_length = 12
segemehl_bin = "segemehl.x"
threads = 1
segemehl_accuracy = 95
segemehl_evalue = 5.0
paired_end = False
processes = 1
check_for_existing_files = False
poly_a_clipping = True
progress = False
split = False
realign = False
crossalign_cleaning_str = None
fastq = False
min_phred_score = None
adapter = None
reverse_complement = False
class TestController(unittest.TestCase):
def setUp(self):
arg_mock = ArgMock()
self.test_project_name = arg_mock.project_path
self.controller = Controller(arg_mock)
self.example_data = ExampleData()
self.maxDiff = None
def tearDown(self):
self._remove_project_folder()
def _generate_input_fasta_files(self):
genome_fh = open(
"%s/%s" % (self.controller._paths.ref_seq_folder, "agenome.fa"), "w"
)
read_fh_1 = open(
"%s/%s" % (self.controller._paths.read_fasta_folder, "libfoo.fa"),
"w",
)
read_fh_2 = open(
"%s/%s" % (self.controller._paths.read_fasta_folder, "libbar.fa"),
"w",
)
genome_fh.write(self.example_data.genome_fasta)
genome_fh.close()
read_fh_1.write(self.example_data.read_fasta_1)
read_fh_1.close()
read_fh_2.write(self.example_data.read_fasta_2)
read_fh_2.close()
def _generate_mapping_files(self):
for file_path, sam_content in zip(
self.controller._paths.read_mapping_result_sam_paths,
[self.example_data.sam_content_1, self.example_data.sam_content_2],
):
mapping_fh = open(file_path, "w")
mapping_fh.write(sam_content)
mapping_fh.close()
def _generate_annotation_files(self):
annotation_fh = open(
"%s/some_annos.gff" % self.controller._paths.annotation_folder, "w"
)
print(self.controller._paths.annotation_folder)
annotation_fh.write(self.example_data.gff_content_1)
annotation_fh.close()
def _remove_project_folder(self):
if os.path.exists(self.test_project_name):
shutil.rmtree(self.test_project_name)
class TestControllerCreateProject(TestController):
def test_create_project(self):
self._version = 0.1
self.controller.create_project(self._version)
self.assertEqual(
set(list(os.listdir(self.test_project_name))),
set(["input", "output"]),
)
self._remove_project_folder()
class TestControllerReadAligning(TestController):
    def test_read_aligning(self):
        """End-to-end smoke test of the read alignment step.

        Creates a project, writes the example genome and read files, and runs
        the alignment. There are no explicit assertions; the test passes if no
        exception is raised. NOTE(review): presumably requires the segemehl
        binary to be installed — confirm before running in CI.
        """
        self._version = 0.1
        self.controller.create_project(self._version)
        self.controller._paths._set_folder_names()
        self._generate_input_fasta_files()
        self.controller.align_reads()
        self._remove_project_folder()
class ExampleData(object):
genome_fasta = """>SL1344 genome sequence
AGAGATTACGTCTGGTTGCAAGAGATCATGACAGGGGGAATTGGTTGAAAATAAATATAT
CGCCAGCAGCACATGAACAAGTTTCGGAATGTGATCAATTTAAAAATTTATTGACTTAGG
CGGGCAGATACTTTAACCAATATAGGAATACAAGACAGACAAATAAAAATGACAGAGTAC
ACAACATCCATGAACCGCATCAGCACCACCACCATTACCACCATCACCATTACCACAGGT
AACGGTGCGGGCTGACGCGTACAGGAAACACAGAAAAAAGCCCGCACCTGAACAGTGCGG
GCTTTTTTTTCGACCAGAGATCACGAGGTAACAACCATGCGAGTGTTGAAGTTCGGCGGT
ACATCAGTGGCAAATGCAGAACGTTTTCTGCGTGTTGCCGATATTCTGGAAAGCAATGCC
AGGCAAGGGCAGGTAGCGACCGTACTTTCCGCCCCCGCGAAAATTACCAACCATCTGGTG
GCAATGATTGAAAAAACTATCGGCGGCCAGGATGCTTTGCCGAATATCAGCGATGCAGAA
CGTATTTTTTCTGACCTGCTCGCAGGACTTGCCAGCGCGCAGCCGGGATTCCCGCTTGCA
CGGTTGAAAATGGTTGTCGAACAAGAATTCGCTCAGATCAAACATGTTCTGCATGGTATC
AGCCTGCTGGGTCAGTGCCCGGATAGCATCAACGCCGCGCTGATTTGCCGTGGCGAAAAA
ATGTCGATCGCGATTATGGCGGGACTTCTGGAGGCGCGTGGGCATCGCGTCACGGTGATC
GATCCGGTAGAAAAATTGCTGGCGGTGGGCCATTACCTTGAATCTACCGTCGATATCGCG
GAATCGACTCGCCGTATCGCCGCCAGCCAGATCCCGGCCGATCACATGATCCTGATGGCG
GGCTTTACCGCCGGTAATGAAAAGGGTGAACTGGTGGTGCTGGGCCGTAATGGTTCCGAC
"""
read_fasta_1 = """>read_01
AACGGTGCGGGCTGACGCGTACAGGAAACACAGAAAAAAGCCCGCACCTGAACAGTGCGG
>read_02
CGGTTGAAAATGGTTGTCGAACAAGAATTCGCTCAGATCAAACATGTTCTGCATGGTATC
>read_03
ATGTCGATCGCGATTATGGCGGGACTTCTGGAGGCGCGTGGGCATCGCGTCACGGTGATC
>read_04
AGGCAAGGGCAGGTAGCGACCGTACTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
>read_05
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
>read_06
TTGTCGAACAAGAATTCGCTCAGATCAAAAAAAAAAAAGGGGGTGTAAAAAAAGTGTAAA
>read_07
GTGGGGTGGGTAGAGAGAGAGATTTTTTTGAGAGAGAGAAGGGTTTTTAGAGTAGAGAGG
>read_08
CGCCAGCCAGATCCCGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
>read_09
GGCCATTACCTTGAATCTACCGTCGATATCGCGGAATCGACTCGCCGTATCGAAAAAAAA
>read_10
AAAGGGACTTCTGGAGGCGCGTGGGCATCGCGTCACGGTGAAAAAAAAAAAAAAAAAAAA
"""
read_fasta_2 = """>read_01
TCTGGAGGCGCGTGGGCATCGCGTCACGGTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
>read_02
GAATCGACTCGCCGTATCGCCGCCAGCCAGATCCCGGCCGATCAGATGATCCTGATGGCG
>read_03
ATGGCGGGACTTCTGGAGGCGCGTGGGCATCGCGTCACGGTGATCAAAAAAAAAAAAAAA
>read_04
GGTCAGTGCCCGGATAGCATCAACGCCGCGCTGATTTGCAAAAAAAAAAAAAAAAAAAAA
>read_05
AAGTTTTTTTGTGAGAGAGAAGTTTTGAGAGAGAGTTAGAGGAAAAAAAAAAAAAAAAAA
>read_06
CGCCAGCAGCACATGAACAAGTTTCGGAATGTGATCAATTTAAAAATTTATTGACTTAGG
>read_07
CGCCAGCAGCACATGAACAAGTTTCGGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
>read_08
ATGAACAAGTTTCGGAATGTGATCAATTTAAAAATTTATTGACTTAGGAAAAAAAAAAAA
>read_09
TGTGATCAATTTAAAAATTTATTGACTTAGGAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
>read_10
GGCCATGACCTTGAATCTACCGTCGATATCGCGGAATCGACTCGCCGTATCGAAAAAAAA
"""
sam_content_1 = """@HD VN:1.0
@SQ SN:SL1344 LN:960
@PG ID:segemehl VN:0.9.4-$Rev: 316 $ ($Date: 2011-08-18 16:37:19 +0200 (Thu, 18 Aug 2011) $)
read_01 0 SL1344 1 255 10M * 0 0 ACAACATCCA * NM:i:0 MD:Z:10 NH:i:1 XA:Z:Q
read_01 0 SL1344 50 255 10M * 0 0 ACAACATCCA * NM:i:0 MD:Z:10 NH:i:1 XA:Z:Q
"""
sam_content_2 = """@HD VN:1.0
@SQ SN:SL1344 LN:960
@PG ID:segemehl VN:0.9.4-$Rev: 316 $ ($Date: 2011-08-18 16:37:19 +0200 (Thu, 18 Aug 2011) $)
read_01 0 SL1344 100 255 10M * 0 0 ACAACATCCA * NM:i:0 MD:Z:10 NH:i:1 XA:Z:Q
read_01 0 SL1344 500 255 10M * 0 0 ACAACATCCA * NM:i:0 MD:Z:10 NH:i:1 XA:Z:Q
"""
gff_content_1 = """##gff-version 3
#!gff-spec-version 1.14
#!source-version NCBI C++ formatter 0.2
##Type DNA SL1344
SL1344 EMBL gene 99 115 . + . ID=SL1344:foo;locus_tag=SL1344_0001
SL1344 EMBL gene 99 115 . - . ID=SL1344:bar;locus_tag=SL1344_0002
SL1344 EMBL gene 110 130 . + . ID=SL1344:samba;locus_tag=SL1344_0003
SL1344 EMBL gene 109 140 . + . ID=SL1344:limbo;locus_tag=SL1344_0004
SL1344 EMBL gene 505 550 . - . ID=SL1344:rumba;locus_tag=SL1344_0005
"""
# Currently not used
gff_content_2 = """##gff-version 3
#!gff-spec-version 1.14
#!source-version NCBI C++ formatter 0.2
##Type DNA SL1344
SL1344 EMBL source 1 4878012 . + . organism=Salmonella enterica subsp. enterica serovar Typhimurium str. SL1344;
SL1344 EMBL gene 169 255 . + . ID=SL1344:thrL;locus_tag=SL1344_0001
SL1344 EMBL CDS 169 252 . + 0 ID=SL1344:thrL:unknown_transcript_1;Parent=SL1344:thrL;locus_tag=SL1344_0001;
SL1344 EMBL start_codon 169 171 . + 0 ID=SL1344:thrL:unknown_transcript_1;Parent=SL1344:thrL;locus_tag=SL1344_0001;
SL1344 EMBL stop_codon 253 255 . + 0 ID=SL1344:thrL:unknown_transcript_1;Parent=SL1344:thrL;locus_tag=SL1344_0001;
SL1344 EMBL gene 337 2799 . + . ID=SL1344:thrA;locus_tag=SL1344_0002
SL1344 EMBL CDS 337 2796 . + 0 ID=SL1344:thrA:unknown_transcript_1;Parent=SL1344:thrA;locus_tag=SL1344_0002;
SL1344 EMBL start_codon 337 339 . + 0 ID=SL1344:thrA:unknown_transcript_1;Parent=SL1344:thrA;locus_tag=SL1344_0002;
SL1344 EMBL stop_codon 2797 2799 . + 0 ID=SL1344:thrA:unknown_transcript_1;Parent=SL1344:thrA;locus_tag=SL1344_0002;
SL1344 EMBL misc_feature 337 2796 . + . ID=SL1344:thrA:unknown_transcript_2;Parent=SL1344:thrA;locus_tag=SL1344_0002;
SL1344 EMBL misc_feature 337 1224 . + . ID=SL1344:thrA:unknown_transcript_3;Parent=SL1344:thrA;locus_tag=SL1344_0002;
SL1344 EMBL misc_feature 349 351 . + . ID=SL1344:thrA:unknown_transcript_4;Parent=SL1344:thrA;locus_tag=SL1344_0002;
"""
overlap_output_1 = """read_01 SL1344 100 109 + 1 SL1344 EMBL gene 99 115 . + . ID=SL1344:foo;locus_tag=SL1344_0001
read_01 SL1344 100 109 + 1 SL1344 EMBL gene 99 115 . - . ID=SL1344:bar;locus_tag=SL1344_0002
read_01 SL1344 100 109 + 1 SL1344 EMBL gene 109 140 . + . ID=SL1344:limbo;locus_tag=SL1344_0004
read_01 SL1344 500 509 + 1 SL1344 EMBL gene 505 550 . - . ID=SL1344:rumba;locus_tag=SL1344_0005
"""
overlap_output_2 = """read_01 SL1344 1 10 + 1 no_overlap
read_01 SL1344 50 59 + 1 no_overlap
"""
if __name__ == "__main__":
unittest.main()
| 38.367257 | 118 | 0.770038 |
3874bda6c5841b3afe3b384f2c7bfdb1d011ff4d | 1,354 | py | Python | aliyun-python-sdk-cr/aliyunsdkcr/request/v20160607/GetNamespaceListRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cr/aliyunsdkcr/request/v20160607/GetNamespaceListRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-cr/aliyunsdkcr/request/v20160607/GetNamespaceListRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class GetNamespaceListRequest(RoaRequest):
    """ROA-style request for the 'cr' product's GetNamespaceList API (version 2016-06-07).

    Issues GET /namespace; optional query parameters: Authorize, Status.
    """

    def __init__(self):
        RoaRequest.__init__(self, 'cr', '2016-06-07', 'GetNamespaceList','acr')
        self.set_uri_pattern('/namespace')
        self.set_method('GET')

    def get_Authorize(self):
        # Returns None when the Authorize query parameter has not been set.
        return self.get_query_params().get('Authorize')

    def set_Authorize(self,Authorize):
        self.add_query_param('Authorize',Authorize)

    def get_Status(self):
        # Returns None when the Status query parameter has not been set.
        return self.get_query_params().get('Status')
def set_Status(self,Status):
self.add_query_param('Status',Status) | 34.717949 | 74 | 0.757755 |
4808121bdf7ca87dc687fb84e66a6c4bdb8c32ee | 4,421 | py | Python | uhd_restpy/testplatform/sessions/ixnetwork/topology/learnedinfo/col_82c9f692cc4dfbaf274869de8a335e5e.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/topology/learnedinfo/col_82c9f692cc4dfbaf274869de8a335e5e.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/topology/learnedinfo/col_82c9f692cc4dfbaf274869de8a335e5e.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Col(Base):
    """DEPRECATED A column view of learned information.

    The Col class encapsulates a list of col resources that are managed by the system.
    A list of resources can be retrieved from the server using the Col.find() method.
    """

    __slots__ = ()
    # Resource name used by the generated SDM/REST layer for this class.
    _SDM_NAME = 'col'
    # Maps the Python-facing attribute name to the server-side attribute name.
    _SDM_ATT_MAP = {
        'Value': 'value',
    }

    def __init__(self, parent):
        super(Col, self).__init__(parent)

    @property
    def CellTable(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.celltable_bef6632b895c626cc7174eb89a76162c.CellTable): An instance of the CellTable class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily — presumably to avoid circular imports between the
        # generated modules; confirm before hoisting to module level.
        from uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.celltable_bef6632b895c626cc7174eb89a76162c import CellTable
        return CellTable(self)

    @property
    def Row(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.row_dbafc34e8c4bf46a4ac7b647400c39d3.Row): An instance of the Row class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Lazy import, same rationale as CellTable above.
        from uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.row_dbafc34e8c4bf46a4ac7b647400c39d3 import Row
        return Row(self)

    @property
    def Value(self):
        """
        Returns
        -------
        - str: A learned information value
        """
        return self._get_attribute(self._SDM_ATT_MAP['Value'])

    def find(self, Value=None):
        """Finds and retrieves col resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve col resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all col resources from the server.

        Args
        ----
        - Value (str): A learned information value

        Returns
        -------
        - self: This instance with matching col resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of col data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the col resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 38.112069 | 168 | 0.666139 |
75d25515e7c8511a1bb926e66454877f7bd0ff4e | 455 | py | Python | data/scripts/templates/object/draft_schematic/scout/shared_item_trap_flash_bomb.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/scout/shared_item_trap_flash_bomb.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/scout/shared_item_trap_flash_bomb.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Intangible template for this draft schematic.

	NOTE: this file is autogenerated (see header); hand edits belong only
	between the BEGIN/END MODIFICATIONS markers below.
	"""
	result = Intangible()
	result.template = "object/draft_schematic/scout/shared_item_trap_flash_bomb.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")

	#### BEGIN MODIFICATIONS ####

	#### END MODIFICATIONS ####
return result | 26.764706 | 81 | 0.731868 |
f1174bcb33ddff259c34a5b7b554cb7dfc82d8dd | 2,507 | py | Python | setup.py | datalayer-externals/jupyterlab-ksmm | 9641c33d8097d7154e424c8388f7092416ce0fcb | [
"MIT"
] | null | null | null | setup.py | datalayer-externals/jupyterlab-ksmm | 9641c33d8097d7154e424c8388f7092416ce0fcb | [
"MIT"
] | null | null | null | setup.py | datalayer-externals/jupyterlab-ksmm | 9641c33d8097d7154e424c8388f7092416ce0fcb | [
"MIT"
] | null | null | null | """ksmm setup
"""
import json
from pathlib import Path
import setuptools
# Directory containing this setup.py.
HERE = Path(__file__).parent.resolve()

# The name of the project (the Python package directory, with "-" -> "_").
name = "ksmm"

# Location of the built JupyterLab extension assets inside the package.
lab_path = HERE / name.replace("-", "_") / "labextension"

# Representative files that should exist after a successful build.
ensured_targets = [str(lab_path / "package.json"), str(lab_path / "static/style.js")]

# Name of the JupyterLab (npm) extension package.
labext_name = "@quansight/jupyterlab-ksmm"

# (destination, source, pattern) triples consumed by jupyter_packaging below.
data_files_spec = [
    (
        "share/jupyter/labextensions/%s" % labext_name,
        str(lab_path.relative_to(HERE)),
        "**",
    ),
    ("share/jupyter/labextensions/%s" % labext_name, ".", "install.json"),
    (
        "etc/jupyter/jupyter_server_config.d",
        "jupyter-config/server-config",
        "ksmm.json",
    ),
    # For backward compatibility with the classic notebook server.
    ("etc/jupyter/jupyter_notebook_config.d", "jupyter-config/nb-config", "ksmm.json"),
]

# Read as UTF-8 explicitly: without it Path.read_text() uses the platform
# default encoding and can raise UnicodeDecodeError on non-ASCII READMEs.
long_description = (HERE / "README.md").read_text(encoding="utf-8")

# Get the package info from package.json (json.loads handles the encoding
# of raw bytes itself).
pkg_json = json.loads((HERE / "package.json").read_bytes())

setup_args = dict(
    name=name,
    version=pkg_json["version"],
    url=pkg_json["homepage"],
    description=pkg_json["description"],
    license=pkg_json["license"],
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    install_requires=[
        "jupyter_server>=1.6,<2",
        "psutil",
        "ulid-py",
    ],
    zip_safe=False,
    include_package_data=True,
    python_requires=">=3.6",
    platforms="Linux, Mac OS X, Windows",
    keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Framework :: Jupyter",
    ],
)

try:
    from jupyter_packaging import wrap_installers, npm_builder, get_data_files

    post_develop = npm_builder(
        build_cmd="install:extension", source_dir="src", build_dir=lab_path
    )
    setup_args["cmdclass"] = wrap_installers(
        post_develop=post_develop, ensured_targets=ensured_targets
    )
    setup_args["data_files"] = get_data_files(data_files_spec)
except ImportError:
    # jupyter_packaging is only needed to build the labextension assets;
    # without it, install proceeds with the plain setuptools arguments.
    pass

if __name__ == "__main__":
    setuptools.setup(**setup_args)
| 28.488636 | 87 | 0.658556 |
6e3cc3ec03a1932a70ba5d79b0d7bcc9915186c3 | 1,620 | py | Python | tools/lib/dfbOptions.py | pfloos/QUESTDB_website | 720fb41c42f50e3614cf406fa6cade594f9dd526 | [
"BSD-3-Clause"
] | 2 | 2020-10-29T19:41:52.000Z | 2021-08-12T04:28:33.000Z | tools/lib/dfbOptions.py | rdguerrerom/QUESTDB_website | bebcfdd9596ca90f9c1ca210a68569b767fdfbce | [
"BSD-3-Clause"
] | 1 | 2020-11-20T10:06:44.000Z | 2020-11-20T10:06:44.000Z | tools/lib/dfbOptions.py | rdguerrerom/QUESTDB_website | bebcfdd9596ca90f9c1ca210a68569b767fdfbce | [
"BSD-3-Clause"
] | 2 | 2020-11-16T14:46:02.000Z | 2020-11-24T15:56:47.000Z | from TexSoup import TexSoup,TexCmd
from . import formats
from .data import dataFileBase,DataType,state
from collections import defaultdict
class dfbOptions(object):
    """Option holder populated from a TexSoup-parsed LaTeX environment.

    Defaults: defaultType=DataType.ABS, format="line", suffix=None, and an
    initialStates mapping whose missing keys default to state(1, 1, "A_1").
    """

    def __init__(self):
        self.defaultType=DataType.ABS
        self.format="line"
        self.suffix=None
        # defaultdict: unknown keys fall back to the default state factory.
        self.initialStates=defaultdict(lambda : state(1,1,"A_1"))

    @staticmethod
    def readFromEnv(lateEnv):
        """Build a dfbOptions from the \\defaultType, \\format, \\suffix and
        repeated \\initialState commands found inside *lateEnv*.

        NOTE(review): comparisons use `!= None` / `type(...) is TexCmd`;
        PEP 8 prefers `is not None` / isinstance — left unchanged here.
        """
        dfb_Opt=dfbOptions()
        dfbDefaultTypeNode=lateEnv.defaultType
        if dfbDefaultTypeNode!=None:
            dfbDefaultType=dfbDefaultTypeNode.expr
            if type(dfbDefaultType) is TexCmd:
                # Command argument names a DataType member (case-insensitive).
                dfb_Opt.defaultType=DataType[dfbDefaultType.args[0].value.upper()]
        dfbFormatNode=lateEnv.format
        if dfbFormatNode!=None:
            dfbFormat=dfbFormatNode.expr
            if type(dfbFormat) is TexCmd:
                dfb_Opt.format=dfbFormat.args[0].value
        dfbSuffixNode=lateEnv.suffix
        if dfbSuffixNode!=None:
            dfbSuffix=dfbSuffixNode.expr
            if type(dfbSuffix) is TexCmd:
                dfb_Opt.suffix=dfbSuffix.args[0].value
        dfbInitialStateNodes=list(lateEnv.find_all("initialState"))
        for node in dfbInitialStateNodes:
            initialState=node.expr
            if type(initialState) is TexCmd:
                vRArgs=[arg.value for arg in initialState.args if arg.type=="required"]
                vOArgs=[arg.value for arg in initialState.args if arg.type=="optional"]
                if len(vOArgs)==0:
                    # No optional arg: this state becomes the new default for
                    # every key not set explicitly.
                    defaultstate=state.fromString("1 "+vRArgs[0])
                    dfb_Opt.initialStates.default_factory=lambda : defaultstate
                else:
                    # Optional arg names the specific key this state applies to.
                    mystate=state.fromString("1 "+vRArgs[0])
                    dfb_Opt.initialStates[vOArgs[0]]=mystate
return dfb_Opt | 36 | 79 | 0.709877 |
e88c5bdafcb09c209d2d242a067ffb77f25b412a | 230 | py | Python | src/lambdalith_router/__main__.py | andrewthetechie/py-lambdalith-router | a178b1d50b70657a5497b4c201780f406c1c083f | [
"MIT"
] | null | null | null | src/lambdalith_router/__main__.py | andrewthetechie/py-lambdalith-router | a178b1d50b70657a5497b4c201780f406c1c083f | [
"MIT"
] | 4 | 2022-03-18T01:30:37.000Z | 2022-03-31T01:50:38.000Z | src/lambdalith_router/__main__.py | andrewthetechie/py-lambdalith-router | a178b1d50b70657a5497b4c201780f406c1c083f | [
"MIT"
] | null | null | null | """Command-line interface."""
import click
@click.command()
@click.version_option()
def main() -> None:
    """Py Lambdalith Router."""
    # Placeholder entry point: no options or behavior are defined here beyond
    # the --version/--help handling supplied by the click decorators.
# Invoke the CLI with an explicit program name when run as a script.
if __name__ == "__main__":
    main(prog_name="py-lambdalith-router")  # pragma: no cover
| 17.692308 | 62 | 0.665217 |
c1ca00fc2a9da0c851d6602bd77e1bea6db52288 | 1,058 | py | Python | runoob/advanced_tutorial/thread_3.py | zeroonegit/python | 919f8bb14ae91e37e42ff08192df24b60135596f | [
"MIT"
] | 1 | 2017-03-30T00:43:40.000Z | 2017-03-30T00:43:40.000Z | runoob/advanced_tutorial/thread_3.py | QuinceySun/Python | 919f8bb14ae91e37e42ff08192df24b60135596f | [
"MIT"
] | null | null | null | runoob/advanced_tutorial/thread_3.py | QuinceySun/Python | 919f8bb14ae91e37e42ff08192df24b60135596f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
############################
# File Name: thread_3.py
# Author: One Zero
# Mail: zeroonegit@gmail.com
# Created Time: 2015-12-29 18:58:05
############################
import threading
import time
# Module-level stop flag polled by print_time(); set non-zero to make the
# worker threads exit early.
exitFlag = 0
class myThread(threading.Thread):
    """Worker thread: announces start/exit and prints timestamps in between.

    Subclasses threading.Thread; the work lives in run(), which Thread.start()
    executes on the new thread.
    """

    def __init__(self, threadID, name, counter):
        super().__init__()
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        print(f"Starting {self.name}")
        # self.counter is forwarded as the per-print delay; 5 is the number
        # of timestamps to print.
        print_time(self.name, self.counter, 5)
        print(f"Exiting {self.name}")
def print_time(threadName, delay, counter):
    """Print `threadName` plus the current time, `counter` times.

    Sleeps `delay` seconds before each print. If the module-level `exitFlag`
    becomes truthy, the calling thread terminates early.
    """
    while counter:
        if exitFlag:
            # Bug fix: the original called threading.exit(), which does not
            # exist and raised AttributeError. Raising SystemExit ends only
            # the current thread — the Python 3 equivalent of py2 thread.exit().
            raise SystemExit
        time.sleep(delay)
        print("%s: %s" % (threadName, time.ctime(time.time())))
        counter -= 1
# Create two worker threads; the last argument (counter) is used by run()
# as the per-print sleep delay passed to print_time().
thread1 = myThread(1, "Thread-1", 1)
thread2 = myThread(2, "Thread-2", 2)
# Start the threads (each executes myThread.run on its own thread).
thread1.start()
thread2.start()
# NOTE(review): the main thread never join()s the workers, so this message
# can print before the workers finish.
print("Exiting Main Thread")
| 24.045455 | 63 | 0.603025 |
976e3d0e806b1ded332d618aaaf09b5b91938eb0 | 1,872 | py | Python | awx_collection/test/awx/test_ad_hoc_wait.py | Geraldf/awx | 10d1b3a3b2680db1e8a7d3b846f5cbce02a37aba | [
"Apache-2.0"
] | 2 | 2021-03-18T11:08:15.000Z | 2021-03-19T09:20:27.000Z | awx_collection/test/awx/test_ad_hoc_wait.py | Saurabh-Thakre/awx | 8eb377a3ea8303c394ad4c958cc828c7239c1e11 | [
"Apache-2.0"
] | 24 | 2021-04-01T08:33:08.000Z | 2022-03-01T21:13:06.000Z | awx_collection/test/awx/test_ad_hoc_wait.py | hostinger/awx | dac01b14e2c04c201a162ea03ef8386d822e3923 | [
"Apache-2.0"
] | 24 | 2020-11-27T08:37:35.000Z | 2021-03-08T13:27:15.000Z | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from django.utils.timezone import now
from awx.main.models.ad_hoc_commands import AdHocCommand
@pytest.mark.django_db
def test_ad_hoc_command_wait_successful(run_module, admin_user):
    """tower_ad_hoc_command_wait reports an already-successful command, unchanged."""
    # Seed a finished ad hoc command directly in the DB so the module has
    # nothing to actually wait for.
    command = AdHocCommand.objects.create(status='successful', started=now(), finished=now())
    result = run_module('tower_ad_hoc_command_wait', dict(
        command_id=command.id
    ), admin_user)
    result.pop('invocation', None)
    # Only the first 10 chars (the YYYY-MM-DD date part) are compared —
    # presumably because timestamp rendering can differ in the tail; confirm.
    assert result.pop('finished', '')[:10] == str(command.finished)[:10]
    assert result.pop('started', '')[:10] == str(command.started)[:10]
    assert result == {
        "status": "successful",
        "changed": False,
        "elapsed": str(command.elapsed),
        "id": command.id
    }
@pytest.mark.django_db
def test_ad_hoc_command_wait_failed(run_module, admin_user):
    """A failed command makes the module report failed=True with a message."""
    command = AdHocCommand.objects.create(status='failed', started=now(), finished=now())
    result = run_module('tower_ad_hoc_command_wait', dict(
        command_id=command.id
    ), admin_user)
    result.pop('invocation', None)
    # Date-prefix comparison only, as in the successful-case test above.
    assert result.pop('finished', '')[:10] == str(command.finished)[:10]
    assert result.pop('started', '')[:10] == str(command.started)[:10]
    # The expected msg hard-codes id 1 — presumably this is the only command
    # created in the per-test database; confirm if tests are reordered.
    assert result == {
        "status": "failed",
        "failed": True,
        "changed": False,
        "elapsed": str(command.elapsed),
        "id": command.id,
        "msg": "The ad hoc command - 1, failed"
    }
@pytest.mark.django_db
def test_ad_hoc_command_wait_not_found(run_module, admin_user):
    """Waiting on a nonexistent command id fails with a clear error message."""
    result = run_module('tower_ad_hoc_command_wait', dict(
        command_id=42
    ), admin_user)
    result.pop('invocation', None)
    assert result == {
        "failed": True,
        "msg": "Unable to wait on ad hoc command 42; that ID does not exist in Tower."
    }
| 33.428571 | 93 | 0.666132 |
5b444f8cef4fb4572c23b45f94b315e1d7dc9244 | 1,199 | py | Python | database/sql_utils/tag.py | john8680/tequila | 23d8a8dae4843bc42cd9dba76d6c07296b0a8632 | [
"Apache-2.0"
] | 3 | 2019-09-27T02:56:54.000Z | 2020-06-15T06:09:54.000Z | database/sql_utils/tag.py | john8680/tequila | 23d8a8dae4843bc42cd9dba76d6c07296b0a8632 | [
"Apache-2.0"
] | null | null | null | database/sql_utils/tag.py | john8680/tequila | 23d8a8dae4843bc42cd9dba76d6c07296b0a8632 | [
"Apache-2.0"
] | 2 | 2019-09-27T02:56:59.000Z | 2019-10-15T10:57:36.000Z | # -*- coding: utf-8 -*-
from tornado import gen
from database.sql_utils.connect import async_connect
@gen.coroutine
def get_all_tags():
    """Fetch all (tid, tag_name) rows from t_tag, ordered by tid.

    Returns an empty tuple/list on any database error (deliberate best-effort
    read); the cursor and connection are always closed.
    """
    conn = yield async_connect()
    cur = conn.cursor()
    sql = "SELECT tid, tag_name FROM t_tag ORDER BY tid;"
    try:
        yield cur.execute(sql)
        data = cur.fetchall()
    except Exception:
        # Swallow DB errors on purpose: callers get [] instead of an exception.
        # (Was `except Exception as e` with `e` unused — binding removed.)
        data = []
    finally:
        cur.close()
        conn.close()
    raise gen.Return(data)
@gen.coroutine
def get_tag_list():
    """Fetch per-tag aggregates: (tid, tag_name, question_count, user_count).

    The static SQL groups t_question rows (joined to t_tag and t_user) first
    by (tid, uid), then by tid, and orders tags by question_count descending.
    Returns an empty list on any database error (best-effort, as in
    get_all_tags); the cursor and connection are always closed.
    """
    conn = yield async_connect()
    cur = conn.cursor()
    # SQL is assembled from constant fragments only — no user input involved.
    sql = "SELECT d.tid, d.tag_name, SUM(d.qcount) question_count, SUM(d.ucount) user_count FROM ("
    sql += "SELECT tid, tag_name, COUNT(tid) qcount, uid, username, COUNT(uid) ucount FROM ("
    sql += "SELECT q.qid, t.tag_name, t.tid, u.username, u.uid FROM t_question q"
    sql += " LEFT JOIN t_tag t ON t.tid = q.tid"
    sql += " LEFT JOIN t_user u ON u.uid = q.uid) c"
    sql += " GROUP BY tid, uid) d GROUP BY d.tid ORDER BY question_count DESC;"
    try:
        yield cur.execute(sql)
        data = cur.fetchall()
    except Exception:
        # Deliberate best-effort read (unused `as e` binding removed).
        data = []
    finally:
        cur.close()
        conn.close()
    raise gen.Return(data)
| 27.883721 | 99 | 0.613845 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.